From dcdc1a56021edeb8d57c06b6a1c06827f368c259 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 24 Jul 2013 15:26:54 +0200
Subject: [PATCH 222/365] workqueue: Use normal rcu

There is no need for sched_rcu. The undocumented reason why sched_rcu
is used is to avoid a few explicit rcu_read_lock()/unlock() pairs by
abusing the fact that sched_rcu reader side critical sections are also
protected by preempt or irq disabled regions.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
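[ Note: a minimal illustrative sketch, not part of the patch itself, of the
  read-side pattern this change adopts. Under sched-RCU, a reader such as
  get_work_pool() was implicitly protected by any preempt- or irq-disabled
  region; with normal RCU the critical section must be marked explicitly:

        /* before: reader piggybacked on an irq/preempt-disabled region */
        local_irq_disable();
        pool = get_work_pool(work);
        local_irq_enable();

        /* after: explicit RCU read-side critical section */
        rcu_read_lock();
        pool = get_work_pool(work);
        rcu_read_unlock();

  The update side follows suit: call_rcu_sched() becomes call_rcu(), so the
  grace period now waits for rcu_read_unlock() rather than for all
  preemption-disabled sections to end. Callers that additionally need a
  stable CPU, such as workqueue_congested(), gain an explicit
  preempt_disable()/preempt_enable() pair on top of rcu_read_lock(). ]
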
 kernel/workqueue.c | 95 +++++++++++++++++++++++++---------------------
 1 file changed, 52 insertions(+), 43 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 205c3131f8b0..8aa997b9220a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -127,7 +127,7 @@ enum {
  *
  * PL: wq_pool_mutex protected.
  *
- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
+ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
  *
  * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
  *
@@ -136,7 +136,7 @@ enum {
  *
  * WQ: wq->mutex protected.
  *
- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
+ * WR: wq->mutex protected for writes. RCU protected for reads.
  *
  * MD: wq_mayday_lock protected.
  */
@@ -186,7 +186,7 @@ struct worker_pool {
         atomic_t nr_running ____cacheline_aligned_in_smp;
 
         /*
-         * Destruction of pool is sched-RCU protected to allow dereferences
+         * Destruction of pool is RCU protected to allow dereferences
          * from get_work_pool().
          */
         struct rcu_head rcu;
@@ -215,7 +215,7 @@ struct pool_workqueue {
         /*
          * Release of unbound pwq is punted to system_wq. See put_pwq()
          * and pwq_unbound_release_workfn() for details. pool_workqueue
-         * itself is also sched-RCU protected so that the first pwq can be
+         * itself is also RCU protected so that the first pwq can be
          * determined without grabbing wq->mutex.
          */
         struct work_struct unbound_release_work;
@@ -357,20 +357,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 #include <trace/events/workqueue.h>
 
 #define assert_rcu_or_pool_mutex() \
-        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
                          !lockdep_is_held(&wq_pool_mutex), \
-                         "sched RCU or wq_pool_mutex should be held")
+                         "RCU or wq_pool_mutex should be held")
 
 #define assert_rcu_or_wq_mutex(wq) \
-        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
                          !lockdep_is_held(&wq->mutex), \
-                         "sched RCU or wq->mutex should be held")
+                         "RCU or wq->mutex should be held")
 
 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
-        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+        RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
                          !lockdep_is_held(&wq->mutex) && \
                          !lockdep_is_held(&wq_pool_mutex), \
-                         "sched RCU, wq->mutex or wq_pool_mutex should be held")
+                         "RCU, wq->mutex or wq_pool_mutex should be held")
 
 #define for_each_cpu_worker_pool(pool, cpu) \
         for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
@@ -382,7 +382,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  * @pool: iteration cursor
  * @pi: integer used for iteration
  *
- * This must be called either with wq_pool_mutex held or sched RCU read
+ * This must be called either with wq_pool_mutex held or RCU read
  * locked. If the pool needs to be used beyond the locking in effect, the
  * caller is responsible for guaranteeing that the pool stays online.
  *
@@ -414,7 +414,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  * @pwq: iteration cursor
  * @wq: the target workqueue
  *
- * This must be called either with wq->mutex held or sched RCU read locked.
+ * This must be called either with wq->mutex held or RCU read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
  *
@@ -550,7 +550,7 @@ static int worker_pool_assign_id(struct worker_pool *pool)
  * @wq: the target workqueue
  * @node: the node ID
  *
- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
+ * This must be called with any of wq_pool_mutex, wq->mutex or RCU
  * read locked.
  * If the pwq needs to be used beyond the locking in effect, the caller is
  * responsible for guaranteeing that the pwq stays online.
@@ -694,8 +694,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * @work: the work item of interest
  *
  * Pools are created and destroyed under wq_pool_mutex, and allows read
- * access under sched-RCU read lock. As such, this function should be
- * called under wq_pool_mutex or with preemption disabled.
+ * access under RCU read lock. As such, this function should be
+ * called under wq_pool_mutex or inside of a rcu_read_lock() region.
  *
  * All fields of the returned pool are accessible as long as the above
  * mentioned locking is in effect. If the returned pool needs to be used
@@ -1100,7 +1100,7 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
 {
         if (pwq) {
                 /*
-                 * As both pwqs and pools are sched-RCU protected, the
+                 * As both pwqs and pools are RCU protected, the
                  * following lock operations are safe.
                  */
                 spin_lock_irq(&pwq->pool->lock);
@@ -1228,6 +1228,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
                 return 0;
 
+        rcu_read_lock();
         /*
          * The queueing is in progress, or it is already queued. Try to
          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
@@ -1266,10 +1267,12 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                 set_work_pool_and_keep_pending(work, pool->id);
 
                 spin_unlock(&pool->lock);
+                rcu_read_unlock();
                 return 1;
         }
         spin_unlock(&pool->lock);
 fail:
+        rcu_read_unlock();
         local_irq_restore(*flags);
         if (work_is_canceling(work))
                 return -ENOENT;
@@ -1383,6 +1386,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         if (unlikely(wq->flags & __WQ_DRAINING) &&
             WARN_ON_ONCE(!is_chained_work(wq)))
                 return;
+        rcu_read_lock();
 retry:
         /* pwq which will be used unless @work is executing elsewhere */
         if (wq->flags & WQ_UNBOUND) {
@@ -1441,10 +1445,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         /* pwq determined, queue */
         trace_workqueue_queue_work(req_cpu, pwq, work);
 
-        if (WARN_ON(!list_empty(&work->entry))) {
-                spin_unlock(&pwq->pool->lock);
-                return;
-        }
+        if (WARN_ON(!list_empty(&work->entry)))
+                goto out;
 
         pwq->nr_in_flight[pwq->work_color]++;
         work_flags = work_color_to_flags(pwq->work_color);
@@ -1462,7 +1464,9 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 
         insert_work(pwq, work, worklist, work_flags);
 
+out:
         spin_unlock(&pwq->pool->lock);
+        rcu_read_unlock();
 }
 
 /**
@@ -2789,14 +2793,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 
         might_sleep();
 
-        local_irq_disable();
+        rcu_read_lock();
         pool = get_work_pool(work);
         if (!pool) {
-                local_irq_enable();
+                rcu_read_unlock();
                 return false;
         }
 
-        spin_lock(&pool->lock);
+        spin_lock_irq(&pool->lock);
         /* see the comment in try_to_grab_pending() with the same code */
         pwq = get_work_pwq(work);
         if (pwq) {
@@ -2825,10 +2829,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
         else
                 lock_map_acquire_read(&pwq->wq->lockdep_map);
         lock_map_release(&pwq->wq->lockdep_map);
-
+        rcu_read_unlock();
         return true;
 already_gone:
         spin_unlock_irq(&pool->lock);
+        rcu_read_unlock();
         return false;
 }
 
@@ -3248,7 +3253,7 @@ static void rcu_free_pool(struct rcu_head *rcu)
  * put_unbound_pool - put a worker_pool
  * @pool: worker_pool to put
  *
- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU
+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU
  * safe manner. get_unbound_pool() calls this function on its failure path
  * and this function should be able to release pools which went through,
  * successfully or not, init_worker_pool().
@@ -3302,8 +3307,8 @@ static void put_unbound_pool(struct worker_pool *pool)
         del_timer_sync(&pool->idle_timer);
         del_timer_sync(&pool->mayday_timer);
 
-        /* sched-RCU protected to allow dereferences from get_work_pool() */
-        call_rcu_sched(&pool->rcu, rcu_free_pool);
+        /* RCU protected to allow dereferences from get_work_pool() */
+        call_rcu(&pool->rcu, rcu_free_pool);
 }
 
 /**
@@ -3410,14 +3415,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
         put_unbound_pool(pool);
         mutex_unlock(&wq_pool_mutex);
 
-        call_rcu_sched(&pwq->rcu, rcu_free_pwq);
+        call_rcu(&pwq->rcu, rcu_free_pwq);
 
         /*
          * If we're the last pwq going away, @wq is already dead and no one
          * is gonna access it anymore. Schedule RCU free.
          */
         if (is_last)
-                call_rcu_sched(&wq->rcu, rcu_free_wq);
+                call_rcu(&wq->rcu, rcu_free_wq);
 }
 
 /**
@@ -4103,7 +4108,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
                  * The base ref is never dropped on per-cpu pwqs. Directly
                  * schedule RCU free.
                  */
-                call_rcu_sched(&wq->rcu, rcu_free_wq);
+                call_rcu(&wq->rcu, rcu_free_wq);
         } else {
                 /*
                  * We're the sole accessor of @wq at this point. Directly
@@ -4213,7 +4218,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
         struct pool_workqueue *pwq;
         bool ret;
 
-        rcu_read_lock_sched();
+        rcu_read_lock();
+        preempt_disable();
 
         if (cpu == WORK_CPU_UNBOUND)
                 cpu = smp_processor_id();
@@ -4224,7 +4230,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
                 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
 
         ret = !list_empty(&pwq->delayed_works);
-        rcu_read_unlock_sched();
+        preempt_enable();
+        rcu_read_unlock();
 
         return ret;
 }
@@ -4250,15 +4257,15 @@ unsigned int work_busy(struct work_struct *work)
         if (work_pending(work))
                 ret |= WORK_BUSY_PENDING;
 
-        local_irq_save(flags);
+        rcu_read_lock();
         pool = get_work_pool(work);
         if (pool) {
-                spin_lock(&pool->lock);
+                spin_lock_irqsave(&pool->lock, flags);
                 if (find_worker_executing_work(pool, work))
                         ret |= WORK_BUSY_RUNNING;
-                spin_unlock(&pool->lock);
+                spin_unlock_irqrestore(&pool->lock, flags);
         }
-        local_irq_restore(flags);
+        rcu_read_unlock();
 
         return ret;
 }
@@ -4448,7 +4455,7 @@ void show_workqueue_state(void)
         unsigned long flags;
         int pi;
 
-        rcu_read_lock_sched();
+        rcu_read_lock();
 
         pr_info("Showing busy workqueues and worker pools:\n");
 
@@ -4513,7 +4520,7 @@ void show_workqueue_state(void)
                 touch_nmi_watchdog();
         }
 
-        rcu_read_unlock_sched();
+        rcu_read_unlock();
 }
 
 /*
@@ -4851,16 +4858,16 @@ bool freeze_workqueues_busy(void)
                  * nr_active is monotonically decreasing. It's safe
                  * to peek without lock.
                  */
-                rcu_read_lock_sched();
+                rcu_read_lock();
                 for_each_pwq(pwq, wq) {
                         WARN_ON_ONCE(pwq->nr_active < 0);
                         if (pwq->nr_active) {
                                 busy = true;
-                                rcu_read_unlock_sched();
+                                rcu_read_unlock();
                                 goto out_unlock;
                         }
                 }
-                rcu_read_unlock_sched();
+                rcu_read_unlock();
         }
 out_unlock:
         mutex_unlock(&wq_pool_mutex);
@@ -5050,7 +5057,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
         const char *delim = "";
         int node, written = 0;
 
-        rcu_read_lock_sched();
+        get_online_cpus();
+        rcu_read_lock();
         for_each_node(node) {
                 written += scnprintf(buf + written, PAGE_SIZE - written,
                                      "%s%d:%d", delim, node,
@@ -5058,7 +5066,8 @@ static ssize_t wq_pool_ids_show(struct device *dev,
                 delim = " ";
         }
         written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
-        rcu_read_unlock_sched();
+        rcu_read_unlock();
+        put_online_cpus();
 
         return written;
 }
--
2.28.0