From 151827095d265c1e0ec69a11963b2489a2a4ff9d Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Fri, 2 Mar 2012 10:36:57 -0500
Subject: [PATCH 277/365] cpu: Make hotplug.lock a "sleeping" spinlock on RT

Tasks can block on hotplug.lock in pin_current_cpu(), but their state
might be != RUNNING. So the mutex wakeup will set the state
unconditionally to RUNNING. That might cause spurious unexpected
wakeups. We could provide a state preserving mutex_lock() function,
but this is semantically backwards. So instead we convert the
hotplug.lock() to a spinlock for RT, which has the state preserving
semantics already.

Signed-off-by: Steven Rostedt
Cc: Carsten Emde
Cc: John Kacur
Cc: Peter Zijlstra
Cc: Clark Williams
Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner
---
 kernel/cpu.c | 32 +++++++++++++++++++++++++-------
 1 file changed, 25 insertions(+), 7 deletions(-)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1e8d61537430..adbc793d6b5e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -218,10 +218,16 @@ static int cpu_hotplug_disabled;
 
 static struct {
 	struct task_struct *active_writer;
+	/* wait queue to wake up the active_writer */
 	wait_queue_head_t wq;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/* Makes the lock keep the task's state */
+	spinlock_t		lock;
+#else
 	/* verifies that no writer will get active while readers are active */
 	struct mutex		lock;
+#endif
 	/*
 	 * Also blocks the new readers during
 	 * an ongoing cpu hotplug operation.
 	 */
@@ -234,12 +240,24 @@ static struct {
 } cpu_hotplug = {
 	.active_writer = NULL,
 	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
+#ifdef CONFIG_PREEMPT_RT_FULL
+	.lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
+#else
 	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
 #endif
 };
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock()		rt_spin_lock__no_mg(&cpu_hotplug.lock)
+# define hotplug_unlock()	rt_spin_unlock__no_mg(&cpu_hotplug.lock)
+#else
+# define hotplug_lock()		mutex_lock(&cpu_hotplug.lock)
+# define hotplug_unlock()	mutex_unlock(&cpu_hotplug.lock)
+#endif
+
 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
 #define cpuhp_lock_acquire_tryread() \
@@ -276,8 +294,8 @@ void pin_current_cpu(void)
 			return;
 		}
 		preempt_enable();
-		mutex_lock(&cpu_hotplug.lock);
-		mutex_unlock(&cpu_hotplug.lock);
+		hotplug_lock();
+		hotplug_unlock();
 		preempt_disable();
 		goto retry;
 	}
@@ -350,9 +368,9 @@ void get_online_cpus(void)
 	if (cpu_hotplug.active_writer == current)
 		return;
 	cpuhp_lock_acquire_read();
-	mutex_lock(&cpu_hotplug.lock);
+	hotplug_lock();
 	atomic_inc(&cpu_hotplug.refcount);
-	mutex_unlock(&cpu_hotplug.lock);
+	hotplug_unlock();
 }
 EXPORT_SYMBOL_GPL(get_online_cpus);
 
@@ -405,11 +423,11 @@ void cpu_hotplug_begin(void)
 	cpuhp_lock_acquire();
 
 	for (;;) {
-		mutex_lock(&cpu_hotplug.lock);
+		hotplug_lock();
 		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
 		if (likely(!atomic_read(&cpu_hotplug.refcount)))
 			break;
-		mutex_unlock(&cpu_hotplug.lock);
+		hotplug_unlock();
 		schedule();
 	}
 	finish_wait(&cpu_hotplug.wq, &wait);
@@ -418,7 +436,7 @@ void cpu_hotplug_begin(void)
 void cpu_hotplug_done(void)
 {
 	cpu_hotplug.active_writer = NULL;
-	mutex_unlock(&cpu_hotplug.lock);
+	hotplug_unlock();
 	cpuhp_lock_release();
 }
 
-- 
2.28.0
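
Illustration (not part of the patch): a sketch of the failure mode the
changelog describes. The function names cpu_hotplug.lock, pin_current_cpu()
and migrate_disable() are from the kernel sources; the exact calling
sequence below is illustrative, not a specific in-tree call chain.

	/*
	 * A task prepares to sleep, then does something that ends up in
	 * migrate_disable() -> pin_current_cpu() while a hotplug writer
	 * is active:
	 */
	set_current_state(TASK_UNINTERRUPTIBLE);	/* task intends to sleep */

	migrate_disable();	/* calls pin_current_cpu() */

	/*
	 * Before this patch, pin_current_cpu() waited for the writer via
	 *
	 *	mutex_lock(&cpu_hotplug.lock);
	 *	mutex_unlock(&cpu_hotplug.lock);
	 *
	 * The mutex wakeup path puts the woken waiter into TASK_RUNNING
	 * unconditionally, clobbering the TASK_UNINTERRUPTIBLE set above.
	 */

	schedule();	/* returns immediately: spurious wakeup */

With the patch, hotplug_lock() maps to rt_spin_lock__no_mg() on
PREEMPT_RT_FULL. RT's rtmutex-based spinlock slowpath saves the caller's
task state before blocking and restores it on wakeup, so the sequence
above sleeps as intended. The __no_mg variant is, as I read it, the one
that skips migrate_disable(), which avoids recursing back into
pin_current_cpu() from the lock itself.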