From 68026d4505c17ce86ee21feb4096b3318ad95db0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 14 Sep 2011 11:57:04 +0200
Subject: [PATCH 265/365] ipc/sem: Rework semaphore wakeups

Current sysv sems have a weird ass wakeup scheme that involves keeping
preemption disabled over a potential O(n^2) loop and busy waiting on
that on other CPUs.

Kill this and simply wake the task directly from under the sem_lock.

This was discovered by a migrate_disable() debug feature that
disallows:

  spin_lock();
  preempt_disable();
  spin_unlock();
  preempt_enable();

Suggested-by: Thomas Gleixner
Reported-by: Mike Galbraith
Signed-off-by: Peter Zijlstra
Cc: Manfred Spraul
Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins
Signed-off-by: Thomas Gleixner
---
 ipc/sem.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/ipc/sem.c b/ipc/sem.c
index 5cd9d802592f..c987efce266f 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -712,6 +712,13 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 static void wake_up_sem_queue_prepare(struct list_head *pt,
 				struct sem_queue *q, int error)
 {
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct task_struct *p = q->sleeper;
+	get_task_struct(p);
+	q->status = error;
+	wake_up_process(p);
+	put_task_struct(p);
+#else
 	if (list_empty(pt)) {
 		/*
 		 * Hold preempt off so that we don't get preempted and have the
@@ -723,6 +730,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
 	q->pid = error;
 
 	list_add_tail(&q->list, pt);
+#endif
 }
 
 /**
@@ -736,6 +744,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
  */
 static void wake_up_sem_queue_do(struct list_head *pt)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	struct sem_queue *q, *t;
 	int did_something;
 
@@ -748,6 +757,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
 	}
 	if (did_something)
 		preempt_enable();
+#endif
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
-- 
2.28.0
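
A note on what the RT branch above boils down to: a reference-counted
direct wakeup performed while sem_lock is still held, instead of the
deferred wakeup list. The sketch below is illustrative only, not part
of the patch: wake_one_sleeper() and struct waiter are hypothetical
stand-ins for the corresponding struct sem_queue fields in ipc/sem.c,
while get_task_struct(), wake_up_process() and put_task_struct() are
the real kernel primitives the patch uses.

	#include <linux/sched.h>	/* struct task_struct, wake_up_process() */

	struct waiter {
		struct task_struct *sleeper;	/* task blocked in semtimedop() */
		int status;			/* completion code the sleeper reads */
	};

	static void wake_one_sleeper(struct waiter *w, int error)
	{
		struct task_struct *p = w->sleeper;

		/*
		 * Pin the task before publishing the result: once status
		 * is written, the sleeper may observe it, return from the
		 * syscall and exit, so without a reference p could be
		 * freed before wake_up_process() dereferences it.
		 */
		get_task_struct(p);
		w->status = error;
		wake_up_process(p);
		put_task_struct(p);
	}

Waking directly under sem_lock is fine on PREEMPT_RT because spinlocks
there are preemptible sleeping locks, so the wakeup does not extend a
preempt-off region. The !RT scheme instead queues the wakeups on a list
and walks it with preemption held off across the unlock, which is
exactly the spin_lock()/preempt_disable()/spin_unlock()/preempt_enable()
interleave the migrate_disable() debug check rejects.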