From 57212acc72d4493fcbe96390d09d8487ae924703 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 10 Jun 2011 11:04:15 +0200
Subject: [PATCH 163/365] rtmutex: Handle the various new futex race conditions

RT opens a few new interesting race conditions in the rtmutex/futex
combo due to the futex hash bucket lock being a 'sleeping' spinlock and
therefore not disabling preemption.
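
One such interleaving, sketched for illustration (call paths
simplified; the exact ordering can vary):

    waiter                             requeue side
    ------                             ------------
    futex_wait_requeue_pi()
      futex_wait_queue_me()
      <woken early by signal/timeout>
                                       futex_requeue()
                                         rt_mutex_start_proxy_lock()
                                           enqueues the waiter on the
                                           uaddr2 rtmutex and sets
                                           pi_blocked_on
      spin_lock(&hb->lock)
        blocks on the sleeping lock
        and clobbers pi_blocked_on

The waiter would end up blocked on two locks at once with a corrupted
pi_blocked_on. Serialize pi_blocked_on with current->pi_lock: a woken
waiter marks itself with PI_WAKEUP_INPROGRESS before taking hb->lock,
and task_blocks_on_rt_mutex() refuses to enqueue such a task, so the
requeue side backs off with -EAGAIN.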

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 kernel/futex.c                  | 77 ++++++++++++++++++++++++++-------
 kernel/locking/rtmutex.c        | 37 +++++++++++++---
 kernel/locking/rtmutex_common.h |  2 +
 3 files changed, 95 insertions(+), 21 deletions(-)

diff --git a/kernel/futex.c b/kernel/futex.c
index 86eec2e2b752..068c2a971bf6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2079,6 +2079,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
 				requeue_pi_wake_futex(this, &key2, hb2);
 				drop_count++;
 				continue;
+			} else if (ret == -EAGAIN) {
+				/*
+				 * Waiter was woken by timeout or
+				 * signal and has set pi_blocked_on to
+				 * PI_WAKEUP_INPROGRESS before we
+				 * tried to enqueue it on the rtmutex.
+				 */
+				this->pi_state = NULL;
+				put_pi_state(pi_state);
+				continue;
 			} else if (ret) {
 				/*
 				 * rt_mutex_start_proxy_lock() detected a
@@ -3064,7 +3074,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct futex_pi_state *pi_state = NULL;
 	struct rt_mutex_waiter rt_waiter;
-	struct futex_hash_bucket *hb;
+	struct futex_hash_bucket *hb, *hb2;
 	union futex_key key2 = FUTEX_KEY_INIT;
 	struct futex_q q = futex_q_init;
 	int res, ret;
@@ -3120,20 +3130,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
 	futex_wait_queue_me(hb, &q, to);
 
-	spin_lock(&hb->lock);
-	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-	spin_unlock(&hb->lock);
-	if (ret)
-		goto out_put_keys;
+	/*
+	 * On RT we must avoid races with requeue and trying to block
+	 * on two mutexes (hb->lock and uaddr2's rtmutex) by
+	 * serializing access to pi_blocked_on with pi_lock.
+	 */
+	raw_spin_lock_irq(&current->pi_lock);
+	if (current->pi_blocked_on) {
+		/*
+		 * We have been requeued or are in the process of
+		 * being requeued.
+		 */
+		raw_spin_unlock_irq(&current->pi_lock);
+	} else {
+		/*
+		 * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
+		 * prevents a concurrent requeue from moving us to the
+		 * uaddr2 rtmutex. After that we can safely acquire
+		 * (and possibly block on) hb->lock.
+		 */
+		current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
+		raw_spin_unlock_irq(&current->pi_lock);
+
+		spin_lock(&hb->lock);
+
+		/*
+		 * Clean up pi_blocked_on. We might leak it otherwise
+		 * when we succeeded with the hb->lock in the fast
+		 * path.
+		 */
+		raw_spin_lock_irq(&current->pi_lock);
+		current->pi_blocked_on = NULL;
+		raw_spin_unlock_irq(&current->pi_lock);
+
+		ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+		spin_unlock(&hb->lock);
+		if (ret)
+			goto out_put_keys;
+	}
 
 	/*
-	 * In order for us to be here, we know our q.key == key2, and since
-	 * we took the hb->lock above, we also know that futex_requeue() has
-	 * completed and we no longer have to concern ourselves with a wakeup
-	 * race with the atomic proxy lock acquisition by the requeue code. The
-	 * futex_requeue dropped our key1 reference and incremented our key2
-	 * reference count.
+	 * In order to be here, we have either been requeued, are in
+	 * the process of being requeued, or requeue successfully
+	 * acquired uaddr2 on our behalf. If pi_blocked_on was
+	 * non-null above, we may be racing with a requeue. Do not
+	 * rely on q->lock_ptr to be hb2->lock until after blocking on
+	 * hb->lock or hb2->lock. The futex_requeue dropped our key1
+	 * reference and incremented our key2 reference count.
 	 */
+	hb2 = hash_futex(&key2);
 
 	/* Check if the requeue code acquired the second futex for us. */
 	if (!q.rt_waiter) {
@@ -3142,7 +3187,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		 * did a lock-steal - fix up the PI-state in that case.
 		 */
 		if (q.pi_state && (q.pi_state->owner != current)) {
-			spin_lock(q.lock_ptr);
+			spin_lock(&hb2->lock);
+			BUG_ON(&hb2->lock != q.lock_ptr);
 			ret = fixup_pi_state_owner(uaddr2, &q, current);
 			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
 				pi_state = q.pi_state;
@@ -3153,7 +3199,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 			 * the requeue_pi() code acquired for us.
 			 */
 			put_pi_state(q.pi_state);
-			spin_unlock(q.lock_ptr);
+			spin_unlock(&hb2->lock);
 		}
 	} else {
 		struct rt_mutex *pi_mutex;
@@ -3167,7 +3213,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		pi_mutex = &q.pi_state->pi_mutex;
 		ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
 
-		spin_lock(q.lock_ptr);
+		spin_lock(&hb2->lock);
+		BUG_ON(&hb2->lock != q.lock_ptr);
 		if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
 			ret = 0;
 
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index e3dd21fee0c7..b1ad592cdbef 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -133,6 +133,11 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
+{
+	return waiter && waiter != PI_WAKEUP_INPROGRESS;
+}
+
 /*
  * We can speed up the acquire/release, if there's no debugging state to be
  * set up.
@@ -389,7 +394,8 @@ int max_lock_depth = 1024;
 
 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
 {
-	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+	return rt_mutex_real_waiter(p->pi_blocked_on) ?
+		p->pi_blocked_on->lock : NULL;
 }
 
 /*
@@ -525,7 +531,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * reached or the state of the chain has changed while we
 	 * dropped the locks.
 	 */
-	if (!waiter)
+	if (!rt_mutex_real_waiter(waiter))
 		goto out_unlock_pi;
 
 	/*
@@ -961,6 +967,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		return -EDEADLK;
 
 	raw_spin_lock(&task->pi_lock);
+
+	/*
+	 * In the case of futex requeue PI, this will be a proxy
+	 * lock. The task will wake unaware that it is enqueued on
+	 * this lock. Avoid blocking on two locks and corrupting
+	 * pi_blocked_on via the PI_WAKEUP_INPROGRESS
+	 * flag. futex_wait_requeue_pi() sets this when it wakes up
+	 * before requeue (due to a signal or timeout). Do not enqueue
+	 * the task if PI_WAKEUP_INPROGRESS is set.
+	 */
+	if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
+		raw_spin_unlock(&task->pi_lock);
+		return -EAGAIN;
+	}
+
+	BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
+
 	rt_mutex_adjust_prio(task);
 	waiter->task = task;
 	waiter->lock = lock;
@@ -985,7 +1008,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		rt_mutex_enqueue_pi(owner, waiter);
 
 		rt_mutex_adjust_prio(owner);
-		if (owner->pi_blocked_on)
+		if (rt_mutex_real_waiter(owner->pi_blocked_on))
 			chain_walk = 1;
 	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
 		chain_walk = 1;
@@ -1081,7 +1104,7 @@ static void remove_waiter(struct rt_mutex *lock,
 {
 	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
-	struct rt_mutex *next_lock;
+	struct rt_mutex *next_lock = NULL;
 
 	lockdep_assert_held(&lock->wait_lock);
 
@@ -1107,7 +1130,8 @@ static void remove_waiter(struct rt_mutex *lock,
 	rt_mutex_adjust_prio(owner);
 
 	/* Store the lock on which owner is blocked or NULL */
-	next_lock = task_blocked_on_lock(owner);
+	if (rt_mutex_real_waiter(owner->pi_blocked_on))
+		next_lock = task_blocked_on_lock(owner);
 
 	raw_spin_unlock(&owner->pi_lock);
 
@@ -1143,7 +1167,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	waiter = task->pi_blocked_on;
-	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+	if (!rt_mutex_real_waiter(waiter) ||
+	    rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		return;
 	}
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 722e3cf38acf..be1c0171f647 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -99,6 +99,8 @@ enum rtmutex_chainwalk {
 /*
  * PI-futex support (proxy locking functions, etc.):
  */
+#define PI_WAKEUP_INPROGRESS	((struct rt_mutex_waiter *) 1)
+
 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				       struct task_struct *proxy_owner);
-- 
2.28.0