From aaa8caa9aa399fc8d31be22068755cc519f2d5bf Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 31 Aug 2017 18:19:06 +0200
Subject: [PATCH 336/365] kernel/hrtimer: don't wakeup a process while holding
 the hrtimer base lock

We must not wake any process (and thus acquire the pi->lock) while
holding the hrtimer's base lock. This usually does not happen because
the hrtimer callback is invoked in IRQ context, so
raise_softirq_irqoff() does not wake up a process.
However, during CPU hotplug it might get called from hrtimers_dead_cpu(),
which would wake up the thread immediately.

Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 kernel/time/hrtimer.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 0860d60957cd..bfe46fe6a700 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1450,7 +1450,7 @@ static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
 
 static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
 
-static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+static int __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 {
 	struct hrtimer_clock_base *base = cpu_base->clock_base;
 	unsigned int active = cpu_base->active_bases;
@@ -1500,8 +1500,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 				raise = 1;
 		}
 	}
-	if (raise)
-		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+	return raise;
 }
 
 #ifdef CONFIG_HIGH_RES_TIMERS
@@ -1515,6 +1514,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t expires_next, now, entry_time, delta;
 	int retries = 0;
+	int raise;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
@@ -1533,7 +1533,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	 */
 	cpu_base->expires_next.tv64 = KTIME_MAX;
 
-	__hrtimer_run_queues(cpu_base, now);
+	raise = __hrtimer_run_queues(cpu_base, now);
 
 	/* Reevaluate the clock bases for the next expiry */
 	expires_next = __hrtimer_get_next_event(cpu_base);
@@ -1544,6 +1544,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	cpu_base->expires_next = expires_next;
 	cpu_base->in_hrtirq = 0;
 	raw_spin_unlock(&cpu_base->lock);
+	if (raise)
+		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 
 	/* Reprogramming necessary ? */
 	if (!tick_program_event(expires_next, 0)) {
@@ -1623,6 +1625,7 @@ void hrtimer_run_queues(void)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t now;
+	int raise;
 
 	if (__hrtimer_hres_active(cpu_base))
 		return;
@@ -1641,8 +1644,10 @@ void hrtimer_run_queues(void)
 
 	raw_spin_lock(&cpu_base->lock);
 	now = hrtimer_update_base(cpu_base);
-	__hrtimer_run_queues(cpu_base, now);
+	raise = __hrtimer_run_queues(cpu_base, now);
 	raw_spin_unlock(&cpu_base->lock);
+	if (raise)
+		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 }
 
 /*
-- 
2.28.0
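
The pattern the patch applies is: decide under the lock whether deferred work exists, but only perform the wake-up after the lock has been dropped. Below is a minimal userspace sketch of that pattern, using pthread primitives as a stand-in for the kernel's raw spinlock and softirq machinery; all names (run_queues_locked, raise_worker, etc.) are illustrative only, not kernel API.

/*
 * Sketch: record the "raise" decision while holding base_lock, then
 * wake the worker only after base_lock has been released, so the
 * wake-up path never nests its own locking under base_lock.
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wake_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake_cond = PTHREAD_COND_INITIALIZER;
static bool work_pending;

/* Expire "timers"; return nonzero if anything had to be deferred. */
static int run_queues_locked(int defer)
{
	return defer;
}

/* Wake the worker thread; must NOT be called with base_lock held. */
static void raise_worker(void)
{
	pthread_mutex_lock(&wake_lock);
	work_pending = true;
	pthread_cond_signal(&wake_cond);
	pthread_mutex_unlock(&wake_lock);
}

static void interrupt_path(int defer)
{
	int raise;

	pthread_mutex_lock(&base_lock);
	raise = run_queues_locked(defer);	/* only record the decision */
	pthread_mutex_unlock(&base_lock);

	if (raise)				/* act on it after unlocking */
		raise_worker();
}

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&wake_lock);
	while (!work_pending)
		pthread_cond_wait(&wake_cond, &wake_lock);
	pthread_mutex_unlock(&wake_lock);
	puts("worker: handling deferred timers");
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	interrupt_path(1);	/* simulate a deferred timer */
	pthread_join(tid, NULL);
	return 0;
}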