From 4602d705c8d62807ab76a3eedc71fd9e944ca0b3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 15 Nov 2015 18:40:17 +0100
Subject: [PATCH 243/365] irqwork: Move irq safe work to irq context

On architectures where arch_irq_work_has_interrupt() returns false, we
end up running the irq safe work from the softirq context. That results
in a potential deadlock in the scheduler irq work, which expects to be
invoked with interrupts disabled.

Split irq_work_tick() into a hard and a soft variant. Call the hard
variant from the tick interrupt and add the soft variant to the timer
softirq.

Reported-and-tested-by: Yanjiang Jin <yanjiang.jin@windriver.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable-rt@vger.kernel.org
---
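Editor's note (kept below the '---' cut line so git-am drops it): whether
an item is handled by the hard or the soft variant is decided at queue
time. A minimal usage sketch, modelled on wake_up_klogd_work in
kernel/printk/printk.c; my_work, my_work_func and poke_my_work are
hypothetical names, not part of this patch:

  #include <linux/irq_work.h>
  #include <linux/percpu.h>

  static void my_work_func(struct irq_work *work)
  {
          /* IRQ_WORK_LAZY items sit on the per-CPU lazy_list: after
           * this patch they run from irq_work_tick() in the tick
           * interrupt on !RT, and from irq_work_tick_soft() in the
           * timer softirq on PREEMPT_RT_FULL. Items queued without
           * the flag normally go to the raised ("irq safe") list and
           * run with interrupts disabled. */
  }

  static DEFINE_PER_CPU(struct irq_work, my_work) = {
          .func  = my_work_func,
          .flags = IRQ_WORK_LAZY,
  };

  static void poke_my_work(void)
  {
          preempt_disable();
          irq_work_queue(this_cpu_ptr(&my_work));
          preempt_enable();
  }
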
 include/linux/irq_work.h | 6 ++++++
 kernel/irq_work.c        | 9 +++++++++
 kernel/time/timer.c      | 6 ++----
 3 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 0e427a9997f3..2543aab05daa 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -52,4 +52,10 @@ static inline bool irq_work_needs_cpu(void) { return false; }
 static inline void irq_work_run(void) { }
 #endif
 
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void);
+#else
+static inline void irq_work_tick_soft(void) { }
+#endif
+
 #endif /* _LINUX_IRQ_WORK_H */
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 0ddaf1e66d8c..2899ba0d23d1 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -200,8 +200,17 @@ void irq_work_tick(void)
 
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
+
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
+		irq_work_run_list(this_cpu_ptr(&lazy_list));
+}
+
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+void irq_work_tick_soft(void)
+{
 	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
+#endif
 
 /*
  * Synchronize against the irq_work @entry, ensures the entry is not
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index b742e84cbe1c..af9d338103a9 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1664,7 +1664,7 @@ void update_process_times(int user_tick)
 	scheduler_tick();
 	run_local_timers();
 	rcu_check_callbacks(user_tick);
-#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+#if defined(CONFIG_IRQ_WORK)
 	if (in_irq())
 		irq_work_tick();
 #endif
@@ -1720,9 +1720,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
-#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-	irq_work_tick();
-#endif
+	irq_work_tick_soft();
 
 	__run_timers(base);
 	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
--
2.28.0
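
Editor's note, not part of the patch: the irq_work.h hunk uses the usual
kernel stub idiom, a real prototype when the matching configs are enabled
and an empty static inline otherwise, so the new call site in
run_timer_softirq() compiles without an #ifdef. A generic sketch of the
idiom; CONFIG_FOO, foo_tick() and caller() are hypothetical names:

  /* foo.h */
  #ifdef CONFIG_FOO
  void foo_tick(void);                    /* real version lives in foo.c */
  #else
  static inline void foo_tick(void) { }   /* compiles to nothing when off */
  #endif

  /* caller.c: unconditional call, no #ifdef at the call site */
  void caller(void)
  {
          foo_tick();
  }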