From 1de2b9a54477a93c4c1d762eb9481aa8e958ae24 Mon Sep 17 00:00:00 2001
From: Yang Shi
Date: Tue, 23 Feb 2016 13:23:23 -0800
Subject: [PATCH 066/365] trace: Use rcuidle version for preemptoff_hist trace point

When running an -rt kernel with both PREEMPT_OFF_HIST and LOCKDEP enabled,
the following error is reported:

[ INFO: suspicious RCU usage. ]
4.4.1-rt6 #1 Not tainted
include/trace/events/hist.h:31 suspicious rcu_dereference_check() usage!

other info that might help us debug this:

RCU used illegally from idle CPU!
rcu_scheduler_active = 1, debug_locks = 0
RCU used illegally from extended quiescent state!
no locks held by swapper/0/0.

stack backtrace:
CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.4.1-rt6-WR8.0.0.0_standard #1
Stack : 0000000000000006 0000000000000000 ffffffff81ca8c38 ffffffff81c8fc80
        ffffffff811bdd68 ffffffff81cb0000 0000000000000000 ffffffff81cb0000
        0000000000000000 0000000000000000 0000000000000004 0000000000000000
        0000000000000004 ffffffff811bdf50 0000000000000000 ffffffff82b60000
        0000000000000000 ffffffff812897ac ffffffff819f0000 000000000000000b
        ffffffff811be460 ffffffff81b7c588 ffffffff81c8fc80 0000000000000000
        0000000000000000 ffffffff81ec7f88 ffffffff81d70000 ffffffff81b70000
        ffffffff81c90000 ffffffff81c3fb00 ffffffff81c3fc28 ffffffff815e6f98
        0000000000000000 ffffffff81c8fa87 ffffffff81b70958 ffffffff811bf2c4
        0707fe32e8d60ca5 ffffffff81126d60 0000000000000000 0000000000000000
        ...
Call Trace:
[] show_stack+0xe8/0x108
[] dump_stack+0x88/0xb0
[] time_hardirqs_off+0x204/0x300
[] trace_hardirqs_off_caller+0x24/0xe8
[] cpu_startup_entry+0x39c/0x508
[] start_kernel+0x584/0x5a0

Replace the regular trace_preemptirqsoff_hist tracepoint calls with the
rcuidle version to avoid the error.

Signed-off-by: Yang Shi
Cc: bigeasy@linutronix.de
Cc: rostedt@goodmis.org
Cc: linux-rt-users@vger.kernel.org
Link: http://lkml.kernel.org/r/1456262603-10075-1-git-send-email-yang.shi@windriver.com
Signed-off-by: Thomas Gleixner
---
 kernel/trace/trace_irqsoff.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 3a03bf08f2b7..609a4fbcebfd 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -429,13 +429,13 @@ void start_critical_timings(void)
 {
 	if (preempt_trace() || irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
-	trace_preemptirqsoff_hist(TRACE_START, 1);
+	trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1);
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
-	trace_preemptirqsoff_hist(TRACE_STOP, 0);
+	trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0);
 	if (preempt_trace() || irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
@@ -445,7 +445,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
 #ifdef CONFIG_PROVE_LOCKING
 void time_hardirqs_on(unsigned long a0, unsigned long a1)
 {
-	trace_preemptirqsoff_hist(IRQS_ON, 0);
+	trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(a0, a1);
 }
@@ -454,7 +454,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(a0, a1);
-	trace_preemptirqsoff_hist(IRQS_OFF, 1);
+	trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
 }
 
 #else /* !CONFIG_PROVE_LOCKING */
-- 
2.28.0
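
Background note (illustrative sketch, not part of the patch itself): tracepoint
probes are called under RCU protection, so firing a plain trace_<name>() from
the idle loop, which is an RCU extended quiescent state, produces exactly the
lockdep splat quoted above. For each tracepoint, include/linux/tracepoint.h
also generates a trace_<name>_rcuidle() variant that makes RCU watch the CPU
for the duration of the probe call. A minimal sketch of the two call forms,
assuming the preemptirqsoff_hist tracepoint takes the (reason, start) integer
arguments seen in the diff:

	/*
	 * Sketch only.  Both forms invoke the same attached probes; they
	 * differ only in the RCU handling wrapped around the call.
	 */
	trace_preemptirqsoff_hist(IRQS_OFF, 1);          /* requires RCU to be watching */
	trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);  /* safe from idle / EQS */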