author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2011-07-26 13:13:44 +0200
committer  Ingo Molnar <mingo@elte.hu>              2011-08-04 10:17:36 +0200
commit     7d36b26be0f3c6b86e3ab7e1539e42f3a3bc79ca (patch)
tree       f22ac58f11dc1d0738e66024052f2e1c709533f1
parent     d7619fe39d9769b4d4545cc511c891deea18ae08 (diff)
lockdep: Fix trace_hardirqs_on_caller()
Commit dd4e5d3ac4a ("lockdep: Fix trace_[soft,hard]irqs_[on,off]() recursion")
made a bit of a mess of the various checks and error conditions. In particular
it moved the check for !irqs_disabled() before the spurious enable test,
resulting in some warnings.

Reported-by: Arnaud Lacombe <lacombar@gmail.com>
Reported-by: Dave Jones <davej@redhat.com>
Reported-and-tested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1311679697.24752.28.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  kernel/lockdep.c  30
1 file changed, 16 insertions(+), 14 deletions(-)
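
The failure mode being fixed can be illustrated with a hypothetical call site
(a sketch for context, not part of the patch): a redundant hardirqs-on
annotation issued while interrupts are already enabled. Before this fix the
DEBUG_LOCKS_WARN_ON(!irqs_disabled()) check ran before the spurious enable
test and therefore warned on such calls; afterwards they are silently counted
via redundant_hardirqs_on.

	/*
	 * Hypothetical annotation site, for illustration only:
	 * local_irq_enable() already marked hardirqs as enabled in
	 * lockdep, so the explicit annotation below is redundant.
	 */
	local_irq_enable();
	trace_hardirqs_on();	/* pre-patch: could trip
				 * DEBUG_LOCKS_WARN_ON(!irqs_disabled());
				 * post-patch: bumps redundant_hardirqs_on
				 * and returns quietly.
				 */
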
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3956f5149e25..74ca247a4d4f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2485,23 +2485,9 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
-		return;
-
-	if (unlikely(curr->hardirqs_enabled)) {
-		/*
-		 * Neither irq nor preemption are disabled here
-		 * so this is racy by nature but losing one hit
-		 * in a stat is not a big deal.
-		 */
-		__debug_atomic_inc(redundant_hardirqs_on);
-		return;
-	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
 
-	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
-		return;
 	/*
 	 * We are going to turn hardirqs on, so set the
 	 * usage bit for all held locks:
@@ -2529,9 +2515,25 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	if (unlikely(current->hardirqs_enabled)) {
+		/*
+		 * Neither irq nor preemption are disabled here
+		 * so this is racy by nature but losing one hit
+		 * in a stat is not a big deal.
+		 */
+		__debug_atomic_inc(redundant_hardirqs_on);
+		return;
+	}
+
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
+	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
+		return;
+
 	current->lockdep_recursion = 1;
 	__trace_hardirqs_on_caller(ip);
 	current->lockdep_recursion = 0;
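
Assembled from the hunks above, the patched trace_hardirqs_on_caller() performs
its checks in roughly the following order (a readability sketch, not verbatim
kernel source; the inner __trace_hardirqs_on_caller() body is unchanged apart
from the removed checks):

	void trace_hardirqs_on_caller(unsigned long ip)
	{
		/* Ignore calls while lockdep is disabled or recursing. */
		if (unlikely(!debug_locks || current->lockdep_recursion))
			return;

		/*
		 * Spurious enable: hardirqs are already marked enabled,
		 * so count it and bail out before any warning can fire.
		 */
		if (unlikely(current->hardirqs_enabled)) {
			__debug_atomic_inc(redundant_hardirqs_on);
			return;
		}

		/* Only a genuine OFF -> ON transition may warn from here on. */
		if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
			return;

		if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
			return;

		if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
			return;

		current->lockdep_recursion = 1;
		__trace_hardirqs_on_caller(ip);
		current->lockdep_recursion = 0;
	}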