author		Waiman Long <longman@redhat.com>	2020-02-06 10:24:03 -0500
committer	Ingo Molnar <mingo@kernel.org>	2020-02-11 13:10:48 +0100
commit		b3b9c187dc2544923a601733a85352b9ddaba9b3 (patch)
tree		2c2b41e70bfd9d19ad847a43c3a501c8a99aa9b9 /kernel/locking/lockdep.c
parent		0a679e13ea30f85a1aef0669ee0c5a9fd7860b34 (diff)
locking/lockdep: Decrement IRQ context counters when removing lock chain
There are currently three counters to track the IRQ context of a lock chain - nr_hardirq_chains, nr_softirq_chains and nr_process_chains. They are incremented when a new lock chain is added, but they are not decremented when a lock chain is removed. That causes some of the statistic counts reported by /proc/lockdep_stats to be incorrect.

Fix that by decrementing the right counter when a lock chain is removed. Since inc_chains() no longer accesses hardirq_context and softirq_context directly, it is moved out from the CONFIG_TRACE_IRQFLAGS conditional compilation block.

Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20200206152408.24165-2-longman@redhat.com
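The core of the fix is that removing a lock chain now performs the exact bookkeeping inverse of adding one. As a rough stand-alone illustration (user-space C, not kernel code), the sketch below models the patched inc_chains()/dec_chains() pair; the LOCK_CHAIN_HARDIRQ_CONTEXT and LOCK_CHAIN_SOFTIRQ_CONTEXT values of 2 and 1 are assumptions inferred from the task_irq_context() arithmetic in the diff below:

	/*
	 * Minimal user-space sketch of the balanced counter scheme; the
	 * LOCK_CHAIN_* values are assumptions, not the kernel's definitions.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define LOCK_CHAIN_SOFTIRQ_CONTEXT	1	/* assumed: bit 0 */
	#define LOCK_CHAIN_HARDIRQ_CONTEXT	2	/* assumed: bit 1 */

	static unsigned long nr_hardirq_chains;
	static unsigned long nr_softirq_chains;
	static unsigned long nr_process_chains;

	/* Mirrors the patched inc_chains(): select a counter by irq_context bits. */
	static void inc_chains(int irq_context)
	{
		if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
			nr_hardirq_chains++;
		else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}

	/* Mirrors the new dec_chains(): the exact inverse of inc_chains(). */
	static void dec_chains(int irq_context)
	{
		if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
			nr_hardirq_chains--;
		else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
			nr_softirq_chains--;
		else
			nr_process_chains--;
	}

	int main(void)
	{
		int ctx[] = { LOCK_CHAIN_HARDIRQ_CONTEXT, LOCK_CHAIN_SOFTIRQ_CONTEXT, 0 };
		int i;

		/* Add one chain per context, then remove all three. */
		for (i = 0; i < 3; i++)
			inc_chains(ctx[i]);
		for (i = 0; i < 3; i++)
			dec_chains(ctx[i]);

		/*
		 * Every counter returns to zero. Before the fix, removal never
		 * decremented, so the counts would stay at 1/1/1 here.
		 */
		assert(nr_hardirq_chains == 0);
		assert(nr_softirq_chains == 0);
		assert(nr_process_chains == 0);
		printf("counters balanced\n");
		return 0;
	}

Before the patch, only the increments ran, so repeatedly adding and zapping chains left the counters permanently inflated.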
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--	kernel/locking/lockdep.c	40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 32406ef0d6a2..35449f5b79fb 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2298,18 +2298,6 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
 	return 0;
 }
 
-static void inc_chains(void)
-{
-	if (current->hardirq_context)
-		nr_hardirq_chains++;
-	else {
-		if (current->softirq_context)
-			nr_softirq_chains++;
-		else
-			nr_process_chains++;
-	}
-}
-
 #else
 
 static inline int check_irq_usage(struct task_struct *curr,
@@ -2317,13 +2305,27 @@ static inline int check_irq_usage(struct task_struct *curr,
 {
 	return 1;
 }
+#endif /* CONFIG_TRACE_IRQFLAGS */
 
-static inline void inc_chains(void)
+static void inc_chains(int irq_context)
 {
-	nr_process_chains++;
+	if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
+		nr_hardirq_chains++;
+	else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
+		nr_softirq_chains++;
+	else
+		nr_process_chains++;
 }
 
-#endif /* CONFIG_TRACE_IRQFLAGS */
+static void dec_chains(int irq_context)
+{
+	if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
+		nr_hardirq_chains--;
+	else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
+		nr_softirq_chains--;
+	else
+		nr_process_chains--;
+}
 
 static void
 print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
@@ -2843,7 +2845,7 @@ static inline int add_chain_cache(struct task_struct *curr,
 
 	hlist_add_head_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(chain_lookup_misses);
-	inc_chains();
+	inc_chains(chain->irq_context);
 
 	return 1;
 }
@@ -3596,7 +3598,8 @@ lock_used:
 
 static inline unsigned int task_irq_context(struct task_struct *task)
 {
-	return 2 * !!task->hardirq_context + !!task->softirq_context;
+	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context +
+	       LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
 }
 
 static int separate_irq_context(struct task_struct *curr,
@@ -4798,6 +4801,8 @@ recalc:
 		return;
 	/* Overwrite the chain key for concurrent RCU readers. */
 	WRITE_ONCE(chain->chain_key, chain_key);
+	dec_chains(chain->irq_context);
+
 	/*
 	 * Note: calling hlist_del_rcu() from inside a
 	 * hlist_for_each_entry_rcu() loop is safe.
@@ -4819,6 +4824,7 @@ recalc:
 		return;
 	}
 	*new_chain = *chain;
 	hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
+	inc_chains(new_chain->irq_context);
 #endif
 }
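
For reference, the task_irq_context() hunk above packs the two per-task context flags into the chain's irq_context value, and that same value doubles as the bit mask that inc_chains()/dec_chains() test. A stand-alone sketch of the encoding, again with the LOCK_CHAIN_* values assumed (the struct below is a hypothetical stand-in, not the real task_struct):

	#include <stdio.h>

	#define LOCK_CHAIN_SOFTIRQ_CONTEXT	1	/* assumed: bit 0 */
	#define LOCK_CHAIN_HARDIRQ_CONTEXT	2	/* assumed: bit 1 */

	/* Hypothetical stand-in for the task_struct fields used here. */
	struct task { int hardirq_context, softirq_context; };

	/* Same arithmetic as the patched task_irq_context(). */
	static unsigned int task_irq_context(const struct task *t)
	{
		return LOCK_CHAIN_HARDIRQ_CONTEXT * !!t->hardirq_context +
		       LOCK_CHAIN_SOFTIRQ_CONTEXT * !!t->softirq_context;
	}

	int main(void)
	{
		struct task hard = { .hardirq_context = 1 };
		struct task soft = { .softirq_context = 1 };
		struct task proc = { 0, 0 };

		printf("hardirq=%u softirq=%u process=%u\n",
		       task_irq_context(&hard), task_irq_context(&soft),
		       task_irq_context(&proc));	/* prints 2 1 0 */
		return 0;
	}

With dec_chains() keyed off the same value, the chain counts in /proc/lockdep_stats track the number of live chains in each context instead of drifting upward as chains are removed.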