author	Yuyang Du <duyuyang@gmail.com>	2019-05-06 16:19:38 +0800
committer	Ingo Molnar <mingo@kernel.org>	2019-06-03 11:55:52 +0200
commit	4d56330df22dd9dd9a24f147014f60ee4c914fb8 (patch)
tree	6352eba54513bc080ac92046dec3274d9b410f3a /kernel/locking
parent	091806515124b20f8cff7927b4b7ff399483b109 (diff)
locking/lockdep: Adjust new bit cases in mark_lock
The new bit can be any possible lock usage except garbage, so the cases in the switch can be made simpler. Warn early on if a wrong usage bit is passed in, before any locks are taken.

No functional change.

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bvanassche@acm.org
Cc: frederic@kernel.org
Cc: ming.lei@redhat.com
Cc: will.deacon@arm.com
Link: https://lkml.kernel.org/r/20190506081939.74287-23-duyuyang@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/lockdep.c	21
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1123e7e6c78d..9c4e2a7547d3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3582,6 +3582,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 {
 	unsigned int new_mask = 1 << new_bit, ret = 1;
 
+	if (new_bit >= LOCK_USAGE_STATES) {
+		DEBUG_LOCKS_WARN_ON(1);
+		return 0;
+	}
+
 	/*
 	 * If already set then do not dirty the cacheline,
 	 * nor do any checks:
@@ -3605,25 +3610,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		return 0;
 
 	switch (new_bit) {
-#define LOCKDEP_STATE(__STATE)			\
-	case LOCK_USED_IN_##__STATE:		\
-	case LOCK_USED_IN_##__STATE##_READ:	\
-	case LOCK_ENABLED_##__STATE:		\
-	case LOCK_ENABLED_##__STATE##_READ:
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
-		ret = mark_lock_irq(curr, this, new_bit);
-		if (!ret)
-			return 0;
-		break;
 	case LOCK_USED:
 		debug_atomic_dec(nr_unused_locks);
 		break;
 	default:
-		if (!debug_locks_off_graph_unlock())
+		ret = mark_lock_irq(curr, this, new_bit);
+		if (!ret)
 			return 0;
-		WARN_ON(1);
-		return 0;
 	}
 
 	graph_unlock();
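
For readability, below is a sketch of the relevant parts of mark_lock() after this patch, reconstructed from the hunks above. It is not the full function: the elided sections and the comments are paraphrases added here for illustration, not text from lockdep.c.

/*
 * Sketch of mark_lock() after this patch (reconstructed from the diff).
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit new_bit)
{
	unsigned int new_mask = 1 << new_bit, ret = 1;

	/* Warn early on a garbage usage bit, before any locks are taken. */
	if (new_bit >= LOCK_USAGE_STATES) {
		DEBUG_LOCKS_WARN_ON(1);
		return 0;
	}

	/* ... "already set" fast path and graph_lock() elided ... */

	switch (new_bit) {
	case LOCK_USED:
		debug_atomic_dec(nr_unused_locks);
		break;
	default:
		/* Every other valid bit is an IRQ usage bit: delegate to mark_lock_irq(). */
		ret = mark_lock_irq(curr, this, new_bit);
		if (!ret)
			return 0;
	}

	graph_unlock();

	/* ... rest of the function elided ... */
}

The early range check is what lets the per-state case labels generated from lockdep_states.h be dropped: once garbage bits are rejected up front, the default case can safely assume an IRQ usage bit.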