author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-08-11 09:30:24 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-08-11 09:30:24 +0200
commit	7531e2f34d1d551b096143f19111139f0dd84c8b (patch)
tree	0a29d6703e28dc6752b9b4085594cca238595aac /kernel/lockdep.c
parent	4f3e7524b2e703d9f8b02ac338153a53dd7ede66 (diff)
lockdep: lock protection locks
On Fri, 2008-08-01 at 16:26 -0700, Linus Torvalds wrote:

> On Fri, 1 Aug 2008, David Miller wrote:
> >
> > Taking more than a few locks of the same class at once is bad
> > news and it's better to find an alternative method.
>
> It's not always wrong.
>
> If you can guarantee that anybody that takes more than one lock of a
> particular class will always take a single top-level lock _first_, then
> that's all good. You can obviously screw up and take the same lock _twice_
> (which will deadlock), but at least you cannot get into ABBA situations.
>
> So maybe the right thing to do is to just teach lockdep about "lock
> protection locks". That would have solved the multi-queue issues for
> networking too - all the actual network drivers would still have taken
> just their single queue lock, but the one case that needs to take all of
> them would have taken a separate top-level lock first.
>
> Never mind that the multi-queue locks were always taken in the same order:
> it's never wrong to just have some top-level serialization, and anybody
> who needs to take <n> locks might as well do <n+1>, because they sure as
> hell aren't going to be on _any_ fastpaths.
>
> So the simplest solution really sounds like just teaching lockdep about
> that one special case. It's not "nesting" exactly, although it's obviously
> related to it.

Do as Linus suggested. The lock protection lock is called nest_lock.

Note that we still have the MAX_LOCK_DEPTH (48) limit to consider, so
anything that spills over that is still up shit creek.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
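To illustrate the intended use (this sketch is not part of the patch): a hypothetical
multi-queue driver that must take every per-queue lock at once can take one top-level
protection lock first and pass it as nest_lock, assuming the spin_lock_nest_lock()
helper from the same patch series. The names mq_dev, dev_queue, mq_dev_freeze and
NR_QUEUES below are made up for the example; note NR_QUEUES must stay well under
MAX_LOCK_DEPTH (48).

	#include <linux/spinlock.h>

	#define NR_QUEUES 8

	struct dev_queue {
		spinlock_t lock;		/* all instances share one lock class */
	};

	struct mq_dev {
		spinlock_t all_queues_lock;	/* the "lock protection lock" */
		struct dev_queue queues[NR_QUEUES];
	};

	/* Quiesce every queue: take the protection lock, then each queue lock. */
	static void mq_dev_freeze(struct mq_dev *dev)
	{
		int i;

		spin_lock(&dev->all_queues_lock);
		for (i = 0; i < NR_QUEUES; i++)
			/* nest_lock is held, so lockdep allows same-class nesting */
			spin_lock_nest_lock(&dev->queues[i].lock,
					    &dev->all_queues_lock);

		/* ... all queues are now quiescent ... */

		for (i = NR_QUEUES - 1; i >= 0; i--)
			spin_unlock(&dev->queues[i].lock);
		spin_unlock(&dev->all_queues_lock);
	}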
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	26
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d3c72ad8d09e..410c3365ad8f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1372,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
struct lockdep_map *next_instance, int read)
{
struct held_lock *prev;
+ struct held_lock *nest = NULL;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
prev = curr->held_locks + i;
+
+ if (prev->instance == next->nest_lock)
+ nest = prev;
+
if (hlock_class(prev) != hlock_class(next))
continue;
+
/*
* Allow read-after-read recursion of the same
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
if ((read == 2) && prev->read)
return 2;
+
+ /*
+ * We're holding the nest_lock, which serializes this lock's
+ * nesting behaviour.
+ */
+ if (nest)
+ return 2;
+
return print_deadlock_bug(curr, prev, next);
}
return 1;
@@ -2507,7 +2521,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
*/
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int hardirqs_off,
- unsigned long ip)
+ struct lockdep_map *nest_lock, unsigned long ip)
{
struct task_struct *curr = current;
struct lock_class *class = NULL;
@@ -2566,6 +2580,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
hlock->class_idx = class - lock_classes + 1;
hlock->acquire_ip = ip;
hlock->instance = lock;
+ hlock->nest_lock = nest_lock;
hlock->trylock = trylock;
hlock->read = read;
hlock->check = check;
@@ -2717,7 +2732,7 @@ found_it:
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
- hlock->acquire_ip))
+ hlock->nest_lock, hlock->acquire_ip))
return 0;
}
@@ -2778,7 +2793,7 @@ found_it:
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
- hlock->acquire_ip))
+ hlock->nest_lock, hlock->acquire_ip))
return 0;
}
@@ -2915,7 +2930,8 @@ EXPORT_SYMBOL_GPL(lock_set_subclass);
* and also avoid lockdep recursion:
*/
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
- int trylock, int read, int check, unsigned long ip)
+ int trylock, int read, int check,
+ struct lockdep_map *nest_lock, unsigned long ip)
{
unsigned long flags;
@@ -2930,7 +2946,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
current->lockdep_recursion = 1;
__lock_acquire(lock, subclass, trylock, read, check,
- irqs_disabled_flags(flags), ip);
+ irqs_disabled_flags(flags), nest_lock, ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
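For reference, a sketch of how callers adapt to the widened lock_acquire() interface
(these call sites are assumed for illustration, not taken from the patch): ordinary
acquisitions pass NULL for nest_lock, while a site serialized by an already-held
protection lock passes that lock's lockdep_map.

	#include <linux/kernel.h>
	#include <linux/lockdep.h>

	/* Hypothetical call sites showing the new 7-argument lock_acquire(). */
	static void example_acquire(struct lockdep_map *map,
				    struct lockdep_map *outer)
	{
		/* ordinary acquisition: no protection lock, nest_lock == NULL */
		lock_acquire(map, 0, 0, 0, 2, NULL, _RET_IP_);
		lock_release(map, 1, _RET_IP_);

		/* acquisition covered by the already-held protection lock 'outer' */
		lock_acquire(map, 0, 0, 0, 2, outer, _RET_IP_);
		lock_release(map, 1, _RET_IP_);
	}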