From 41719b03091911028116155deddc5eedf8c45e37 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 14 Jan 2009 15:36:26 +0100
Subject: mutex: preemption fixes

The problem is that dropping the spinlock right before schedule is a
voluntary preemption point and can cause a schedule, right after which
we schedule again.

Fix this inefficiency by keeping preemption disabled until we schedule:
explicitly disable preemption and provide a schedule() variant that
assumes preemption is already disabled.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 include/linux/sched.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cae9b81a1f8..9f0b372cfa6f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -328,6 +328,7 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
 
 struct nsproxy;
-- cgit v1.2.3
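For context, a sketch of how the new split is meant to be used on the
kernel/sched.c side (illustrative; this include/-only view does not show
the implementation, which may differ in detail): schedule() becomes a
wrapper that disables preemption around __schedule(), while callers that
have already disabled preemption, such as the mutex slowpath, call
__schedule() directly and so avoid the extra voluntary preemption point
when dropping their spinlock:

	/*
	 * Sketch: schedule() as a preempt-safe wrapper around
	 * __schedule(), which assumes preemption is already disabled.
	 */
	asmlinkage void __sched schedule(void)
	{
	need_resched:
		preempt_disable();
		__schedule();
		preempt_enable_no_resched();
		if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
			goto need_resched;
	}
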
From 0d66bf6d3514b35eb6897629059443132992dbd7 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 12 Jan 2009 14:01:47 +0100
Subject: mutex: implement adaptive spinning

Change mutex contention behaviour such that it will sometimes busy wait
on acquisition - moving its behaviour closer to that of spinlocks.

This concept got ported to mainline from the -rt tree, where it was
originally implemented for rtmutexes by Steven Rostedt, based on work
by Gregory Haskins.

Testing with Ingo's test-mutex application
(http://lkml.org/lkml/2006/1/8/50) gave a 345% boost for VFS
scalability on my testbox:

 # ./test-mutex-shm V 16 10 | grep "^avg ops"
 avg ops/sec:               296604

 # ./test-mutex-shm V 16 10 | grep "^avg ops"
 avg ops/sec:               85870

The key criterion for the busy wait is that the lock owner has to be
running on a (different) cpu. The idea is that as long as the owner is
running, there is a fair chance it'll release the lock soon, and thus
we'll be better off spinning instead of blocking/scheduling.

Since regular mutexes (as opposed to rtmutexes) do not atomically track
the owner, we add the owner in a non-atomic fashion and deal with the
races in the slowpath.

Furthermore, to ease the testing of the performance impact of this new
code, there is a means to disable this behaviour at runtime (without
having to reboot the system), when scheduler debugging is enabled
(CONFIG_SCHED_DEBUG=y), by issuing the following command:

 # echo NO_OWNER_SPIN > /debug/sched_features

This command re-enables spinning (this is also the default):

 # echo OWNER_SPIN > /debug/sched_features

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 include/linux/mutex.h | 5 +++--
 include/linux/sched.h | 1 +
 2 files changed, 4 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 7a0e5c4f8072..3069ec7e0ab8 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,8 +50,10 @@ struct mutex {
 	atomic_t		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
-#ifdef CONFIG_DEBUG_MUTEXES
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
 	struct thread_info	*owner;
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
 	void			*magic;
 #endif
@@ -68,7 +70,6 @@ struct mutex_waiter {
 	struct list_head	list;
 	struct task_struct	*task;
 #ifdef CONFIG_DEBUG_MUTEXES
-	struct mutex		*lock;
 	void			*magic;
 #endif
 };
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9f0b372cfa6f..c34b137cd1e5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -330,6 +330,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
+extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
 
 struct nsproxy;
 struct user_namespace;
-- cgit v1.2.3
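To illustrate the mechanism (a simplified sketch, not the full patch:
the kernel/mutex.c side is not part of this include/-only view, and the
owner bookkeeping and debug hooks are abbreviated), the contended
slowpath in __mutex_lock_common() gains a loop of roughly this shape
before it falls back to blocking:

	for (;;) {
		struct thread_info *owner;

		/*
		 * If there is an owner, only keep spinning while that
		 * owner is actually running on another cpu; otherwise
		 * give up and block as before.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* The lock may have become available - try to grab it: */
		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			preempt_enable();
			return 0;
		}

		cpu_relax();	/* be SMT-friendly while busy-waiting */
	}
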
From 6f2b9b9a9d750a9175dc79c74bfed5add840983c Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Thu, 29 Jan 2009 16:03:20 +0100
Subject: timer: implement lockdep deadlock detection

This modifies the timer code to allow lockdep to detect deadlocks
resulting from a lock being taken in the timer function as well as
around the del_timer_sync() call.

Signed-off-by: Johannes Berg
---
 include/linux/timer.h | 93 ++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 84 insertions(+), 9 deletions(-)

(limited to 'include')

diff --git a/include/linux/timer.h b/include/linux/timer.h
index daf9685b861c..51774eb87cc6 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -5,6 +5,7 @@
 #include <linux/ktime.h>
 #include <linux/stddef.h>
 #include <linux/debugobjects.h>
+#include <linux/stringify.h>
 
 struct tvec_base;
 
@@ -21,52 +22,126 @@ struct timer_list {
 	char start_comm[16];
 	int start_pid;
 #endif
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map lockdep_map;
+#endif
 };
 
 extern struct tvec_base boot_tvec_bases;
 
+#ifdef CONFIG_LOCKDEP
+/*
+ * NB: because we have to copy the lockdep_map, setting the lockdep_map key
+ * (second argument) here is required, otherwise it could be initialised to
+ * the copy of the lockdep_map later! We use the pointer to and the string
+ * "<file>:<line>" as the key resp. the name of the lockdep_map.
+ */
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)				\
+	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
+#else
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
+#endif
+
 #define TIMER_INITIALIZER(_function, _expires, _data) {		\
 	.entry = { .prev = TIMER_ENTRY_STATIC },			\
 	.function = (_function),					\
 	.expires = (_expires),						\
 	.data = (_data),						\
 	.base = &boot_tvec_bases,					\
+	__TIMER_LOCKDEP_MAP_INITIALIZER(				\
+		__FILE__ ":" __stringify(__LINE__))			\
 	}
 
 #define DEFINE_TIMER(_name, _function, _expires, _data)		\
 	struct timer_list _name =					\
 		TIMER_INITIALIZER(_function, _expires, _data)
 
-void init_timer(struct timer_list *timer);
-void init_timer_deferrable(struct timer_list *timer);
+void init_timer_key(struct timer_list *timer,
+		    const char *name,
+		    struct lock_class_key *key);
+void init_timer_deferrable_key(struct timer_list *timer,
+			       const char *name,
+			       struct lock_class_key *key);
+
+#ifdef CONFIG_LOCKDEP
+#define init_timer(timer)						\
+	do {								\
+		static struct lock_class_key __key;			\
+		init_timer_key((timer), #timer, &__key);		\
+	} while (0)
+
+#define init_timer_deferrable(timer)					\
+	do {								\
+		static struct lock_class_key __key;			\
+		init_timer_deferrable_key((timer), #timer, &__key);	\
+	} while (0)
+
+#define init_timer_on_stack(timer)					\
+	do {								\
+		static struct lock_class_key __key;			\
+		init_timer_on_stack_key((timer), #timer, &__key);	\
+	} while (0)
+
+#define setup_timer(timer, fn, data)					\
+	do {								\
+		static struct lock_class_key __key;			\
+		setup_timer_key((timer), #timer, &__key, (fn), (data));\
+	} while (0)
+
+#define setup_timer_on_stack(timer, fn, data)				\
+	do {								\
+		static struct lock_class_key __key;			\
+		setup_timer_on_stack_key((timer), #timer, &__key,	\
+					 (fn), (data));			\
+	} while (0)
+#else
+#define init_timer(timer)\
+	init_timer_key((timer), NULL, NULL)
+#define init_timer_deferrable(timer)\
+	init_timer_deferrable_key((timer), NULL, NULL)
+#define init_timer_on_stack(timer)\
+	init_timer_on_stack_key((timer), NULL, NULL)
+#define setup_timer(timer, fn, data)\
+	setup_timer_key((timer), NULL, NULL, (fn), (data))
+#define setup_timer_on_stack(timer, fn, data)\
+	setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data))
+#endif
 
 #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void init_timer_on_stack(struct timer_list *timer);
+extern void init_timer_on_stack_key(struct timer_list *timer,
+				    const char *name,
+				    struct lock_class_key *key);
 extern void destroy_timer_on_stack(struct timer_list *timer);
 #else
 static inline void destroy_timer_on_stack(struct timer_list *timer) { }
-static inline void init_timer_on_stack(struct timer_list *timer)
+static inline void init_timer_on_stack_key(struct timer_list *timer,
+					   const char *name,
+					   struct lock_class_key *key)
 {
-	init_timer(timer);
+	init_timer_key(timer, name, key);
 }
 #endif
 
-static inline void setup_timer(struct timer_list * timer,
+static inline void setup_timer_key(struct timer_list * timer,
+				   const char *name,
+				   struct lock_class_key *key,
 			       void (*function)(unsigned long),
 			       unsigned long data)
 {
 	timer->function = function;
 	timer->data = data;
-	init_timer(timer);
+	init_timer_key(timer, name, key);
 }
 
-static inline void setup_timer_on_stack(struct timer_list *timer,
+static inline void setup_timer_on_stack_key(struct timer_list *timer,
+					    const char *name,
+					    struct lock_class_key *key,
 					void (*function)(unsigned long),
 					unsigned long data)
 {
 	timer->function = function;
 	timer->data = data;
-	init_timer_on_stack(timer);
+	init_timer_on_stack_key(timer, name, key);
 }
 
 /**
-- cgit v1.2.3
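The deadlock class this catches, as a hypothetical driver snippet
(my_lock, my_timer and my_timer_fn are made-up names): a lock taken
inside the timer callback and also held around del_timer_sync() can
deadlock, because del_timer_sync() waits for a running callback while
the callback spins on the held lock. With the per-init-site lockdep
key now recorded by init_timer()/setup_timer(), lockdep can report the
dependency without the deadlock ever having to trigger:

	static DEFINE_SPINLOCK(my_lock);
	static struct timer_list my_timer;

	static void my_timer_fn(unsigned long data)
	{
		spin_lock(&my_lock);	/* lock taken in the timer function */
		/* ... */
		spin_unlock(&my_lock);
	}

	static int __init my_init(void)
	{
		setup_timer(&my_timer, my_timer_fn, 0);
		mod_timer(&my_timer, jiffies + HZ);
		return 0;
	}

	static void my_teardown(void)
	{
		spin_lock(&my_lock);	/* ...and held around the sync: */
		del_timer_sync(&my_timer);	/* waits for a running
						 * my_timer_fn, which spins
						 * on my_lock: deadlock */
		spin_unlock(&my_lock);
	}
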
From cf40bd16fdad42c053040bcd3988f5fdedbb6c57 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Wed, 21 Jan 2009 08:12:39 +0100
Subject: lockdep: annotate reclaim context (__GFP_NOFS)

Here is another version, with the incremental patch rolled up,
reclaim context annotation added to kswapd, and allocation tracing
added to the slab allocators (which may only ever reach the page
allocator in rare cases, so it is good to put annotations here too).

Haven't tested this version as such, but it should be getting closer
to merge worthy ;)

--

After noticing some code in mm/filemap.c accidentally performing a
__GFP_FS allocation when it should not have been, I thought it might
be a good idea to try to catch this kind of thing with lockdep.

I coded up a little idea that seems to work. Unfortunately the system
has to actually be in __GFP_FS page reclaim, then take the lock,
before it will mark it. But at least that might still be some orders
of magnitude more common (and more debuggable) than an actual deadlock
condition, so we have some improvement I hope (the concept is no less
complete than discovery of a lock's interrupt contexts).

I guess we could even do the same thing with __GFP_IO (normal
reclaim), and even GFP_NOIO locks too... but filesystems will have the
most locks and fiddly code paths, so let's start there and see how it
goes.

It *seems* to work. I did a quick test.

 =================================
 [ INFO: inconsistent lock state ]
 2.6.28-rc6-00007-ged31348-dirty #26
 ---------------------------------
 inconsistent {in-reclaim-W} -> {ov-reclaim-W} usage.
 modprobe/8526 [HC0[0]:SC0[0]:HE1:SE1] takes:
  (testlock){--..}, at: [] brd_init+0x55/0x216 [brd]
 {in-reclaim-W} state was registered at:
   [] __lock_acquire+0x75b/0x1a60
   [] lock_acquire+0x91/0xc0
   [] mutex_lock_nested+0xb1/0x310
   [] brd_init+0x2b/0x216 [brd]
   [] _stext+0x3b/0x170
   [] sys_init_module+0xaf/0x1e0
   [] system_call_fastpath+0x16/0x1b
   [] 0xffffffffffffffff
 irq event stamp: 3929
 hardirqs last  enabled at (3929): [] mutex_lock_nested+0x285/0x310
 hardirqs last disabled at (3928): [] mutex_lock_nested+0x59/0x310
 softirqs last  enabled at (3732): [] sk_filter+0x83/0xe0
 softirqs last disabled at (3730): [] sk_filter+0x16/0xe0

 other info that might help us debug this:
 1 lock held by modprobe/8526:
  #0:  (testlock){--..}, at: [] brd_init+0x55/0x216 [brd]

 stack backtrace:
 Pid: 8526, comm: modprobe Not tainted 2.6.28-rc6-00007-ged31348-dirty #26
 Call Trace:
  [] print_usage_bug+0x193/0x1d0
  [] mark_lock+0xaf0/0xca0
  [] mark_held_locks+0x55/0xc0
  [] ? brd_init+0x0/0x216 [brd]
  [] trace_reclaim_fs+0x2a/0x60
  [] __alloc_pages_internal+0x475/0x580
  [] ? mutex_lock_nested+0x26e/0x310
  [] ? brd_init+0x0/0x216 [brd]
  [] brd_init+0x6a/0x216 [brd]
  [] ? brd_init+0x0/0x216 [brd]
  [] _stext+0x3b/0x170
  [] ? mutex_unlock+0x9/0x10
  [] ? __mutex_unlock_slowpath+0x10d/0x180
  [] ? trace_hardirqs_on_caller+0x12c/0x190
  [] sys_init_module+0xaf/0x1e0
  [] system_call_fastpath+0x16/0x1b

Signed-off-by: Nick Piggin
Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 include/linux/lockdep.h | 17 ++++++++++++++++-
 include/linux/sched.h   |  1 +
 2 files changed, 17 insertions(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 23bf02fb124f..cc97bdbc7969 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -27,12 +27,16 @@ enum lock_usage_bit
 	LOCK_USED = 0,
 	LOCK_USED_IN_HARDIRQ,
 	LOCK_USED_IN_SOFTIRQ,
+	LOCK_USED_IN_RECLAIM_FS,
 	LOCK_ENABLED_SOFTIRQS,
 	LOCK_ENABLED_HARDIRQS,
+	LOCK_HELD_OVER_RECLAIM_FS,
 	LOCK_USED_IN_HARDIRQ_READ,
 	LOCK_USED_IN_SOFTIRQ_READ,
+	LOCK_USED_IN_RECLAIM_FS_READ,
 	LOCK_ENABLED_SOFTIRQS_READ,
 	LOCK_ENABLED_HARDIRQS_READ,
+	LOCK_HELD_OVER_RECLAIM_FS_READ,
 	LOCK_USAGE_STATES
 };
 
@@ -42,16 +46,20 @@ enum lock_usage_bit
 #define LOCKF_USED			(1 << LOCK_USED)
 #define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
 #define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
+#define LOCKF_USED_IN_RECLAIM_FS	(1 << LOCK_USED_IN_RECLAIM_FS)
 #define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
 #define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
+#define LOCKF_HELD_OVER_RECLAIM_FS	(1 << LOCK_HELD_OVER_RECLAIM_FS)
 
 #define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
 #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
 
 #define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
 #define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
+#define LOCKF_USED_IN_RECLAIM_FS_READ	(1 << LOCK_USED_IN_RECLAIM_FS_READ)
 #define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
 #define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
+#define LOCKF_HELD_OVER_RECLAIM_FS_READ	(1 << LOCK_HELD_OVER_RECLAIM_FS_READ)
 
 #define LOCKF_ENABLED_IRQS_READ \
 		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
@@ -324,7 +332,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
 	lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }
 
-# define INIT_LOCKDEP				.lockdep_recursion = 0,
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
@@ -342,6 +354,9 @@ static inline void lockdep_on(void)
 # define lock_release(l, n, i)			do { } while (0)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
+# define lockdep_set_current_reclaim_state(g)	do { } while (0)
+# define lockdep_clear_current_reclaim_state()	do { } while (0)
+# define lockdep_trace_alloc(g)			do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4efb552aca47..b00a77f4999e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1313,6 +1313,7 @@ struct task_struct {
 	int lockdep_depth;
 	unsigned int lockdep_recursion;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	gfp_t lockdep_reclaim_gfp;
 #endif
 
 /* journalling filesystem info */
-- cgit v1.2.3
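The pattern being annotated, as an illustration (fs_lock and the
surrounding functions are invented for the example): a lock that is
taken in the __GFP_FS reclaim path, say in a filesystem's writepage,
must never be held across an allocation that may itself enter __GFP_FS
reclaim, or reclaim can recurse into the held lock:

	static DEFINE_MUTEX(fs_lock);

	/* Called during __GFP_FS page reclaim: */
	static int my_writepage(struct page *page,
				struct writeback_control *wbc)
	{
		mutex_lock(&fs_lock);	/* registers {in-reclaim-W} usage */
		/* ... write the page out ... */
		mutex_unlock(&fs_lock);
		return 0;
	}

	/* Elsewhere, the buggy pattern: */
	mutex_lock(&fs_lock);
	page = alloc_page(GFP_KERNEL);	/* GFP_KERNEL includes __GFP_FS:
					 * lockdep_trace_alloc() in the
					 * allocator now flags fs_lock as
					 * held over __GFP_FS reclaim */
	mutex_unlock(&fs_lock);

The fix would be to allocate with GFP_NOFS here; the
lockdep_set_current_reclaim_state()/lockdep_clear_current_reclaim_state()
pair marks actual reclaim contexts, as the series does for kswapd.
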
From 4fc95e867f1e75351b89db3c68212dfcce7ea563 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 22 Jan 2009 13:10:52 +0100
Subject: lockdep: sanitize bit names

s/\(LOCKF\?_ENABLED_[^ ]*\)S\(_READ\)\?\>/\1\2/g

So that the USED_IN and ENABLED bits have matching names.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 include/linux/lockdep.h | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

(limited to 'include')

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index cc97bdbc7969..da2e2b25b3b2 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -28,14 +28,14 @@ enum lock_usage_bit
 	LOCK_USED_IN_HARDIRQ,
 	LOCK_USED_IN_SOFTIRQ,
 	LOCK_USED_IN_RECLAIM_FS,
-	LOCK_ENABLED_SOFTIRQS,
-	LOCK_ENABLED_HARDIRQS,
+	LOCK_ENABLED_SOFTIRQ,
+	LOCK_ENABLED_HARDIRQ,
 	LOCK_HELD_OVER_RECLAIM_FS,
 	LOCK_USED_IN_HARDIRQ_READ,
 	LOCK_USED_IN_SOFTIRQ_READ,
 	LOCK_USED_IN_RECLAIM_FS_READ,
-	LOCK_ENABLED_SOFTIRQS_READ,
-	LOCK_ENABLED_HARDIRQS_READ,
+	LOCK_ENABLED_SOFTIRQ_READ,
+	LOCK_ENABLED_HARDIRQ_READ,
 	LOCK_HELD_OVER_RECLAIM_FS_READ,
 	LOCK_USAGE_STATES
 };
@@ -47,22 +47,22 @@ enum lock_usage_bit
 #define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
 #define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
 #define LOCKF_USED_IN_RECLAIM_FS	(1 << LOCK_USED_IN_RECLAIM_FS)
-#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
-#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
+#define LOCKF_ENABLED_HARDIRQ		(1 << LOCK_ENABLED_HARDIRQ)
+#define LOCKF_ENABLED_SOFTIRQ		(1 << LOCK_ENABLED_SOFTIRQ)
 #define LOCKF_HELD_OVER_RECLAIM_FS	(1 << LOCK_HELD_OVER_RECLAIM_FS)
 
-#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
+#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
 #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
 
 #define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
 #define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
 #define LOCKF_USED_IN_RECLAIM_FS_READ	(1 << LOCK_USED_IN_RECLAIM_FS_READ)
-#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
-#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
+#define LOCKF_ENABLED_HARDIRQ_READ	(1 << LOCK_ENABLED_HARDIRQ_READ)
+#define LOCKF_ENABLED_SOFTIRQ_READ	(1 << LOCK_ENABLED_SOFTIRQ_READ)
 #define LOCKF_HELD_OVER_RECLAIM_FS_READ	(1 << LOCK_HELD_OVER_RECLAIM_FS_READ)
 
-#define LOCKF_ENABLED_IRQS_READ \
-		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
+#define LOCKF_ENABLED_IRQ_READ \
+		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
 #define LOCKF_USED_IN_IRQ_READ \
 	(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
-- cgit v1.2.3
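The effect of the rename, shown side by side (an illustration, not part
of the patch): with the trailing 'S' gone, every ENABLED name uses the
same singular suffix as its USED_IN counterpart, which is what lets the
later cleanups treat the states uniformly:

	/* before */	LOCKF_USED_IN_HARDIRQ  /  LOCKF_ENABLED_HARDIRQS
	/* after  */	LOCKF_USED_IN_HARDIRQ  /  LOCKF_ENABLED_HARDIRQ
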
From a652d7081bc96b3094e85ca30e47f50185d2f717 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 22 Jan 2009 13:13:11 +0100
Subject: lockdep: sanitize reclaim bit names

s/HELD_OVER/ENABLED/g

so that it's similar to the hardirq and softirq names.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 include/linux/lockdep.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index da2e2b25b3b2..6d729c9d1d27 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -30,13 +30,13 @@ enum lock_usage_bit
 	LOCK_USED_IN_RECLAIM_FS,
 	LOCK_ENABLED_SOFTIRQ,
 	LOCK_ENABLED_HARDIRQ,
-	LOCK_HELD_OVER_RECLAIM_FS,
+	LOCK_ENABLED_RECLAIM_FS,
 	LOCK_USED_IN_HARDIRQ_READ,
 	LOCK_USED_IN_SOFTIRQ_READ,
 	LOCK_USED_IN_RECLAIM_FS_READ,
 	LOCK_ENABLED_SOFTIRQ_READ,
 	LOCK_ENABLED_HARDIRQ_READ,
-	LOCK_HELD_OVER_RECLAIM_FS_READ,
+	LOCK_ENABLED_RECLAIM_FS_READ,
 	LOCK_USAGE_STATES
 };
 
@@ -49,7 +49,7 @@ enum lock_usage_bit
 #define LOCKF_USED_IN_RECLAIM_FS	(1 << LOCK_USED_IN_RECLAIM_FS)
 #define LOCKF_ENABLED_HARDIRQ		(1 << LOCK_ENABLED_HARDIRQ)
 #define LOCKF_ENABLED_SOFTIRQ		(1 << LOCK_ENABLED_SOFTIRQ)
-#define LOCKF_HELD_OVER_RECLAIM_FS	(1 << LOCK_HELD_OVER_RECLAIM_FS)
+#define LOCKF_ENABLED_RECLAIM_FS	(1 << LOCK_ENABLED_RECLAIM_FS)
 
 #define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
 #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
 
@@ -59,7 +59,7 @@ enum lock_usage_bit
 #define LOCKF_USED_IN_RECLAIM_FS_READ	(1 << LOCK_USED_IN_RECLAIM_FS_READ)
 #define LOCKF_ENABLED_HARDIRQ_READ	(1 << LOCK_ENABLED_HARDIRQ_READ)
 #define LOCKF_ENABLED_SOFTIRQ_READ	(1 << LOCK_ENABLED_SOFTIRQ_READ)
-#define LOCKF_HELD_OVER_RECLAIM_FS_READ	(1 << LOCK_HELD_OVER_RECLAIM_FS_READ)
+#define LOCKF_ENABLED_RECLAIM_FS_READ	(1 << LOCK_ENABLED_RECLAIM_FS_READ)
 
 #define LOCKF_ENABLED_IRQ_READ \
 		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
-- cgit v1.2.3

From 9851673bc32bc9fcafbbaeffc858ead434bd6d58 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 22 Jan 2009 14:18:40 +0100
Subject: lockdep: move state bit definitions around

For convenience later.

Signed-off-by: Peter Zijlstra
Signed-off-by: Ingo Molnar
---
 include/linux/lockdep.h | 49 ++++---------------------------------------------
 1 file changed, 4 insertions(+), 45 deletions(-)

(limited to 'include')

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6d729c9d1d27..5a58ea3e91e9 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -20,51 +20,10 @@ struct lockdep_map;
 #include <linux/stacktrace.h>
 
 /*
- * Lock-class usage-state bits:
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
  */
-enum lock_usage_bit
-{
-	LOCK_USED = 0,
-	LOCK_USED_IN_HARDIRQ,
-	LOCK_USED_IN_SOFTIRQ,
-	LOCK_USED_IN_RECLAIM_FS,
-	LOCK_ENABLED_SOFTIRQ,
-	LOCK_ENABLED_HARDIRQ,
-	LOCK_ENABLED_RECLAIM_FS,
-	LOCK_USED_IN_HARDIRQ_READ,
-	LOCK_USED_IN_SOFTIRQ_READ,
-	LOCK_USED_IN_RECLAIM_FS_READ,
-	LOCK_ENABLED_SOFTIRQ_READ,
-	LOCK_ENABLED_HARDIRQ_READ,
-	LOCK_ENABLED_RECLAIM_FS_READ,
-	LOCK_USAGE_STATES
-};
-
-/*
- * Usage-state bitmasks:
- */
-#define LOCKF_USED			(1 << LOCK_USED)
-#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
-#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
-#define LOCKF_USED_IN_RECLAIM_FS	(1 << LOCK_USED_IN_RECLAIM_FS)
-#define LOCKF_ENABLED_HARDIRQ		(1 << LOCK_ENABLED_HARDIRQ)
-#define LOCKF_ENABLED_SOFTIRQ		(1 << LOCK_ENABLED_SOFTIRQ)
-#define LOCKF_ENABLED_RECLAIM_FS	(1 << LOCK_ENABLED_RECLAIM_FS)
-
-#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
-
-#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
-#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
-#define LOCKF_USED_IN_RECLAIM_FS_READ	(1 << LOCK_USED_IN_RECLAIM_FS_READ)
-#define LOCKF_ENABLED_HARDIRQ_READ	(1 << LOCK_ENABLED_HARDIRQ_READ)
-#define LOCKF_ENABLED_SOFTIRQ_READ	(1 << LOCK_ENABLED_SOFTIRQ_READ)
-#define LOCKF_ENABLED_RECLAIM_FS_READ	(1 << LOCK_ENABLED_RECLAIM_FS_READ)
-
-#define LOCKF_ENABLED_IRQ_READ \
-		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
-#define LOCKF_USED_IN_IRQ_READ \
-	(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+#define XXX_LOCK_USAGE_STATES		(1+3*4)
 
 #define MAX_LOCKDEP_SUBCLASSES		8UL
 
@@ -105,7 +64,7 @@ struct lock_class {
 	 * IRQ/softirq usage tracking bits:
 	 */
 	unsigned long			usage_mask;
-	struct stack_trace		usage_traces[LOCK_USAGE_STATES];
+	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
 
 	/*
 	 * These fields represent a directed graph of lock dependencies,
-- cgit v1.2.3
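The arithmetic in XXX_LOCK_USAGE_STATES matches the removed enum: 1 bit
for LOCK_USED plus, for each of the 3 states (HARDIRQ, SOFTIRQ,
RECLAIM_FS), 4 bits (USED_IN, USED_IN_READ, ENABLED, ENABLED_READ),
i.e. 1 + 3*4 = 13. The "convenience later" is template generation of
these bits; a sketch of where this is headed, assuming an x-macro file
kernel/lockdep_states.h (the exact form in the follow-up commits may
differ):

	/* kernel/lockdep_states.h - one line per lockdep state: */
	LOCKDEP_STATE(HARDIRQ)
	LOCKDEP_STATE(SOFTIRQ)
	LOCKDEP_STATE(RECLAIM_FS)

	/*
	 * Expanded into the usage bits, keeping the enum (and, with a
	 * similar template, the LOCKF_* masks) automatically in sync
	 * with the state list:
	 */
	enum lock_usage_bit {
	#define LOCKDEP_STATE(__STATE)		\
		LOCK_USED_IN_##__STATE,		\
		LOCK_USED_IN_##__STATE##_READ,	\
		LOCK_ENABLED_##__STATE,		\
		LOCK_ENABLED_##__STATE##_READ,
	#include "lockdep_states.h"
	#undef LOCKDEP_STATE
		LOCK_USED,
		LOCK_USAGE_STATES
	};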