commit 3b5d8510b94a95e493e8c4951ffc3d1cf6a6792d
tree 4493367715c7a15e9057e6c72fa3c3edfd0605ad
parent c47f39e3b75e1138823984ad5079547c7a41b726
parent 41ef8f826692c8f65882bec0a8211bd4d1d2d19a
author Linus Torvalds <torvalds@linux-foundation.org> 2013-02-22 19:25:09 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2013-02-22 19:25:09 -0800
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core locking changes from Ingo Molnar:
"The biggest change is the rwsem lock-steal improvements, both to the
assembly optimized and the spinlock based variants.
The other notable change is the cleanup of the seqlock implementation
to be based on the seqcount infrastructure.
The rest is assorted smaller debuggability, cleanup and continued -rt
locking changes."
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
rwsem-spinlock: Implement writer lock-stealing for better scalability
futex: Revert "futex: Mark get_robust_list as deprecated"
generic: Use raw local irq variant for generic cmpxchg
lockdep: Selftest: convert spinlock to raw spinlock
seqlock: Use seqcount infrastructure
seqlock: Remove unused functions
ntp: Make ntp_lock raw
intel_idle: Convert i7300_idle_lock to raw_spinlock
locking: Various static lock initializer fixes
lockdep: Print more info when MAX_LOCK_DEPTH is exceeded
rwsem: Implement writer lock-stealing for better scalability
lockdep: Silence warning if CONFIG_LOCKDEP isn't set
watchdog: Use local_clock for get_timestamp()
lockdep: Rename print_unlock_inbalance_bug() to print_unlock_imbalance_bug()
locking/stat: Fix a typo
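An aside on the "seqlock: Use seqcount infrastructure" item above: after that change, a seqlock_t is just a seqcount (which readers sample and retry against) paired with a lock that only serializes writers. Below is a minimal user-space sketch of that layering; it is a simplified analogue with invented helper bodies, not a copy of include/linux/seqlock.h -- pthread_mutex_t stands in for the kernel spinlock, and the lockdep and memory-barrier details of the real implementation are omitted.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
        atomic_uint sequence;           /* even: stable, odd: writer active */
} seqcount_t;

typedef struct {
        seqcount_t seqcount;            /* readers only ever look at this */
        pthread_mutex_t lock;           /* stands in for the kernel spinlock;
                                           serializes writers only */
} seqlock_t;

/* Reader side: sample the sequence, spinning out of a write in progress. */
static unsigned read_seqbegin(seqlock_t *sl)
{
        unsigned seq;

        while ((seq = atomic_load(&sl->seqcount.sequence)) & 1)
                ;                       /* writer active, wait it out */
        return seq;
}

/* Reader side: nonzero means a writer intervened, so retry the read. */
static int read_seqretry(seqlock_t *sl, unsigned start)
{
        return atomic_load(&sl->seqcount.sequence) != start;
}

/* Writer side: bump the count to odd around the critical section. */
static void write_seqlock(seqlock_t *sl)
{
        pthread_mutex_lock(&sl->lock);
        atomic_fetch_add(&sl->seqcount.sequence, 1);    /* now odd */
}

static void write_sequnlock(seqlock_t *sl)
{
        atomic_fetch_add(&sl->seqcount.sequence, 1);    /* even again */
        pthread_mutex_unlock(&sl->lock);
}

int main(void)
{
        seqlock_t sl = { { 0 }, PTHREAD_MUTEX_INITIALIZER };
        int value = 0, snapshot;
        unsigned seq;

        write_seqlock(&sl);
        value = 42;                     /* update the protected data */
        write_sequnlock(&sl);

        do {                            /* lockless read with retry */
                seq = read_seqbegin(&sl);
                snapshot = value;
        } while (read_seqretry(&sl, seq));
        printf("read %d at sequence %u\n", snapshot, seq);
        return 0;
}

The point of the kernel cleanup is exactly this factoring: the retry logic lives in the seqcount and can be reused on its own, while seqlock_t merely adds writer serialization on top.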
Diffstat (limited to 'lib')
 lib/locking-selftest.c | 34
 lib/rwsem-spinlock.c   | 69
 lib/rwsem.c            | 75
 3 files changed, 87 insertions(+), 91 deletions(-)
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 7aae0f2a5e0a..c3eb261a7df3 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -47,10 +47,10 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
  * Normal standalone locks, for the circular and irq-context
  * dependency tests:
  */
-static DEFINE_SPINLOCK(lock_A);
-static DEFINE_SPINLOCK(lock_B);
-static DEFINE_SPINLOCK(lock_C);
-static DEFINE_SPINLOCK(lock_D);
+static DEFINE_RAW_SPINLOCK(lock_A);
+static DEFINE_RAW_SPINLOCK(lock_B);
+static DEFINE_RAW_SPINLOCK(lock_C);
+static DEFINE_RAW_SPINLOCK(lock_D);
 
 static DEFINE_RWLOCK(rwlock_A);
 static DEFINE_RWLOCK(rwlock_B);
@@ -73,12 +73,12 @@ static DECLARE_RWSEM(rwsem_D);
  * but X* and Y* are different classes. We do this so that
  * we do not trigger a real lockup:
  */
-static DEFINE_SPINLOCK(lock_X1);
-static DEFINE_SPINLOCK(lock_X2);
-static DEFINE_SPINLOCK(lock_Y1);
-static DEFINE_SPINLOCK(lock_Y2);
-static DEFINE_SPINLOCK(lock_Z1);
-static DEFINE_SPINLOCK(lock_Z2);
+static DEFINE_RAW_SPINLOCK(lock_X1);
+static DEFINE_RAW_SPINLOCK(lock_X2);
+static DEFINE_RAW_SPINLOCK(lock_Y1);
+static DEFINE_RAW_SPINLOCK(lock_Y2);
+static DEFINE_RAW_SPINLOCK(lock_Z1);
+static DEFINE_RAW_SPINLOCK(lock_Z2);
 
 static DEFINE_RWLOCK(rwlock_X1);
 static DEFINE_RWLOCK(rwlock_X2);
@@ -107,10 +107,10 @@ static DECLARE_RWSEM(rwsem_Z2);
  */
 #define INIT_CLASS_FUNC(class)                          \
 static noinline void                                    \
-init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \
-                 struct rw_semaphore *rwsem)            \
+init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \
+                 struct mutex *mutex, struct rw_semaphore *rwsem)\
 {                                                       \
-        spin_lock_init(lock);                           \
+        raw_spin_lock_init(lock);                       \
         rwlock_init(rwlock);                            \
         mutex_init(mutex);                              \
         init_rwsem(rwsem);                              \
@@ -168,10 +168,10 @@ static void init_shared_classes(void)
  * Shortcuts for lock/unlock API variants, to keep
  * the testcases compact:
  */
-#define L(x)                    spin_lock(&lock_##x)
-#define U(x)                    spin_unlock(&lock_##x)
+#define L(x)                    raw_spin_lock(&lock_##x)
+#define U(x)                    raw_spin_unlock(&lock_##x)
 #define LU(x)                   L(x); U(x)
-#define SI(x)                   spin_lock_init(&lock_##x)
+#define SI(x)                   raw_spin_lock_init(&lock_##x)
 
 #define WL(x)                   write_lock(&rwlock_##x)
 #define WU(x)                   write_unlock(&rwlock_##x)
@@ -911,7 +911,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
 
 #define I2(x)                                   \
         do {                                    \
-                spin_lock_init(&lock_##x);      \
+                raw_spin_lock_init(&lock_##x);  \
                 rwlock_init(&rwlock_##x);       \
                 mutex_init(&mutex_##x);         \
                 init_rwsem(&rwsem_##x);         \
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 7e0d6a58fc83..7542afbb22b3 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -73,20 +73,13 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
                 goto dont_wake_writers;
         }
 
-        /* if we are allowed to wake writers try to grant a single write lock
-         * if there's a writer at the front of the queue
-         * - we leave the 'waiting count' incremented to signify potential
-         *   contention
+        /*
+         * as we support write lock stealing, we can't set sem->activity
+         * to -1 here to indicate we get the lock. Instead, we wake it up
+         * to let it go get it again.
          */
         if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
-                sem->activity = -1;
-                list_del(&waiter->list);
-                tsk = waiter->task;
-                /* Don't touch waiter after ->task has been NULLed */
-                smp_mb();
-                waiter->task = NULL;
-                wake_up_process(tsk);
-                put_task_struct(tsk);
+                wake_up_process(waiter->task);
                 goto out;
         }
 
@@ -121,18 +114,10 @@ static inline struct rw_semaphore *
 __rwsem_wake_one_writer(struct rw_semaphore *sem)
 {
         struct rwsem_waiter *waiter;
-        struct task_struct *tsk;
-
-        sem->activity = -1;
 
         waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-        list_del(&waiter->list);
+        wake_up_process(waiter->task);
 
-        tsk = waiter->task;
-        smp_mb();
-        waiter->task = NULL;
-        wake_up_process(tsk);
-        put_task_struct(tsk);
         return sem;
 }
 
@@ -204,7 +189,6 @@ int __down_read_trylock(struct rw_semaphore *sem)
 
 /*
  * get a write lock on the semaphore
- * - we increment the waiting count anyway to indicate an exclusive lock
  */
 void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
@@ -214,37 +198,32 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 
         raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
-                /* granted */
-                sem->activity = -1;
-                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-                goto out;
-        }
-
-        tsk = current;
-        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
         /* set up my own style of waitqueue */
+        tsk = current;
         waiter.task = tsk;
         waiter.flags = RWSEM_WAITING_FOR_WRITE;
-        get_task_struct(tsk);
-
         list_add_tail(&waiter.list, &sem->wait_list);
 
-        /* we don't need to touch the semaphore struct anymore */
-        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-        /* wait to be given the lock */
+        /* wait for someone to release the lock */
         for (;;) {
-                if (!waiter.task)
+                /*
+                 * That is the key to support write lock stealing: allows the
+                 * task already on CPU to get the lock soon rather than put
+                 * itself into sleep and waiting for system woke it or someone
+                 * else in the head of the wait list up.
+                 */
+                if (sem->activity == 0)
                         break;
-                schedule();
                 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+                schedule();
+                raw_spin_lock_irqsave(&sem->wait_lock, flags);
         }
+        /* got the lock */
+        sem->activity = -1;
+        list_del(&waiter.list);
 
-        tsk->state = TASK_RUNNING;
- out:
-        ;
+        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
 void __sched __down_write(struct rw_semaphore *sem)
@@ -262,8 +241,8 @@ int __down_write_trylock(struct rw_semaphore *sem)
 
         raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
-                /* granted */
+        if (sem->activity == 0) {
+                /* got the lock */
                 sem->activity = -1;
                 ret = 1;
         }
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 8337e1b9bb8d..ad5e0df16ab4 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -2,6 +2,8 @@
  *
  * Written by David Howells (dhowells@redhat.com).
  * Derived from arch/i386/kernel/semaphore.c
+ *
+ * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
  */
 #include <linux/rwsem.h>
 #include <linux/sched.h>
@@ -60,7 +62,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
         struct rwsem_waiter *waiter;
         struct task_struct *tsk;
         struct list_head *next;
-        signed long oldcount, woken, loop, adjustment;
+        signed long woken, loop, adjustment;
 
         waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
         if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
@@ -72,30 +74,8 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
                  */
                 goto out;
 
-        /* There's a writer at the front of the queue - try to grant it the
-         * write lock. However, we only wake this writer if we can transition
-         * the active part of the count from 0 -> 1
-         */
-        adjustment = RWSEM_ACTIVE_WRITE_BIAS;
-        if (waiter->list.next == &sem->wait_list)
-                adjustment -= RWSEM_WAITING_BIAS;
-
- try_again_write:
-        oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
-        if (oldcount & RWSEM_ACTIVE_MASK)
-                /* Someone grabbed the sem already */
-                goto undo_write;
-
-        /* We must be careful not to touch 'waiter' after we set ->task = NULL.
-         * It is an allocated on the waiter's stack and may become invalid at
-         * any time after that point (due to a wakeup from another source).
-         */
-        list_del(&waiter->list);
-        tsk = waiter->task;
-        smp_mb();
-        waiter->task = NULL;
-        wake_up_process(tsk);
-        put_task_struct(tsk);
+        /* Wake up the writing waiter and let the task grab the sem: */
+        wake_up_process(waiter->task);
         goto out;
 
  readers_only:
@@ -157,12 +137,40 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 
  out:
         return sem;
+}
+
+/* Try to get write sem, caller holds sem->wait_lock: */
+static int try_get_writer_sem(struct rw_semaphore *sem,
+                                        struct rwsem_waiter *waiter)
+{
+        struct rwsem_waiter *fwaiter;
+        long oldcount, adjustment;
 
-        /* undo the change to the active count, but check for a transition
-         * 1->0 */
- undo_write:
+        /* only steal when first waiter is writing */
+        fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+        if (!(fwaiter->flags & RWSEM_WAITING_FOR_WRITE))
+                return 0;
+
+        adjustment = RWSEM_ACTIVE_WRITE_BIAS;
+        /* Only one waiter in the queue: */
+        if (fwaiter == waiter && waiter->list.next == &sem->wait_list)
+                adjustment -= RWSEM_WAITING_BIAS;
+
+try_again_write:
+        oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+        if (!(oldcount & RWSEM_ACTIVE_MASK)) {
+                /* No active lock: */
+                struct task_struct *tsk = waiter->task;
+
+                list_del(&waiter->list);
+                smp_mb();
+                put_task_struct(tsk);
+                tsk->state = TASK_RUNNING;
+                return 1;
+        }
+        /* some one grabbed the sem already */
         if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
-                goto out;
+                return 0;
         goto try_again_write;
 }
 
@@ -210,6 +218,15 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
         for (;;) {
                 if (!waiter.task)
                         break;
+
+                raw_spin_lock_irq(&sem->wait_lock);
+                /* Try to get the writer sem, may steal from the head writer: */
+                if (flags == RWSEM_WAITING_FOR_WRITE)
+                        if (try_get_writer_sem(sem, &waiter)) {
+                                raw_spin_unlock_irq(&sem->wait_lock);
+                                return sem;
+                        }
+                raw_spin_unlock_irq(&sem->wait_lock);
                 schedule();
                 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
         }
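The spinlock-based variant's stealing logic boils down to: each time the waiting writer runs, it re-checks sem->activity under wait_lock and takes the lock itself the moment the count reads 0, instead of waiting for the unlocker to hand the lock over. Below is a minimal user-space analogue of that loop; the wsem type and the wsem_* helpers are invented for illustration, with pthread_mutex_t standing in for sem->wait_lock and a condition variable approximating wake_up_process(). It is a sketch of the pattern, not the kernel code above.

#include <pthread.h>
#include <stdio.h>

struct wsem {
        pthread_mutex_t wait_lock;      /* plays the role of sem->wait_lock */
        pthread_cond_t wakeup;          /* stands in for wake_up_process() */
        int activity;                   /* 0: free, -1: write-locked */
};

static void wsem_down_write(struct wsem *sem)
{
        pthread_mutex_lock(&sem->wait_lock);
        /*
         * The stealing loop: rather than sleeping until an unlocker hands
         * the lock over, re-test sem->activity every time we run and take
         * the lock ourselves the moment it reads 0. pthread_cond_wait()
         * drops and retakes wait_lock, mirroring the unlock/schedule()/
         * relock dance in __down_write_nested() above.
         */
        while (sem->activity != 0)
                pthread_cond_wait(&sem->wakeup, &sem->wait_lock);
        sem->activity = -1;             /* got the lock */
        pthread_mutex_unlock(&sem->wait_lock);
}

static void wsem_up_write(struct wsem *sem)
{
        pthread_mutex_lock(&sem->wait_lock);
        sem->activity = 0;
        /* wake one waiter and let it race for the lock, as the patch does */
        pthread_cond_signal(&sem->wakeup);
        pthread_mutex_unlock(&sem->wait_lock);
}

int main(void)
{
        struct wsem sem = { PTHREAD_MUTEX_INITIALIZER,
                            PTHREAD_COND_INITIALIZER, 0 };

        wsem_down_write(&sem);
        puts("writer holds the lock");
        wsem_up_write(&sem);
        return 0;
}

The design trade-off is the usual one for lock stealing: a task already on a CPU can grab the lock without a sleep/wakeup round trip, which is where the scalability win comes from, at the cost of strict FIFO fairness for the queued writer.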