author    Will Deacon <will.deacon@arm.com>    2015-08-06 17:54:42 +0100
committer Ingo Molnar <mingo@kernel.org>       2015-08-12 11:59:06 +0200
commit    77e430e3e45662b696dc49aa53ea0f7ac63f2574 (patch)
tree      1fae9dab1524de8b3ca2516228ff76b809626974 /kernel
parent    2b2a85a4d3534b8884fcfa5bb52837f0e1c672bc (diff)
locking/qrwlock: Make use of _{acquire|release|relaxed}() atomics
The qrwlock implementation is slightly heavy in its use of memory barriers,
mainly through the use of _cmpxchg() and _return() atomics, which imply full
barrier semantics.

This patch modifies the qrwlock code to use the more relaxed atomic routines
so that we can reduce the unnecessary barrier overhead on weakly-ordered
architectures.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman.Long@hp.com
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1438880084-18856-7-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
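For readers unfamiliar with the relaxed atomic variants, the sketch below
(illustrative only, not part of this patch; the example_trylock_*() helpers
are hypothetical) contrasts the ordering strength of a full-barrier cmpxchg
with its _acquire() form when used to take a lock. On strongly-ordered
architectures such as x86 the two typically compile to the same instruction;
on weakly-ordered architectures such as arm64 the _acquire() form avoids a
full memory barrier.

#include <linux/atomic.h>

static inline bool example_trylock_full(atomic_t *lock)
{
	/*
	 * atomic_cmpxchg() implies full barrier semantics: it orders all
	 * memory accesses before and after the operation, which is more
	 * than a lock acquisition actually needs.
	 */
	return atomic_cmpxchg(lock, 0, 1) == 0;
}

static inline bool example_trylock_acquire(atomic_t *lock)
{
	/*
	 * atomic_cmpxchg_acquire() provides ACQUIRE semantics only:
	 * accesses in the critical section cannot be reordered before a
	 * successful cmpxchg. That is exactly what taking a lock needs,
	 * and it is cheaper on weakly-ordered CPUs.
	 */
	return atomic_cmpxchg_acquire(lock, 0, 1) == 0;
}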
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/locking/qrwlock.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 6a7a3b8d5ac9..f17a3e3b3550 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -55,7 +55,7 @@ rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
while ((cnts & _QW_WMASK) == _QW_LOCKED) {
cpu_relax_lowlatency();
- cnts = smp_load_acquire((u32 *)&lock->cnts);
+ cnts = atomic_read_acquire(&lock->cnts);
}
}
@@ -74,8 +74,9 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
* Readers in interrupt context will get the lock immediately
* if the writer is just waiting (not holding the lock yet).
* The rspin_until_writer_unlock() function returns immediately
- * in this case. Otherwise, they will spin until the lock
- * is available without waiting in the queue.
+ * in this case. Otherwise, they will spin (with ACQUIRE
+ * semantics) until the lock is available without waiting in
+ * the queue.
*/
rspin_until_writer_unlock(lock, cnts);
return;
@@ -88,12 +89,11 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
arch_spin_lock(&lock->lock);
/*
- * At the head of the wait queue now, increment the reader count
- * and wait until the writer, if it has the lock, has gone away.
- * At ths stage, it is not possible for a writer to remain in the
- * waiting state (_QW_WAITING). So there won't be any deadlock.
+ * The ACQUIRE semantics of the following spinning code ensure
+ * that accesses can't leak upwards out of our subsequent critical
+ * section in the case that the lock is currently held for write.
*/
- cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
+ cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
rspin_until_writer_unlock(lock, cnts);
/*
@@ -116,7 +116,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
/* Try to acquire the lock directly if no reader is present */
if (!atomic_read(&lock->cnts) &&
- (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0))
+ (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
goto unlock;
/*
@@ -127,7 +127,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
struct __qrwlock *l = (struct __qrwlock *)lock;
if (!READ_ONCE(l->wmode) &&
- (cmpxchg(&l->wmode, 0, _QW_WAITING) == 0))
+ (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
break;
cpu_relax_lowlatency();
@@ -137,8 +137,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
for (;;) {
cnts = atomic_read(&lock->cnts);
if ((cnts == _QW_WAITING) &&
- (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
- _QW_LOCKED) == _QW_WAITING))
+ (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
+ _QW_LOCKED) == _QW_WAITING))
break;
cpu_relax_lowlatency();
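A note on the last two hunks above: the cmpxchg that sets _QW_WAITING can be
fully relaxed because that flag only serializes waiting writers and does not
publish or consume any data; the operation that actually hands over the
critical section is the later _QW_WAITING -> _QW_LOCKED transition, which is
the one performed with acquire semantics (and which pairs with the release
used on the unlock path). A minimal sketch of that pattern, using
hypothetical names and a separate flag/lock pair rather than the real
qrwlock word, might look like this:

#include <linux/atomic.h>	/* _relaxed/_acquire cmpxchg variants */

/* Illustrative only; not code from this patch. */
static void example_writer_wait(atomic_t *queued, atomic_t *lock)
{
	/*
	 * Relaxed is sufficient here: 'queued' is only a ticket among
	 * waiting writers, no data depends on its ordering.
	 */
	while (atomic_cmpxchg_relaxed(queued, 0, 1) != 0)
		cpu_relax();

	/*
	 * ACQUIRE is required here: once this succeeds we enter the
	 * critical section, so later accesses must not be reordered
	 * before the point at which the lock is taken.
	 */
	while (atomic_cmpxchg_acquire(lock, 0, 1) != 0)
		cpu_relax();

	atomic_set(queued, 0);	/* leave the writers' queue */
}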