author		Peter Zijlstra <peterz@infradead.org>	2018-09-26 13:01:19 +0200
committer	Ingo Molnar <mingo@kernel.org>	2018-10-16 17:33:54 +0200
commit		756b1df4c2c82a1cdffeafa9d2aa76c92e7fb405 (patch)
tree		0cc842e1fb8ab46e5dc83f4b16dea0679d176b03
parent		53bf57fab7321fb42b703056a4c80fc9d986d170 (diff)
locking/qspinlock: Rework some comments
While working my way through the code again, I felt the comments could
use help.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andrea.parri@amarulasolutions.com
Cc: longman@redhat.com
Link: https://lkml.kernel.org/r/20181003130257.156322446@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	kernel/locking/qspinlock.c	36
1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index ec343276f975..47cb99787e4d 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -326,16 +326,23 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	/*
 	 * trylock || pending
 	 *
-	 * 0,0,0 -> 0,0,1 ; trylock
-	 * 0,0,1 -> 0,1,1 ; pending
+	 * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock
 	 */
 	val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
+
 	/*
-	 * If we observe any contention; undo and queue.
+	 * If we observe contention, there is a concurrent locker.
+	 *
+	 * Undo and queue; our setting of PENDING might have made the
+	 * n,0,0 -> 0,0,0 transition fail and it will now be waiting
+	 * on @next to become !NULL.
 	 */
 	if (unlikely(val & ~_Q_LOCKED_MASK)) {
+
+		/* Undo PENDING if we set it. */
 		if (!(val & _Q_PENDING_MASK))
 			clear_pending(lock);
+
 		goto queue;
 	}
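To make the new 0,0,* -> 0,1,* -> 0,0,1 comment concrete, here is a minimal
stand-alone C11 model of the pending fast path documented in the hunk above.
It is a sketch under assumptions, not kernel code: the Q_* constants merely
mirror the kernel's _Q_* masks, pending_fastpath() is a hypothetical helper,
and the kernel performs the final transition via clear_pending_set_locked()
rather than a fetch-add.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* (tail, pending, locked) packed into one word, mirroring the _Q_* masks. */
#define Q_LOCKED_VAL	(1U << 0)
#define Q_PENDING_VAL	(1U << 8)
#define Q_LOCKED_MASK	0xffU
#define Q_PENDING_MASK	(0xffU << 8)

struct qsl_model { _Atomic uint32_t val; };

/* Hypothetical model of the 0,0,* -> 0,1,* -> 0,0,1 pending/trylock path. */
static bool pending_fastpath(struct qsl_model *lock)
{
	/* 0,0,* -> 0,1,* : set PENDING, acquire ordering on success. */
	uint32_t val = atomic_fetch_or_explicit(&lock->val, Q_PENDING_VAL,
						memory_order_acquire);

	/* Contention: someone else is pending or queued; undo and queue. */
	if (val & ~Q_LOCKED_MASK) {
		/* Undo PENDING only if we were the one who set it. */
		if (!(val & Q_PENDING_MASK))
			atomic_fetch_and_explicit(&lock->val, ~Q_PENDING_VAL,
						  memory_order_relaxed);
		return false;	/* caller takes the slow (queue) path */
	}

	/* We own PENDING; wait for the current owner to release. */
	while (atomic_load_explicit(&lock->val,
				    memory_order_acquire) & Q_LOCKED_MASK)
		;		/* spin; the kernel uses cpu_relax() here */

	/* *,1,0 -> *,0,1 : clear PENDING, set LOCKED, keep any tail bits. */
	atomic_fetch_add_explicit(&lock->val, Q_LOCKED_VAL - Q_PENDING_VAL,
				  memory_order_relaxed);
	return true;
}

The fetch-add works because at that point PENDING is known set and LOCKED
known clear, so adding Q_LOCKED_VAL - Q_PENDING_VAL flips both fields in one
atomic step while preserving any tail that a queuer may have installed.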
@@ -474,16 +481,25 @@ locked:
 	 */
 
 	/*
-	 * In the PV case we might already have _Q_LOCKED_VAL set.
+	 * In the PV case we might already have _Q_LOCKED_VAL set, because
+	 * of lock stealing; therefore we must also allow:
 	 *
-	 * The atomic_cond_read_acquire() call above has provided the
-	 * necessary acquire semantics required for locking.
+	 * n,0,1 -> 0,0,1
+	 *
+	 * Note: at this point: (val & _Q_PENDING_MASK) == 0, because of the
+	 * above wait condition, therefore any concurrent setting of
+	 * PENDING will make the uncontended transition fail.
 	 */
-	if (((val & _Q_TAIL_MASK) == tail) &&
-	    atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
-		goto release; /* No contention */
+	if ((val & _Q_TAIL_MASK) == tail) {
+		if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL))
+			goto release; /* No contention */
+	}
 
-	/* Either somebody is queued behind us or _Q_PENDING_VAL is set */
+	/*
+	 * Either somebody is queued behind us or _Q_PENDING_VAL got set
+	 * which will then detect the remaining tail and queue behind us
+	 * ensuring we'll see a @next.
+	 */
 	set_locked(lock);
 
 	/*
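The same kind of sketch, reusing the definitions above, models the
head-of-queue handoff this second hunk documents. head_handoff() and its
next argument are hypothetical stand-ins (the kernel waits on the MCS
node's successor pointer, and its set_locked() is a plain byte store rather
than the fetch-or used here):

#define Q_TAIL_MASK	(~0U << 16)	/* everything above pending */

/* Hypothetical model of the n,0,0 (or n,0,1 for PV) -> 0,0,1 handoff. */
static void head_handoff(struct qsl_model *lock, uint32_t tail,
			 _Atomic(void *) *next)
{
	uint32_t val = atomic_load_explicit(&lock->val, memory_order_acquire);

	/*
	 * Uncontended: we are still the last queued CPU, so clear the
	 * tail and take the lock in one shot. PENDING is known clear
	 * here, and a concurrent PENDING setter changes the word and
	 * makes this cmpxchg fail, exactly as the new comment explains.
	 * Relaxed ordering suffices because the acquire above (the
	 * kernel's atomic_cond_read_acquire()) already ordered us.
	 */
	if ((val & Q_TAIL_MASK) == tail &&
	    atomic_compare_exchange_strong_explicit(&lock->val, &val,
						    Q_LOCKED_VAL,
						    memory_order_relaxed,
						    memory_order_relaxed))
		return;

	/* Contended: take the lock, leave the tail for our successor. */
	atomic_fetch_or_explicit(&lock->val, Q_LOCKED_VAL,
				 memory_order_relaxed);

	/* That successor must publish itself; wait for @next. */
	while (!atomic_load_explicit(next, memory_order_acquire))
		;		/* spin until the next waiter links in */
}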