author		Nicholas Piggin <npiggin@gmail.com>	2023-10-16 22:43:03 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2023-10-20 22:43:34 +1100
commit		fcf77d44274b96a55cc74043561a4b3151b9ad24 (patch)
tree		41e537e792d616948c399cfffe1a54147276d7ee /arch/powerpc/lib
parent		fd8fae50c9c6117c4e05954deab2cc515508666b (diff)
powerpc/qspinlock: don't propagate the not-sleepy state
To simplify things, don't propagate the not-sleepy condition back down
the queue. Instead, have the waiters clear their own node->sleepy when
finding the lock owner is not preempted.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Reviewed-by: "Nysal Jan K.A" <nysal@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20231016124305.139923-5-npiggin@gmail.com
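The shape of the change is easier to see outside the diff context. What
follows is a minimal user-space sketch of the revised flow, not the kernel
code itself: struct qnode is pared down to the fields involved,
vcpu_is_preempted() is a simplified stand-in for the paravirt helper that
qspinlock.c uses, and waiter_observe_owner() is a hypothetical condensation
of the waiter-side logic in yield_to_prev(). The queue head now only ever
sets its successor's sleepy flag; each waiter clears its own flag when it
observes the owner running.

/*
 * Sketch only; stand-ins for the real qspinlock.c helpers.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct qnode {
	struct qnode *next;
	bool sleepy;
};

/* Stand-in: report whether the lock owner's vCPU is preempted. */
static bool vcpu_is_preempted(int owner_cpu)
{
	(void)owner_cpu;
	return false;	/* pretend the owner is running */
}

/*
 * After the patch, the head only propagates sleepy == true to its
 * successor; there is no *set_sleepy bookkeeping and no propagation
 * of the not-sleepy state back down the queue.
 */
static void propagate_sleepy(struct qnode *node, int owner_cpu)
{
	struct qnode *next = node->next;

	if (!next || next->sleepy)
		return;
	if (vcpu_is_preempted(owner_cpu))
		next->sleepy = true;
}

/*
 * Each waiter clears its own node->sleepy once it sees the owner
 * running, replacing the old "propagate back sleepy==false" pass.
 */
static void waiter_observe_owner(struct qnode *node, int owner_cpu)
{
	if (node->sleepy && !vcpu_is_preempted(owner_cpu))
		node->sleepy = false;
}

int main(void)
{
	struct qnode head = { NULL, false };
	struct qnode waiter = { NULL, true };	/* stale sleepy flag */

	head.next = &waiter;

	propagate_sleepy(&head, 0);		/* owner running: nothing set */
	waiter_observe_owner(&waiter, 0);	/* waiter clears itself */

	printf("waiter.sleepy = %d\n", waiter.sleepy);	/* prints 0 */
	return 0;
}

Clearing locally means the head no longer carries the *set_sleepy flag and
never writes into the successor's node just to record a negative result,
which is what the deleted propagate-back-sleepy==false pass did.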
Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--	arch/powerpc/lib/qspinlock.c	26
1 file changed, 8 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 0932d24a6b07..6bb627e90a32 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -350,7 +350,7 @@ static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u
 	return __yield_to_locked_owner(lock, val, paravirt, mustq);
 }
 
-static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool *set_sleepy, bool paravirt)
+static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt)
 {
 	struct qnode *next;
 	int owner;
@@ -359,18 +359,17 @@ static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool *
 		return;
 	if (!pv_yield_propagate_owner)
 		return;
-	if (*set_sleepy)
-		return;
 
 	next = READ_ONCE(node->next);
 	if (!next)
 		return;
 
+	if (next->sleepy)
+		return;
+
 	owner = get_owner_cpu(val);
-	if (vcpu_is_preempted(owner)) {
+	if (vcpu_is_preempted(owner))
 		next->sleepy = 1;
-		*set_sleepy = true;
-	}
 }
 
 /* Called inside spin_begin() */
@@ -385,12 +384,7 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 	if (!pv_yield_propagate_owner)
 		goto yield_prev;
 
-	if (!READ_ONCE(node->sleepy)) {
-		/* Propagate back sleepy==false */
-		if (node->next && node->next->sleepy)
-			node->next->sleepy = 0;
-		goto yield_prev;
-	} else {
+	if (node->sleepy) {
 		u32 val = READ_ONCE(lock->val);
 
 		if (val & _Q_LOCKED_VAL) {
@@ -410,6 +404,7 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 			if (preempted)
 				return preempted;
 		}
+		node->sleepy = false;
 	}
 
 yield_prev:
@@ -533,7 +528,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 	bool sleepy = false;
 	bool mustq = false;
 	int idx;
-	bool set_sleepy = false;
 	int iters = 0;
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -591,10 +585,6 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 	spec_barrier();
 	spin_end();
 
-	/* Clear out stale propagated sleepy */
-	if (paravirt && pv_yield_propagate_owner && node->sleepy)
-		node->sleepy = 0;
-
 	smp_rmb(); /* acquire barrier for the mcs lock */
 
 	/*
@@ -636,7 +626,7 @@ again:
 			}
 		}
 
-		propagate_sleepy(node, val, &set_sleepy, paravirt);
+		propagate_sleepy(node, val, paravirt);
 		preempted = yield_head_to_locked_owner(lock, val, paravirt);
 		if (!maybe_stealers)
 			continue;