author     Uros Bizjak <ubizjak@gmail.com>    2024-01-24 11:49:53 +0100
committer  Ingo Molnar <mingo@kernel.org>     2024-03-01 13:02:05 +0100
commit     ce3576ebd62d99f79c1dc98824e2ef6d6ab68434 (patch)
tree       ff97be9bf28a71d1b790982c793ebbe8591c7695 /kernel/locking
parent     e807c2a37044a51de89d6d4f8a1f5ecfb3752f36 (diff)
locking/rtmutex: Use try_cmpxchg_relaxed() in mark_rt_mutex_waiters()
Use try_cmpxchg() instead of cmpxchg(*ptr, old, new) == old. The x86 CMPXCHG instruction returns success in the ZF flag, so this change saves a compare after CMPXCHG (and the related MOV instruction in front of CMPXCHG).

Also, try_cmpxchg() implicitly assigns the old *ptr value to "old" when CMPXCHG fails, so there is no need to re-read the value in the loop.

Note that the value from *ptr should be read using READ_ONCE() to prevent the compiler from merging, refetching or reordering the read.

No functional change intended.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Link: https://lore.kernel.org/r/20240124104953.612063-1-ubizjak@gmail.com
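As a rough illustration of the pattern described above (not part of the commit), here is a minimal user-space sketch built on the GCC/Clang __atomic_compare_exchange_n() builtin, which has the same try_cmpxchg()-style semantics: it returns a boolean and, on failure, writes the current value of *p back into "owner". The mark_waiters() function, WAITERS_BIT and the plain unsigned long lock word are illustrative stand-ins, not the kernel's definitions.

    #include <stdbool.h>

    #define WAITERS_BIT 1UL    /* illustrative stand-in for RT_MUTEX_HAS_WAITERS */

    static void mark_waiters(unsigned long *p)
    {
            /* Read the initial value once; no explicit re-read inside the loop. */
            unsigned long owner = __atomic_load_n(p, __ATOMIC_RELAXED);
            unsigned long new;

            do {
                    new = owner | WAITERS_BIT;
                    /*
                     * On failure the builtin stores the current *p into "owner",
                     * so the next iteration retries with the fresh value -- the
                     * same property the commit relies on in try_cmpxchg_relaxed().
                     */
            } while (!__atomic_compare_exchange_n(p, &owner, new, false,
                                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED));
    }

Compared with a cmpxchg()-style loop, this avoids both the explicit re-read of *p and the separate comparison of the returned value.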
Diffstat (limited to 'kernel/locking')
-rw-r--r--   kernel/locking/rtmutex.c   9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4a10e8c16fd2..88d08eeb8bc0 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -237,12 +237,13 @@ static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
  */
 static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 {
-        unsigned long owner, *p = (unsigned long *) &lock->owner;
+        unsigned long *p = (unsigned long *) &lock->owner;
+        unsigned long owner, new;
 
+        owner = READ_ONCE(*p);
         do {
-                owner = *p;
-        } while (cmpxchg_relaxed(p, owner,
-                        owner | RT_MUTEX_HAS_WAITERS) != owner);
+                new = owner | RT_MUTEX_HAS_WAITERS;
+        } while (!try_cmpxchg_relaxed(p, &owner, new));
 
         /*
          * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE