author | Heiko Carstens <hca@linux.ibm.com> | 2024-11-06 11:03:15 +0100
committer | Heiko Carstens <hca@linux.ibm.com> | 2024-11-12 14:01:29 +0100
commit | d5fd93629af76748751ff583d35039e5834eeb47 (patch)
tree | 71c069e43075647b06f920e7490199c337dda100 /arch
parent | 75a98ed6faa03656811922cd383994fa7c300259 (diff)
s390/locking: Use arch_try_cmpxchg() instead of __atomic_cmpxchg_bool()
Use arch_try_cmpxchg() instead of __atomic_cmpxchg_bool() everywhere.
This generates the same code as before, but uses the standard
cmpxchg() implementation instead of the custom __atomic_cmpxchg_bool().
Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
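
[Editor's note] For readers unfamiliar with the two calling conventions, here is a minimal user-space sketch of the difference, written with C11 atomics rather than the kernel's s390 primitives (the *_sketch names are illustrative, not kernel code). __atomic_cmpxchg_bool() takes the expected value by value and reports only success or failure; arch_try_cmpxchg() takes it by reference and, on failure, writes back the value actually observed:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the old interface: expected value passed by value,
 * so the caller learns only whether the exchange happened. */
static bool cmpxchg_bool_sketch(atomic_int *ptr, int old, int new)
{
	return atomic_compare_exchange_strong(ptr, &old, new);
}

/* Sketch of the try_cmpxchg() interface: expected value passed by
 * reference. C11's compare-exchange already stores the observed
 * value into *old on failure, which is the semantic the kernel's
 * arch_try_cmpxchg() provides. */
static bool try_cmpxchg_sketch(atomic_int *ptr, int *old, int new)
{
	return atomic_compare_exchange_strong(ptr, old, new);
}

int main(void)
{
	atomic_int lock = 42;
	int old = 0;

	/* Old style: fails (lock holds 42, not 0), old is untouched. */
	if (!cmpxchg_bool_sketch(&lock, 0, 1))
		printf("bool variant failed, no new value reported\n");

	/* New style: fails too, but old now holds the observed 42. */
	if (!try_cmpxchg_sketch(&lock, &old, 1))
		printf("try variant failed, observed %d\n", old);
	return 0;
}
```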
Diffstat (limited to 'arch')
-rw-r--r-- | arch/s390/include/asm/spinlock.h | 13
-rw-r--r-- | arch/s390/lib/spinlock.c | 12
2 files changed, 14 insertions, 11 deletions
```diff
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 77d5e804af93..ac868a9bb0d1 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -57,8 +57,10 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
 
 static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
+	int old = 0;
+
 	barrier();
-	return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
+	return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -118,7 +120,9 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
+	int old = 0;
+
+	if (!arch_try_cmpxchg(&rw->cnts, &old, 0x30000))
 		arch_write_lock_wait(rw);
 }
 
@@ -133,8 +137,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	int old;
 
 	old = READ_ONCE(rw->cnts);
-	return (!(old & 0xffff0000) &&
-		__atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
+	return (!(old & 0xffff0000) && arch_try_cmpxchg(&rw->cnts, &old, old + 1));
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -142,7 +145,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 	int old;
 
 	old = READ_ONCE(rw->cnts);
-	return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
+	return !old && arch_try_cmpxchg(&rw->cnts, &old, 0x30000);
 }
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 9f86ad8fa8b4..09d735010ee1 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -127,8 +127,8 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 	node_id = node->node_id;
 
 	/* Enqueue the node for this CPU in the spinlock wait queue */
+	old = READ_ONCE(lp->lock);
 	while (1) {
-		old = READ_ONCE(lp->lock);
 		if ((old & _Q_LOCK_CPU_MASK) == 0 &&
 		    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
 			/*
@@ -139,7 +139,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 			 * waiter will get the lock.
 			 */
 			new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
-			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+			if (arch_try_cmpxchg(&lp->lock, &old, new))
 				/* Got the lock */
 				goto out;
 			/* lock passing in progress */
@@ -147,7 +147,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 		}
 		/* Make the node of this CPU the new tail. */
 		new = node_id | (old & _Q_LOCK_MASK);
-		if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+		if (arch_try_cmpxchg(&lp->lock, &old, new))
 			break;
 	}
 	/* Set the 'next' pointer of the tail node in the queue */
@@ -184,7 +184,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 		if (!owner) {
 			tail_id = old & _Q_TAIL_MASK;
 			new = ((tail_id != node_id) ? tail_id : 0) | lockval;
-			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+			if (arch_try_cmpxchg(&lp->lock, &old, new))
 				/* Got the lock */
 				break;
 			continue;
@@ -258,7 +258,7 @@ int arch_spin_trylock_retry(arch_spinlock_t *lp)
 		owner = READ_ONCE(lp->lock);
 		/* Try to get the lock if it is free. */
 		if (!owner) {
-			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
+			if (arch_try_cmpxchg(&lp->lock, &owner, cpu))
 				return 1;
 		}
 	}
@@ -300,7 +300,7 @@ void arch_write_lock_wait(arch_rwlock_t *rw)
 	while (1) {
 		old = READ_ONCE(rw->cnts);
 		if ((old & 0x1ffff) == 0 &&
-		    __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
+		    arch_try_cmpxchg(&rw->cnts, &old, old | 0x10000))
 			/* Got the lock */
 			break;
 		barrier();
```
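
[Editor's note] The first spinlock.c hunk above also hoists READ_ONCE(lp->lock) out of the retry loop. That is only correct with the new interface: a failed arch_try_cmpxchg() refreshes 'old' with the current lock value, so the explicit re-read each iteration becomes redundant. A minimal sketch of this retry idiom with C11 atomics (set_bits_sketch is an illustrative name, not kernel code):

```c
#include <stdatomic.h>

/* Illustrative try_cmpxchg()-style retry loop: the load happens once,
 * before the loop, mirroring how READ_ONCE(lp->lock) moved out of the
 * while (1) loop in arch_spin_lock_queued() above. */
static void set_bits_sketch(atomic_int *word, int bits)
{
	int old = atomic_load_explicit(word, memory_order_relaxed);

	/* On failure, compare-exchange stores the freshly observed
	 * value into 'old', so the desired value 'old | bits' is
	 * recomputed from current state on every retry. */
	while (!atomic_compare_exchange_weak(word, &old, old | bits))
		;
}
```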