author		Mark Rutland <mark.rutland@arm.com>	2021-05-25 15:02:21 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2021-05-26 13:20:51 +0200
commit		c7b5fd6faa1dc6cdc721a978d9d122cd31bbd7b1 (patch)
tree		1dd783e6db4478586a97d7e5526467734b9ccd1e /arch/mips/kernel
parent		f5b1c0f951e7b0d5634b82d57971cae25a0ba435 (diff)
locking/atomic: mips: move to ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates mips to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-23-mark.rutland@arm.com
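For context, under ARCH_ATOMIC the regular cmpxchg() becomes a thin
wrapper generated into include/linux/atomic-instrumented.h. A minimal
sketch of that wrapping pattern (the generated file's exact contents
vary by kernel version, so treat this as illustrative rather than
verbatim):

	#define cmpxchg(ptr, ...)					\
	({								\
		typeof(ptr) __ai_ptr = (ptr);				\
		/* KASAN/KCSAN hook: sanitizers observe the access. */	\
		instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));	\
		arch_cmpxchg(__ai_ptr, __VA_ARGS__);			\
	})

This is also why arch-internal helpers must now call arch_cmpxchg()
directly: calling cmpxchg() from inside the arch code would route the
access back through the instrumentation layer and instrument it twice.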
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--	arch/mips/kernel/cmpxchg.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 89107deb03fc..ac9c8cfb2ba9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
 	do {
 		old32 = load32;
 		new32 = (load32 & ~mask) | (val << shift);
-		load32 = cmpxchg(ptr32, old32, new32);
+		load32 = arch_cmpxchg(ptr32, old32, new32);
 	} while (load32 != old32);
 
 	return (load32 & mask) >> shift;
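The loop above relies on mask, shift, and ptr32 computed earlier in
__xchg_small(): the function reduces a 1- or 2-byte exchange to a
32-bit cmpxchg on the naturally aligned word containing the value. A
simplified sketch of that setup (not verbatim kernel code; the
big-endian adjustment of the shift is elided):

	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Bit offset of the sub-word value within its aligned word. */
	shift = ((unsigned long)ptr & 0x3) * BITS_PER_BYTE;

	/* Mask covering only the 'size' bytes being exchanged. */
	mask = GENMASK(size * BITS_PER_BYTE - 1, 0) << shift;

	/* The naturally aligned 32-bit word containing the target. */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;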
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
 		 */
 		old32 = (load32 & ~mask) | (old << shift);
 		new32 = (load32 & ~mask) | (new << shift);
-		load32 = cmpxchg(ptr32, old32, new32);
+		load32 = arch_cmpxchg(ptr32, old32, new32);
 		if (load32 == old32)
 			return old;
 	}
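To make the mask/shift technique concrete outside the kernel, here is
a hypothetical, self-contained user-space rendering of a 1-byte
compare-and-swap emulated via a 32-bit compare-and-swap, using the
GCC/Clang __atomic builtins in place of arch_cmpxchg(); the
byte-offset arithmetic assumes a little-endian layout:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t cmpxchg_u8(volatile uint8_t *ptr, uint8_t old,
				  uint8_t new)
	{
		volatile uint32_t *ptr32 =
			(volatile uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
		unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;
		uint32_t mask = (uint32_t)0xff << shift;
		uint32_t load32 = *ptr32;

		for (;;) {
			uint8_t load = (load32 & mask) >> shift;

			/* Target byte no longer holds 'old': fail. */
			if (load != old)
				return load;

			uint32_t old32 = (load32 & ~mask) |
					 ((uint32_t)old << shift);
			uint32_t new32 = (load32 & ~mask) |
					 ((uint32_t)new << shift);

			/* Fails only if some byte of the word changed. */
			if (__atomic_compare_exchange_n(ptr32, &old32, new32,
							0, __ATOMIC_SEQ_CST,
							__ATOMIC_SEQ_CST))
				return old;

			/* On failure the builtin wrote the observed word. */
			load32 = old32;
		}
	}

	int main(void)
	{
		_Alignas(4) volatile uint8_t buf[4] = { 1, 2, 3, 4 };
		uint8_t ret = cmpxchg_u8(&buf[1], 2, 9);

		printf("returned %u, buf[1] = %u\n", ret, buf[1]);
		return 0;
	}

As in the kernel code above, a failure caused by a neighbouring byte
changing simply retries with the freshly observed word, while a
mismatch in the target byte itself returns the observed value,
preserving cmpxchg() semantics for the sub-word access.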