path: root/arch/csky/include
author Mark Rutland <mark.rutland@arm.com> 2021-05-25 15:02:15 +0100
committer Peter Zijlstra <peterz@infradead.org> 2021-05-26 13:20:51 +0200
commit a5fb82d7e2695e667badeac202fb7d113a8ae9a9 (patch)
tree 6669d352fc0663a44c6f2cee8015d8fadff8823d /arch/csky/include
parent fc63a6e08a8c97a3dc3a6f2e1946b949b9a6c2d3 (diff)
locking/atomic: csky: move to ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC, as once all architectures are converted it will be possible to make significant cleanups to the atomics headers, and this will make it much easier to generically enable atomic functionality (e.g. debug logic in the instrumented wrappers).

As a step towards that, this patch migrates csky to ARCH_ATOMIC. The arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common code wraps these with optional instrumentation to provide the regular functions.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Guo Ren <guoren@kernel.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-17-mark.rutland@arm.com
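As a rough illustration of the layering the message describes: with ARCH_ATOMIC, the architecture only defines arch_*() primitives, and the common generated header (asm-generic/atomic-instrumented.h in kernels of this era) wraps them with sanitizer instrumentation to produce the regular names. A minimal sketch of that wrapping, assuming the era's instrument_atomic_write() helper; the real generated code differs in detail across versions:

	/* Sketch: common code supplies cmpxchg() by instrumenting the
	 * access, then deferring to the architecture's arch_cmpxchg(). */
	#define cmpxchg(ptr, ...)					\
	({								\
		typeof(ptr) __ai_ptr = (ptr);				\
		instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));	\
		arch_cmpxchg(__ai_ptr, __VA_ARGS__);			\
	})

This is why the patch below only renames the arch-side macros: once they carry the arch_ prefix, the generic wrappers provide the plain xchg()/cmpxchg() entry points.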
Diffstat (limited to 'arch/csky/include')
-rw-r--r-- arch/csky/include/asm/cmpxchg.h | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index dabc8e46ce7b..d1bef11f8dc9 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -31,7 +31,7 @@ extern void __bad_xchg(void);
__ret; \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
(__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
#define __cmpxchg_relaxed(ptr, old, new, size) \
@@ -61,14 +61,14 @@ extern void __bad_xchg(void);
__ret; \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__smp_release_fence(); \
- __ret = cmpxchg_relaxed(ptr, o, n); \
+ __ret = arch_cmpxchg_relaxed(ptr, o, n); \
__smp_acquire_fence(); \
__ret; \
})
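The arch_cmpxchg() definition above builds a fully ordered operation from the relaxed one by bracketing it with release and acquire fences. A stand-alone illustration of the same idiom in portable C11 (the function name is hypothetical, not kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Fully ordered CAS composed from a relaxed CAS bracketed by
	 * fences, mirroring the arch_cmpxchg() macro above. */
	static inline bool cmpxchg_full(atomic_int *ptr, int *expected, int desired)
	{
		atomic_thread_fence(memory_order_release);	/* cf. __smp_release_fence() */
		bool ok = atomic_compare_exchange_strong_explicit(
				ptr, expected, desired,
				memory_order_relaxed, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);	/* cf. __smp_acquire_fence() */
		return ok;
	}

Bracketing with stand-alone fences orders the CAS against all surrounding accesses, which is at least as strong as attaching acquire/release semantics to the operation itself.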