author		Mark Rutland <mark.rutland@arm.com>	2021-12-10 15:14:10 +0000
committer	Catalin Marinas <catalin.marinas@arm.com>	2021-12-14 13:00:33 +0000
commit		053f58bab331e6c31c486661fdb5f6bad5f413de (patch)
tree		aa1d4ee968fde4cdf40ce2de7cf174c3cd25bd4d /arch/arm64/include
parent		8a578a759ad61b13240190a12cdfa8845c5fa949 (diff)
arm64: atomics: lse: define RETURN ops in terms of FETCH ops
The FEAT_LSE atomic instructions include LD* instructions which return
the original value of a memory location, and which can be used to
directly implement FETCH operations. Each RETURN op is implemented as
a copy of the corresponding FETCH op with a trailing instruction to
generate the new value of the memory location. We only directly
implement *_fetch_add*(), for which we have a trailing `add`
instruction.

As the compiler has no visibility of the `add`, this leads to less
than optimal code generation when consuming the result. For example,
the compiler cannot constant-fold the addition into later operations,
and currently GCC 11.1.0 will compile:

	return __lse_atomic_sub_return(1, v) == 0;

As:

	mov     w1, #0xffffffff
	ldaddal w1, w2, [x0]
	add     w1, w1, w2
	cmp     w1, #0x0
	cset    w0, eq  // eq = none
	ret

This patch improves this by replacing the `add` with C addition after
the inline assembly block, e.g.

	ret += i;

This allows the compiler to manipulate `i`, and permits it to merge
the `add` and `cmp` for the above, e.g.

	mov     w1, #0xffffffff
	ldaddal w1, w1, [x0]
	cmp     w1, #0x1
	cset    w0, eq  // eq = none
	ret

With this change the assembly for each RETURN op is identical to the
corresponding FETCH op (including barriers and clobbers), so I've
removed the inline assembly and rewritten each RETURN op in terms of
the corresponding FETCH op, e.g.

| static inline int __lse_atomic_add_return(int i, atomic_t *v)
| {
|	return __lse_atomic_fetch_add(i, v) + i;
| }

The new construction does not adversely affect the common case, and
before and after this patch GCC 11.1.0 can compile:

	__lse_atomic_add_return(i, v)

As:

	ldaddal w0, w2, [x1]
	add     w0, w0, w2

... while having the freedom to do better elsewhere.

This is intended as an optimization and cleanup. There should be no
functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211210151410.2782645-6-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
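[Editor's note: the constant-folding benefit is easy to reproduce outside
the kernel. Below is a minimal sketch, not the kernel code: it models the
new construction with the GCC/Clang __atomic builtins, which, like the
rewritten RETURN ops, leave the trailing arithmetic visible to the
optimizer. The names model_sub_return() and dec_and_test() are
illustrative, not from this patch.]

	/*
	 * Userspace model of the patch's approach, using the __atomic
	 * builtins rather than LSE inline assembly. The subtraction
	 * happens in plain C, so the optimizer can see it.
	 */
	static inline int model_sub_return(int i, int *v)
	{
		/* fetch_sub returns the *old* value; derive the new one in C */
		return __atomic_fetch_sub(v, i, __ATOMIC_SEQ_CST) - i;
	}

	int dec_and_test(int *v)
	{
		/*
		 * Because the "- 1" is visible here, the compiler is free
		 * to fold it into the comparison, e.g. by comparing the
		 * fetched value against 1 instead of subtracting first —
		 * the same merge of `add` and `cmp` described above.
		 */
		return model_sub_return(1, v) == 0;
	}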
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h | 48 ++++++++++++++----------------------------------
 1 file changed, 14 insertions(+), 34 deletions(-)
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index e4c5c4c34ce6..d955ade5df7c 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -75,31 +75,21 @@ ATOMIC_FETCH_OP_SUB( )
#undef ATOMIC_FETCH_OP_SUB
-#define ATOMIC_OP_ADD_SUB_RETURN(name, mb, cl...) \
+#define ATOMIC_OP_ADD_SUB_RETURN(name) \
static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
{ \
- u32 tmp; \
- \
- asm volatile( \
- __LSE_PREAMBLE \
- " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
- " add %w[i], %w[i], %w[tmp]" \
- : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
- : "r" (v) \
- : cl); \
- \
- return i; \
+ return __lse_atomic_fetch_add##name(i, v) + i; \
} \
\
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
{ \
- return __lse_atomic_add_return##name(-i, v); \
+ return __lse_atomic_fetch_sub##name(i, v) - i; \
}
-ATOMIC_OP_ADD_SUB_RETURN(_relaxed, )
-ATOMIC_OP_ADD_SUB_RETURN(_acquire, a, "memory")
-ATOMIC_OP_ADD_SUB_RETURN(_release, l, "memory")
-ATOMIC_OP_ADD_SUB_RETURN( , al, "memory")
+ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
+ATOMIC_OP_ADD_SUB_RETURN(_acquire)
+ATOMIC_OP_ADD_SUB_RETURN(_release)
+ATOMIC_OP_ADD_SUB_RETURN( )
#undef ATOMIC_OP_ADD_SUB_RETURN
@@ -186,31 +176,21 @@ ATOMIC64_FETCH_OP_SUB( )
#undef ATOMIC64_FETCH_OP_SUB
-#define ATOMIC64_OP_ADD_SUB_RETURN(name, mb, cl...) \
+#define ATOMIC64_OP_ADD_SUB_RETURN(name) \
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{ \
- unsigned long tmp; \
- \
- asm volatile( \
- __LSE_PREAMBLE \
- " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
- " add %[i], %[i], %x[tmp]" \
- : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
- : "r" (v) \
- : cl); \
- \
- return i; \
+ return __lse_atomic64_fetch_add##name(i, v) + i; \
} \
\
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
{ \
- return __lse_atomic64_add_return##name(-i, v); \
+ return __lse_atomic64_fetch_sub##name(i, v) - i; \
}
-ATOMIC64_OP_ADD_SUB_RETURN(_relaxed, )
-ATOMIC64_OP_ADD_SUB_RETURN(_acquire, a, "memory")
-ATOMIC64_OP_ADD_SUB_RETURN(_release, l, "memory")
-ATOMIC64_OP_ADD_SUB_RETURN( , al, "memory")
+ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
+ATOMIC64_OP_ADD_SUB_RETURN(_acquire)
+ATOMIC64_OP_ADD_SUB_RETURN(_release)
+ATOMIC64_OP_ADD_SUB_RETURN( )
#undef ATOMIC64_OP_ADD_SUB_RETURN
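[Editor's note: for reference, a hand-expanded sketch of what one variant
of the rewritten 32-bit macro produces after this patch (reconstructed
preprocessor output; atomic_t and the fetch ops come from the surrounding
kernel headers). Ordering is now inherited entirely from the underlying
FETCH op, which is why the mb/cl macro parameters could be dropped.]

	/* ATOMIC_OP_ADD_SUB_RETURN(_acquire) expands, roughly, to: */
	static inline int __lse_atomic_add_return_acquire(int i, atomic_t *v)
	{
		/* acquire ordering comes from the fetch op's ldadda */
		return __lse_atomic_fetch_add_acquire(i, v) + i;
	}

	static inline int __lse_atomic_sub_return_acquire(int i, atomic_t *v)
	{
		/* likewise expressed as the FETCH op plus visible C arithmetic */
		return __lse_atomic_fetch_sub_acquire(i, v) - i;
	}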