| author | Hou Tao <houtao1@huawei.com> | 2022-02-17 15:22:30 +0800 |
|---|---|---|
| committer | Will Deacon <will@kernel.org> | 2022-02-22 21:25:48 +0000 |
| commit | fa1114d9eba5087ba5e81aab4c56f546995e6cd3 (patch) | |
| tree | 3c28b1a2af95190120e8ad71dce9d8147c8f6602 /arch/arm64/net/bpf_jit.h | |
| parent | 97e58e395e9c074fd096dad13c54e9f4112cf71d (diff) | |
arm64: insn: add encoders for atomic operations
This is a preparation patch for eBPF atomics support on arm64. eBPF
needs to support atomic[64]_fetch_add, atomic[64]_[fetch_]{and,or,xor} and
atomic[64]_{xchg|cmpxchg}. The ordering semantics of eBPF atomics are
the same as those of the corresponding implementations in the Linux kernel.
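For orientation, one plausible mapping from the eBPF operations above to the
AArch64 instructions targeted by the new encoders is sketched below; the choice
of the fully-ordered *AL variants for the value-returning forms is an assumption
drawn from the ordering statement above, not something quoted from this patch.

/* Illustrative mapping (assumed, not part of this patch) */
/* atomic[64]_add/and/or/xor (no fetch)  ->  STADD / STCLR / STSET / STEOR           */
/* atomic[64]_fetch_{add,and,or,xor}     ->  LDADDAL / LDCLRAL / LDSETAL / LDEORAL   */
/* atomic[64]_xchg                       ->  SWPAL                                   */
/* atomic[64]_cmpxchg                    ->  CASAL                                   */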
Add three helpers to support the LDCLR/LDEOR/LDSET/SWP, CAS and DMB
instructions. STADD/STCLR/STEOR/STSET are simply encoded as aliases for
LDADD/LDCLR/LDEOR/LDSET with XZR as the destination register, so no extra
helper is added for them. atomic_fetch_add() and other atomic ops need
support for the STLXR instruction, so extend enum aarch64_insn_ldst_type
accordingly.
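The six-argument form of the first helper is visible at its call site in the
bpf_jit.h hunk below; approximate prototypes for all three, reconstructed from
that call site and from this description (the parameter names and the CAS/DMB
signatures are assumptions), would look roughly like:

u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
				  enum aarch64_insn_variant size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order);
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
			 enum aarch64_insn_register address,
			 enum aarch64_insn_register value,
			 enum aarch64_insn_variant size,
			 enum aarch64_insn_mem_order_type order);
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);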
The LDADD/LDCLR/LDEOR/LDSET/SWP and CAS instructions are only available
when LSE atomics is enabled, so just return AARCH64_BREAK_FAULT directly
from these newly-added helpers if CONFIG_ARM64_LSE_ATOMICS is disabled.
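A minimal sketch of that fallback, assuming the declarations are simply guarded
by the config option (the exact structure is an assumption, not quoted from the
patch):

#ifdef CONFIG_ARM64_LSE_ATOMICS
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
				  enum aarch64_insn_variant size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order);
#else
static inline u32
aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
			      enum aarch64_insn_register address,
			      enum aarch64_insn_register value,
			      enum aarch64_insn_variant size,
			      enum aarch64_insn_mem_atomic_op op,
			      enum aarch64_insn_mem_order_type order)
{
	/* LSE atomics not built in: hand back a brk encoding so callers bail out */
	return AARCH64_BREAK_FAULT;
}
#endif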
Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20220217072232.1186625-3-houtao1@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'arch/arm64/net/bpf_jit.h')
-rw-r--r-- | arch/arm64/net/bpf_jit.h | 11 |
1 file changed, 9 insertions, 2 deletions
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index cc0cf0f5c7c3..9d9250c7cc72 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -89,9 +89,16 @@
 #define A64_STXR(sf, Rt, Rn, Rs) \
 	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
 
-/* LSE atomics */
+/*
+ * LSE atomics
+ *
+ * STADD is simply encoded as an alias for LDADD with XZR as
+ * the destination register.
+ */
 #define A64_STADD(sf, Rn, Rs) \
-	aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
+	aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
+				      A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_ADD, \
+				      AARCH64_INSN_MEM_ORDER_NONE)
 
 /* Add/subtract (immediate) */
 #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
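Expressed through the same encoder, the other store-only aliases mentioned in
the log would be equally thin wrappers. A hedged sketch follows; the
A64_STCLR/A64_STSET macro names and the AARCH64_INSN_MEM_ATOMIC_{CLR,SET}
enumerators are extrapolated from the visible A64_STADD expansion rather than
taken from this patch:

#define A64_STCLR(sf, Rn, Rs) \
	aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
				      A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_CLR, \
				      AARCH64_INSN_MEM_ORDER_NONE)
#define A64_STSET(sf, Rn, Rs) \
	aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
				      A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_SET, \
				      AARCH64_INSN_MEM_ORDER_NONE)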