author:    Hari Bathini <hbathini@linux.ibm.com>      2022-06-10 21:25:52 +0530
committer: Michael Ellerman <mpe@ellerman.id.au>      2022-06-29 19:37:08 +1000
commit:    2d9206b227434912582049c49af1085660fa1e50
tree:      9e649d9d9f580e62027eaf4856d52853a815eedc /arch/powerpc/net
parent:    aea7ef8a82c0ea13ff20b65ff2edf8a38a17eda8
powerpc/bpf/32: Add instructions for atomic_[cmp]xchg
This adds two atomic opcodes, BPF_XCHG and BPF_CMPXCHG, on ppc32, both
of which include the BPF_FETCH flag. The kernel's atomic_cmpxchg
operation fundamentally has three operands, but we only have two register
fields. Therefore the operand we compare against (the kernel's API
calls it 'old') is hard-coded to be BPF_REG_R0. Also, the kernel's
atomic_cmpxchg returns the previous value at dst_reg + off. JIT the
same for BPF too, with the return value placed in BPF_REG_0:
BPF_REG_R0 = atomic_cmpxchg(dst_reg + off, BPF_REG_R0, src_reg);
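
For illustration (not part of this patch), a BPF program encodes the
operation above the way the selftests do, using the instruction-building
macros from include/linux/filter.h. A minimal sketch, assuming R1 points
at a writable 32-bit word:

	#include <linux/filter.h>

	/* Hypothetical snippet: "BPF_REG_0 = atomic_cmpxchg(R1 + 0, BPF_REG_0, R2)" */
	static const struct bpf_insn cmpxchg_example[] = {
		BPF_MOV32_IMM(BPF_REG_0, 3),	/* 'old': the value we expect to find */
		BPF_MOV32_IMM(BPF_REG_2, 4),	/* new value to store on a match */
		/* imm carries the atomic op; dst_reg + off addresses the memory word */
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_1, BPF_REG_2, 0),
		/* BPF_REG_0 now holds the previous value at [R1 + 0] either way */
	};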
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> (ppc64le)
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220610155552.25892-6-hbathini@linux.ibm.com
Diffstat (limited to 'arch/powerpc/net')
-rw-r--r--  arch/powerpc/net/bpf_jit_comp32.c | 25
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 28dc6a1a8f2f..43f1c76d48ce 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -297,6 +297,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
 		u32 tmp_reg = bpf_to_ppc(TMP_REG);
 		u32 size = BPF_SIZE(code);
+		u32 save_reg, ret_reg;
 		s16 off = insn[i].off;
 		s32 imm = insn[i].imm;
 		bool func_addr_fixed;
@@ -799,6 +800,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		 * BPF_STX ATOMIC (atomic ops)
 		 */
 		case BPF_STX | BPF_ATOMIC | BPF_W:
+			save_reg = _R0;
+			ret_reg = src_reg;
+
 			bpf_set_seen_register(ctx, tmp_reg);
 			bpf_set_seen_register(ctx, ax_reg);
 
@@ -829,6 +833,21 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 			case BPF_XOR | BPF_FETCH:
 				EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
 				break;
+			case BPF_CMPXCHG:
+				/*
+				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+				 * in src_reg for other cases.
+				 */
+				ret_reg = bpf_to_ppc(BPF_REG_0);
+
+				/* Compare with old value in BPF_REG_0 */
+				EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
+				/* Don't set if different from old value */
+				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+				fallthrough;
+			case BPF_XCHG:
+				save_reg = src_reg;
+				break;
 			default:
 				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
 						   code, i);
@@ -836,15 +855,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 			}
 
 			/* store new value */
-			EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
+			EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
 			/* we're done if this succeeded */
 			PPC_BCC_SHORT(COND_NE, tmp_idx);
 
 			/* For the BPF_FETCH variant, get old data into src_reg */
 			if (imm & BPF_FETCH) {
-				EMIT(PPC_RAW_MR(src_reg, ax_reg));
+				EMIT(PPC_RAW_MR(ret_reg, ax_reg));
 				if (!fp->aux->verifier_zext)
-					EMIT(PPC_RAW_LI(src_reg_h, 0));
+					EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
 			}
 			break;
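
Note the branch target in the new BPF_CMPXCHG case: by the time
PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4) is emitted, the next three
instructions are the stwcx. store, the retry branch back to tmp_idx, and
the PPC_RAW_MR that copies the old value into ret_reg, so a failed
compare skips the store and the retry and goes straight to returning the
old value. The net effect of the emitted lwarx/stwcx. loop can be summed
up with a compiler builtin; a minimal sketch of the semantics only, not
kernel code:

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * What the JITed BPF_CMPXCHG computes: atomically store new_val at
	 * *mem if *mem == expected, and return whatever *mem held beforehand
	 * (the value the JIT leaves in BPF_REG_0).
	 */
	static uint32_t cmpxchg32(uint32_t *mem, uint32_t expected, uint32_t new_val)
	{
		__atomic_compare_exchange_n(mem, &expected, new_val, false,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return expected; /* on failure, the builtin writes the observed value here */
	}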