author     Daniel Borkmann <daniel@iogearbox.net>    2020-09-08 00:04:10 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2020-10-01 13:14:52 +0200
commit     87f947e2bb5a11dad396a64505f30c647d5ed0ed (patch)
tree       554ad732e43af9783ef360459ff8998f4d3cfb9c /net
parent     788a00c1f837544bf4622ebb14d15506b4a1151d (diff)
bpf: Fix clobbering of r2 in bpf_gen_ld_abs
[ Upstream commit e6a18d36118bea3bf497c9df4d9988b6df120689 ]

Bryce reported that he saw the following with:

  0: r6 = r1
  1: r1 = 12
  2: r0 = *(u16 *)skb[r1]

The xlated sequence was incorrectly clobbering r2 with the pointer
value of r6 ...

  0: (bf) r6 = r1
  1: (b7) r1 = 12
  2: (bf) r1 = r6
  3: (bf) r2 = r1
  4: (85) call bpf_skb_load_helper_16_no_cache#7692160

... and hence the call to the load helper never succeeded given the
offset was too high. Fix it by reordering the load of r6 into r1.

Other than that, the insn has a calling convention similar to that of
BPF helpers, that is, r0 - r5 are scratch regs, so nothing else is
affected after the insn.

Fixes: e0cea7ce988c ("bpf: implement ld_abs/ld_ind in native bpf")
Reported-by: Bryce Kahle <bryce.kahle@datadoghq.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/cace836e4d07bb63b1a53e49c5dfb238a040c298.1599512096.git.daniel@iogearbox.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
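For reference, with the reordering applied the same program's xlated sequence
would be expected to come out as below; this is an illustrative sketch based on
the patch, not actual verifier output:

  0: (bf) r6 = r1
  1: (b7) r1 = 12
  2: (bf) r2 = r1      // r2 takes the offset while r1 still holds 12
  3: (bf) r1 = r6      // only now is r1 overwritten with the ctx pointer
  4: (85) call bpf_skb_load_helper_16_no_cache#7692160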
Diffstat (limited to 'net')
-rw-r--r--   net/core/filter.c   4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 25a2c3186e14..557bd5cc8f94 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5418,8 +5418,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
 	bool indirect = BPF_MODE(orig->code) == BPF_IND;
 	struct bpf_insn *insn = insn_buf;
 
-	/* We're guaranteed here that CTX is in R6. */
-	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
 	if (!indirect) {
 		*insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
 	} else {
@@ -5427,6 +5425,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
 		if (orig->imm)
 			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
 	}
 
+	/* We're guaranteed here that CTX is in R6. */
+	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
 	switch (BPF_SIZE(orig->code)) {
 	case BPF_B:
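
To show the net effect in context, below is a condensed sketch of how the
emission path in bpf_gen_ld_abs() reads with this patch applied. It is a
paraphrase for illustration rather than a verbatim copy of net/core/filter.c;
the 8- and 32-bit helper names and the trailing error handling are assumptions
based on the 16-bit helper named in the commit message.

/* Illustrative sketch of bpf_gen_ld_abs() with the fix applied; not a
 * verbatim copy of net/core/filter.c.
 */
static int bpf_gen_ld_abs(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf)
{
	bool indirect = BPF_MODE(orig->code) == BPF_IND;
	struct bpf_insn *insn = insn_buf;

	/* Materialize the offset in r2 first, while the program's source
	 * register (possibly r1, as in the report) still holds its value.
	 */
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
		if (orig->imm)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
	}

	/* Only now clobber r1 with the context pointer; CTX is guaranteed
	 * to be in R6 at this point.
	 */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);

	/* Call the size-specific skb load helper. The BPF_H case matches
	 * bpf_skb_load_helper_16_no_cache from the report; the 8- and
	 * 32-bit variants are assumed to follow the same naming scheme.
	 */
	switch (BPF_SIZE(orig->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
		break;
	default:
		return 0;
	}

	/* Error handling emitted after the call is omitted here. */
	return insn - insn_buf;
}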