author    Nicolas Schichan <nschichan@freebox.fr>  2015-05-07 17:14:21 +0200
committer David S. Miller <davem@davemloft.net>    2015-05-10 19:21:49 -0400
commit    0b59d8806a31bb0267b3a461e8fef20c727bdbf6 (patch)
tree      b10da6cabb48be6b718189c7217e24fecb051b4a /arch/arm/net
parent    19fc99d0c6ba7d9b65456496b5bb2169d5f74cd0 (diff)
ARM: net: delegate filter to kernel interpreter when imm_offset() return value can't fit into 12 bits.
The ARM JIT code emits "ldr rX, [pc, #offset]" to access the literal pool. The maximum #offset value is 4095; if the generated code grows too large, #offset can overflow and no longer point at the expected slot in the literal pool. Worse, when it overflows, the overflowing bits can end up changing the destination register of the ldr instruction.

Fix that by detecting the overflow in imm_offset() and setting a flag that is checked for each BPF instruction converted in build_body(). As of now the overflow can only be detected during the second pass. As a result the second build_body() call can now fail, so add the corresponding cleanup code for that case.

Using multiple literal pools in the JITed code would require lots of intrusive changes to the JIT code (better done as a feature than as a fix); simply delegating to the kernel BPF interpreter in that case is a more straightforward, minimal fix that is easy to backport.

Fixes: ddecdfcea0ae ("ARM: 7259/3: net: JIT compiler for packet filters")
Signed-off-by: Nicolas Schichan <nschichan@freebox.fr>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
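For illustration: the 12-bit limit comes from the imm12 field of the A32 ldr encoding, and the destination register Rt sits directly above it in bits [15:12], which is why an oversized offset can also corrupt the register. A minimal user-space sketch of the range check (hypothetical helper name, not code from this patch; 0xe59f0000 is the base encoding of "ldr rt, [pc, #imm]" with cond=AL, U=1):

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical sketch, not the kernel emitter: build the A32 encoding
 * of "ldr rt, [pc, #offset]" (encoding A1, cond=AL, U=1).  The imm12
 * field is bits [11:0]; Rt is bits [15:12], directly above it.
 */
static uint32_t encode_ldr_pc(unsigned int rt, unsigned int offset)
{
	return 0xe59f0000u | (rt << 12) | offset; /* naive: no range check */
}

int main(void)
{
	/* 4095 is the largest offset that fits; 4096 sets bit 12 */
	printf("4095 overflows: %d\n", !!(4095u & ~0xfffu)); /* 0 */
	printf("4096 overflows: %d\n", !!(4096u & ~0xfffu)); /* 1 */

	/*
	 * With an unchecked offset of 4096, bit 12 spills into the Rt
	 * field: "ldr r0, ..." silently becomes "ldr r1, ...".
	 */
	printf("rt actually encoded: %u\n",
	       (unsigned)((encode_ldr_pc(0, 4096) >> 12) & 0xfu));
	return 0;
}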
Diffstat (limited to 'arch/arm/net')
-rw-r--r--  arch/arm/net/bpf_jit_32.c  27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index f412b53ed268..e0e23582c8b4 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -54,6 +54,7 @@
 #define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
 
 #define FLAG_NEED_X_RESET	(1 << 0)
+#define FLAG_IMM_OVERFLOW	(1 << 1)
 
 struct jit_ctx {
 	const struct bpf_prog *skf;
@@ -293,6 +294,15 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
 	/* PC in ARM mode == address of the instruction + 8 */
 	imm = offset - (8 + ctx->idx * 4);
 
+	if (imm & ~0xfff) {
+		/*
+		 * literal pool is too far, signal it into flags. we
+		 * can only detect it on the second pass unfortunately.
+		 */
+		ctx->flags |= FLAG_IMM_OVERFLOW;
+		return 0;
+	}
+
 	return imm;
 }
 
@@ -866,6 +876,14 @@ b_epilogue:
 		default:
 			return -1;
 		}
+
+		if (ctx->flags & FLAG_IMM_OVERFLOW)
+			/*
+			 * this instruction generated an overflow when
+			 * trying to access the literal pool, so
+			 * delegate this filter to the kernel interpreter.
+			 */
+			return -1;
 	}
 
 	/* compute offsets only during the first pass */
@@ -928,7 +946,14 @@ void bpf_jit_compile(struct bpf_prog *fp)
 
 	ctx.idx = 0;
 	build_prologue(&ctx);
-	build_body(&ctx);
+	if (build_body(&ctx) < 0) {
+#if __LINUX_ARM_ARCH__ < 7
+		if (ctx.imm_count)
+			kfree(ctx.imms);
+#endif
+		bpf_jit_binary_free(header);
+		goto out;
+	}
 	build_epilogue(&ctx);
 
 	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
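As a rough sketch of the control flow the patch adds (a simplified, hypothetical model; only the flag name and the 12-bit test mirror the patch, the surrounding structure is illustrative): the offset helper records the overflow, the per-instruction loop bails out, and the caller then falls back to the BPF interpreter.

/*
 * Simplified, hypothetical model of the fallback path above;
 * not the kernel code.
 */
struct jit_state {
	unsigned int flags;
};

#define FLAG_IMM_OVERFLOW	(1 << 1)

/* offset helper: flag values that cannot fit the 12-bit immediate field */
static int literal_offset(struct jit_state *st, int imm)
{
	if (imm & ~0xfff) {
		st->flags |= FLAG_IMM_OVERFLOW;
		return 0;		/* placeholder offset */
	}
	return imm;
}

/* per-instruction loop: stop as soon as an overflow was flagged */
static int build_body_model(struct jit_state *st, const int *offs, int n)
{
	for (int i = 0; i < n; i++) {
		literal_offset(st, offs[i]);
		if (st->flags & FLAG_IMM_OVERFLOW)
			return -1;	/* caller falls back to the interpreter */
	}
	return 0;
}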