author		Zi Shen Lim <zlim.lnx@gmail.com>	2014-12-03 08:38:01 +0000
committer	Will Deacon <will.deacon@arm.com>	2014-12-03 18:04:09 +0000
commit		51c9fbb1b146f3336a93d398c439b6fbfe5ab489 (patch)
tree		2928e70795aaf5fba8d0263ac06b9205d42364d7 /arch/arm64/net
parent		e4f88d833bec29b8e6fadc1b2488f0c6370935e1 (diff)
arm64: bpf: lift restriction on last instruction
Earlier implementation assumed last instruction is BPF_EXIT. Since this
is no longer a restriction in eBPF, we remove this limitation.

Per Alexei Starovoitov [1]:

> classic BPF has a restriction that last insn is always BPF_RET.
> eBPF doesn't have BPF_RET instruction and this restriction.
> It has BPF_EXIT insn which can appear anywhere in the program
> one or more times and it doesn't have to be last insn.

[1] https://lkml.org/lkml/2014/11/27/2

Fixes: e54bcde3d69d ("arm64: eBPF JIT compiler")
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
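To make the quoted point concrete, here is a minimal illustration (not part of the commit; the array name sample_prog is made up for the example): an eBPF instruction sequence, written with the kernel's BPF_* insn macros from <linux/filter.h>, in which a BPF_EXIT appears in the middle of the program. For such an exit the JIT has to branch to the epilogue, which is the offset that epilogue_offset() in the diff below computes; only an EXIT that happens to be the last instruction can simply fall through.

#include <linux/filter.h>

/* Hypothetical program: BPF_EXIT is not the last instruction. */
static const struct bpf_insn sample_prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 1),            /* r0 = 1 */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),  /* if (r1 == 0) skip next insn */
	BPF_EXIT_INSN(),                        /* exit mid-program: JIT must branch to epilogue */
	BPF_MOV64_IMM(BPF_REG_0, 2),            /* r0 = 2 */
	BPF_EXIT_INSN(),                        /* last insn: can fall through to epilogue */
};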
Diffstat (limited to 'arch/arm64/net')
-rw-r--r--	arch/arm64/net/bpf_jit_comp.c	13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 41f1e3e2ea24..edba042b2325 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -60,7 +60,7 @@ struct jit_ctx {
 	const struct bpf_prog *prog;
 	int idx;
 	int tmp_used;
-	int body_offset;
+	int epilogue_offset;
 	int *offset;
 	u32 *image;
 };
@@ -130,8 +130,8 @@ static void jit_fill_hole(void *area, unsigned int size)
 static inline int epilogue_offset(const struct jit_ctx *ctx)
 {
-	int to = ctx->offset[ctx->prog->len - 1];
-	int from = ctx->idx - ctx->body_offset;
+	int to = ctx->epilogue_offset;
+	int from = ctx->idx;
 	return to - from;
 }
@@ -463,6 +463,8 @@ emit_cond_jmp:
 	}
 	/* function return */
 	case BPF_JMP | BPF_EXIT:
+		/* Optimization: when last instruction is EXIT,
+		   simply fallthrough to epilogue. */
 		if (i == ctx->prog->len - 1)
 			break;
 		jmp_offset = epilogue_offset(ctx);
@@ -685,11 +687,13 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	/* 1. Initial fake pass to compute ctx->idx. */
-	/* Fake pass to fill in ctx->offset. */
+	/* Fake pass to fill in ctx->offset and ctx->tmp_used. */
 	if (build_body(&ctx))
 		goto out;
 	build_prologue(&ctx);
+
+	ctx.epilogue_offset = ctx.idx;
 	build_epilogue(&ctx);
 	/* Now we know the actual image size. */
@@ -706,7 +710,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 	build_prologue(&ctx);
-	ctx.body_offset = ctx.idx;
 	if (build_body(&ctx)) {
 		bpf_jit_binary_free(header);
 		goto out;