author     Alexei Starovoitov <ast@kernel.org>  2018-05-14 19:11:46 -0700
committer  Alexei Starovoitov <ast@kernel.org>  2018-05-14 19:11:46 -0700
commit     fb40c9ddd66b9c9bb811bbee125b3cb3ba1faee7 (patch)
tree       0febd11729744b0eb50eae2ba198d80f7a6d05ac /tools/testing/selftests/bpf/test_verifier.c
parent     53ea24c20cea32b1dc70673402b496c4a5291d2d (diff)
parent     a82d8cd398716b41f0fbe3882e3fe3f5ccf9f9cf (diff)
Merge branch 'bpf-jit-cleanups'
Daniel Borkmann says:

====================
This series follows up mostly with some minor cleanups on top of
'Move ld_abs/ld_ind to native BPF' as well as implements a better
32/64 bit immediate load into register and saves the tail call init
on cBPF for the arm64 JIT. Last but not least, we add a couple of
test cases. For details please see the individual patches. Thanks!

v1 -> v2:
  - Minor fix in i64_i16_blocks() to remove the 24 shift.
  - Added last two patches.
  - Added Acks from prior round.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
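[Editor's note] For context on the "32/64 bit immediate load" item: on arm64 a 64-bit immediate is materialized with movz/movk instructions, one per 16-bit chunk that differs from a background fill pattern, so the JIT counts such chunks to pick the cheaper encoding. Below is a minimal sketch of that counting idea; the helper name count_i16_blocks() and its exact shape are illustrative assumptions, not the kernel's i64_i16_blocks() verbatim.

    /* Sketch: count the 16-bit blocks of a 64-bit immediate that differ
     * from the background fill (all-zeros, or all-ones when the value is
     * built from an inverted base). Each differing block costs one
     * movz/movk. Blocks sit at shifts 0, 16, 32 and 48 -- a shift of 24
     * would straddle two blocks, which is the kind of slip the v2 note
     * above removes. */
    #include <stdint.h>

    static int count_i16_blocks(uint64_t val, int inverse)
    {
            uint16_t fill = inverse ? 0xffff : 0x0000;

            return (((val >>  0) & 0xffff) != fill) +
                   (((val >> 16) & 0xffff) != fill) +
                   (((val >> 32) & 0xffff) != fill) +
                   (((val >> 48) & 0xffff) != fill);
    }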
Diffstat (limited to 'tools/testing/selftests/bpf/test_verifier.c')
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 62
1 file changed, 62 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 275b4570b5b8..a877af00605d 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -41,6 +41,7 @@
# endif
#endif
#include "bpf_rlimit.h"
+#include "bpf_rand.h"
#include "../../../include/linux/filter.h"
#ifndef ARRAY_SIZE
@@ -152,6 +153,30 @@ static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
insn[i] = BPF_EXIT_INSN();
}
+static void bpf_fill_rand_ld_dw(struct bpf_test *self)
+{
+ struct bpf_insn *insn = self->insns;
+ uint64_t res = 0;
+ int i = 0;
+
+ insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
+ while (i < self->retval) {
+ uint64_t val = bpf_semi_rand_get();
+ struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
+
+ res ^= val;
+ insn[i++] = tmp[0];
+ insn[i++] = tmp[1];
+ insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
+ }
+ insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
+ insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
+ insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
+ insn[i] = BPF_EXIT_INSN();
+ res ^= (res >> 32);
+ self->retval = (uint32_t)res;
+}
+
static struct bpf_test tests[] = {
{
"add+sub+mul",
@@ -11974,6 +11999,42 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.retval = 10,
},
+ {
+ "ld_dw: xor semi-random 64 bit imms, test 1",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 4090,
+ },
+ {
+ "ld_dw: xor semi-random 64 bit imms, test 2",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2047,
+ },
+ {
+ "ld_dw: xor semi-random 64 bit imms, test 3",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 511,
+ },
+ {
+ "ld_dw: xor semi-random 64 bit imms, test 4",
+ .insns = { },
+ .data = { },
+ .fill_helper = bpf_fill_rand_ld_dw,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 5,
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)
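[Editor's note] A detail worth spelling out about the test entries above: .retval does double duty. Before the fill helper runs it is the requested program size (the while (i < self->retval) bound in bpf_fill_rand_ld_dw()), and the helper then overwrites it with the expected 32-bit XOR result. Each round costs three instruction slots (two for the ld_imm64, one for the xor), with one mov up front and four trailing slots (three for the fold, one for the exit), so the 4090 budget yields 1363 ld_dw rounds and a 4094-instruction program, just under BPF_MAXINSNS (4096). A quick sketch of that arithmetic (my own back-of-the-envelope, not from the patch):

    /* Sketch: instructions emitted by bpf_fill_rand_ld_dw() for a given
     * pre-fill .retval. Loop index starts at 1 (slot 0 holds the mov32),
     * each round advances it by 3, then 3 fold insns plus exit follow. */
    #include <stdio.h>

    int main(void)
    {
            int budgets[] = { 4090, 2047, 511, 5 }; /* the four tests above */

            for (int t = 0; t < 4; t++) {
                    int i = 1, rounds = 0;

                    while (i < budgets[t]) {
                            i += 3; /* ld_imm64 (2 slots) + xor (1 slot) */
                            rounds++;
                    }
                    printf("budget %4d: %4d ld_dw rounds, %4d insns total\n",
                           budgets[t], rounds, i + 4);
            }
            return 0;
    }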
@@ -12346,5 +12407,6 @@ int main(int argc, char **argv)
return EXIT_FAILURE;
}
+ bpf_semi_rand_init();
return do_test(unpriv, from, to);
}
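[Editor's note] Finally, the bpf_semi_rand_init() call added to main() seeds the generator once before any fill helper runs. The test stays self-checking regardless of the seed: the expected retval is derived from the very values bpf_semi_rand_get() hands to the program builder. Purely as an illustration, here is a hypothetical reconstruction of that computation; it assumes the generator stream is read from the same position the fill helper would see, and only the two calls visible in the diff are used.

    #include <stdint.h>

    #include "bpf_rand.h"   /* bpf_semi_rand_init()/bpf_semi_rand_get(), as in the diff */

    /* Hypothetical: recompute what bpf_fill_rand_ld_dw() would store in
     * self->retval for a given pre-fill budget, mirroring its loop. */
    static uint32_t expected_retval(int budget)
    {
            uint64_t res = 0;
            int i;

            for (i = 1; i < budget; i += 3)
                    res ^= bpf_semi_rand_get();

            return (uint32_t)(res ^ (res >> 32));
    }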