author    John Fastabend <john.fastabend@gmail.com>  2020-03-30 14:38:21 -0700
committer Alexei Starovoitov <ast@kernel.org>        2020-03-30 15:00:31 -0700
commit    41f70fe0649dddf02046315dc566e06da5a2dc91 (patch)
tree      891b4fa11cdd0212fe9f094af6d6a1e70cc679f9 /tools
parent    32f13a5add87caf11e9dd262372b4a2b79c57aa5 (diff)
bpf: Test_verifier, add alu32 bounds tracking tests
It's possible to have divergent ALU32 and ALU64 bounds when using JMP32 instructions and ALU64 arithmetic operations. Sometimes clang will even generate this code. Because the case is a bit tricky, let's add a specific test for it.

Here is a pseudocode asm version to illustrate the idea:

    r0 = 0xffffffff00000001;
    if w0 > 1 goto %l[fail];
    r0 += 1
    if w0 > 2 goto %l[fail]
    exit

The intent here is that the verifier will fail the load if the 32-bit bounds are not tracked correctly through the ALU64 op. Similarly, we can check that the 64-bit bounds are correctly zero extended after ALU32 ops:

    r0 = 0xffffffff00000001;
    w0 += 1
    if r0 > 3 goto %l[fail];
    exit

The above will fail if we do not correctly zero extend the 64-bit bounds after a 32-bit op.

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/158560430155.10843.514209255758200922.stgit@john-Precision-5820-Tower
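To make the divergence concrete: a JMP32 test such as "if w0 > 1" inspects only the low 32 bits of the register, while the full 64-bit value can keep its high bits through an ALU64 add. A small user-space C sketch (an illustration only, not part of the patch) replays the values from the pseudocode above:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* same constant the test builds: high bits set, low 32 bits = 1 */
            uint64_t r0 = 0xffffffff00000001ULL;
            uint32_t w0 = (uint32_t)r0;     /* the "w0" subregister view */

            printf("w0 > 1? %s\n", w0 > 1 ? "yes" : "no");  /* no: w0 == 1 */

            r0 += 1;                        /* ALU64 add on the full register */
            w0 = (uint32_t)r0;

            printf("w0 > 2? %s\n", w0 > 2 ? "yes" : "no");  /* no: w0 == 2 */
            printf("r0 = 0x%llx\n", (unsigned long long)r0); /* 0xffffffff00000002 */
            return 0;
    }

The verifier has to reach the same conclusion symbolically: knowing w0 <= 1 before the ALU64 add, it must still know w0 <= 2 after it, otherwise the guarded load in the test below gets rejected.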
Diffstat (limited to 'tools')
-rw-r--r-- tools/testing/selftests/bpf/verifier/bounds.c | 39
1 file changed, 39 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
index cf72fcc5ced0..4d0d09574bf4 100644
--- a/tools/testing/selftests/bpf/verifier/bounds.c
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -500,3 +500,42 @@
.errstr = "map_value pointer and 1000000000000",
.result = REJECT
},
+{
+ "bounds check mixed 32bit and 64bit arithmatic. test1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ /* r1 = 0xffffFFFF00000001 */
+ BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 1, 3),
+ /* check ALU64 op keeps 32bit bounds */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+ BPF_JMP_A(1),
+ /* invalid ldx if bounds are lost above */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT
+},
+{
+ "bounds check mixed 32bit and 64bit arithmatic. test2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, -1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ /* r1 = 0xffffFFFF00000001 */
+ BPF_MOV64_IMM(BPF_REG_2, 3),
+ /* after the 32-bit add below, r1 = 0x2 */
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
+ /* check ALU32 op zero extends 64bit bounds */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 1),
+ BPF_JMP_A(1),
+ /* invalid ldx if bounds are lost above */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT
+},
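The zero-extension property test2 depends on can be reproduced the same way: BPF ALU32 ops compute on the low 32 bits and then zero extend the result into the full register. Another standalone C sketch (again an illustration, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t r1 = 0xffffffff00000001ULL;

            /* ALU32 semantics: operate on the low 32 bits, then zero
             * extend the result into the full 64-bit register
             */
            r1 = (uint32_t)((uint32_t)r1 + 1);

            /* the high bits are gone: r1 is exactly 2, so a 64-bit
             * comparison such as "if r1 > 3" has a known outcome
             */
            printf("r1 = 0x%llx, r1 > 3? %s\n",
                   (unsigned long long)r1, r1 > 3 ? "yes" : "no");
            return 0;
    }

Both new cases are picked up automatically by the test_verifier selftest in tools/testing/selftests/bpf, which compiles in the files under verifier/ and compares the verifier's verdict for each program against its .result field, so a bounds-tracking regression shows up as a rejected ACCEPT test.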