author    Edward Cree <ecree@solarflare.com>       2017-08-07 15:29:11 +0100
committer David S. Miller <davem@davemloft.net>    2017-08-08 17:51:35 -0700
commit    1f9ab38f8a155913c9a587a673e61eedb75c9bc8 (patch)
tree      413ab94b9a98d08a7b9055758ef02f368f5425d1 /tools
parent    c2c3e11712e23d430a49e1247a8ec211740c2254 (diff)
selftests/bpf: don't try to access past MAX_PACKET_OFF in test_verifier
A number of selftests fell foul of the changed MAX_PACKET_OFF handling. For instance, "direct packet access: test2" was potentially reading four bytes from pkt + 0xffff, which could take it past the verifier's limit, causing the program to be rejected (checks against pkt_end didn't give us any reg->range). Increase the shifts by one so that R2 is now mask 0x7fff instead of mask 0xffff.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
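To make the arithmetic concrete, below is a minimal, illustrative C sketch (not part of the patch) of the bound being hit. It assumes MAX_PACKET_OFF is 0xffff, the verifier's packet-offset limit at the time of this commit, and models the check only roughly; the helper load_is_safe is hypothetical.

/*
 * Illustrative only: a rough model of the verifier's bound.  A load of
 * `size` bytes at (pkt + off) must keep every byte it touches within
 * MAX_PACKET_OFF of the packet start, so a 16-bit mask plus a 4-byte
 * read overruns the limit while a 15-bit mask does not.
 */
#include <stdio.h>

#define MAX_PACKET_OFF 0xffff	/* verifier packet-offset limit */

static int load_is_safe(unsigned int off, unsigned int size)
{
	/* approximate: bytes touched must stay within MAX_PACKET_OFF */
	return off + size <= MAX_PACKET_OFF;
}

int main(void)
{
	printf("mask 0xffff, 4-byte read: %s\n",
	       load_is_safe(0xffff, 4) ? "accepted" : "rejected"); /* rejected */
	printf("mask 0x7fff, 4-byte read: %s\n",
	       load_is_safe(0x7fff, 4) ? "accepted" : "rejected"); /* accepted */
	return 0;
}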
Diffstat (limited to 'tools')
-rw-r--r--    tools/testing/selftests/bpf/test_verifier.c    | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 06914941f376..876b8785fd83 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2330,8 +2330,8 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, data)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
@@ -2710,11 +2710,11 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -2736,10 +2736,10 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0xffff),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -2765,7 +2765,7 @@ static struct bpf_test tests[] = {
BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 48),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
@@ -2820,7 +2820,7 @@ static struct bpf_test tests[] = {
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
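As a side note, the shift-based variants above bound the register the same way the explicit BPF_AND does: on a 64-bit register, a logical left shift by 49 followed by a logical right shift by 49 keeps only the low 15 bits, i.e. it is equivalent to masking with 0x7fff. A tiny stand-alone check of that identity (illustrative only, with an arbitrary test value):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t r2 = 0xdeadbeefcafef00dULL;	/* arbitrary 64-bit value */

	/* (x << 49) >> 49 discards everything but the low 15 bits */
	assert(((r2 << 49) >> 49) == (r2 & 0x7fff));
	return 0;
}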