Diffstat (limited to 'samples')
-rw-r--r--   samples/bpf/bpf_load.c          7
-rw-r--r--   samples/bpf/tracex5_kern.c      1
-rw-r--r--   samples/seccomp/bpf-helper.h  125
3 files changed, 80 insertions, 53 deletions
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index 396e204888b3..b86ee54da2d1 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -277,6 +277,11 @@ int load_bpf_file(char *path)
 	Elf_Data *data, *data_prog, *symbols = NULL;
 	char *shname, *shname_prog;
 
+	/* reset global variables */
+	kern_version = 0;
+	memset(license, 0, sizeof(license));
+	memset(processed_sec, 0, sizeof(processed_sec));
+
 	if (elf_version(EV_CURRENT) == EV_NONE)
 		return 1;
 
@@ -328,6 +333,8 @@ int load_bpf_file(char *path)
 
 	/* load programs that need map fixup (relocations) */
 	for (i = 1; i < ehdr.e_shnum; i++) {
+		if (processed_sec[i])
+			continue;
 
 		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
 			continue;
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
index fd12d7154d42..7e4cf74553ff 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5_kern.c
@@ -8,6 +8,7 @@
 #include <linux/version.h>
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/seccomp.h>
+#include <uapi/linux/unistd.h>
 #include "bpf_helpers.h"
 
 #define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h
index 38ee70f3cd5b..1d8de9edd858 100644
--- a/samples/seccomp/bpf-helper.h
+++ b/samples/seccomp/bpf-helper.h
@@ -138,7 +138,7 @@ union arg64 {
 #define ARG_32(idx) \
 	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
 
-/* Loads hi into A and lo in X */
+/* Loads lo into M[0] and hi into M[1] and A */
 #define ARG_64(idx) \
 	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
 	BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
@@ -153,88 +153,107 @@ union arg64 {
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
 	jt
 
-/* Checks the lo, then swaps to check the hi. A=lo,X=hi */
+#define JA32(value, jt) \
+	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
+	jt
+
+#define JGE32(value, jt) \
+	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
+	jt
+
+#define JGT32(value, jt) \
+	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
+	jt
+
+#define JLE32(value, jt) \
+	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
+	jt
+
+#define JLT32(value, jt) \
+	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
+	jt
+
+/*
+ * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
+ * A and M[1]. This invariant is kept by restoring A if necessary.
+ */
 #define JEQ64(lo, hi, jt) \
+	/* if (hi != arg.hi) goto NOMATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
 	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	/* if (lo != arg.lo) goto NOMATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
 #define JNE64(lo, hi, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	/* if (hi != arg.hi) goto MATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo != arg.lo) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
-
-#define JA32(value, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
-	jt
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
 #define JA64(lo, hi, jt) \
+	/* if (hi & arg.hi) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo & arg.lo) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
-#define JGE32(value, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
-	jt
-
-#define JLT32(value, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
-	jt
-
-/* Shortcut checking if hi > arg.hi. */
 #define JGE64(lo, hi, jt) \
+	/* if (hi > arg.hi) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
+	/* if (hi != arg.hi) goto NOMATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo >= arg.lo) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
-	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
-
-#define JLT64(lo, hi, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
-	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
-#define JGT32(value, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
-	jt
-
-#define JLE32(value, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
-	jt
-
-/* Check hi > args.hi first, then do the GE checking */
 #define JGT64(lo, hi, jt) \
+	/* if (hi > arg.hi) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
+	/* if (hi != arg.hi) goto NOMATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo > arg.lo) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
 #define JLE64(lo, hi, jt) \
-	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \
-	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
-	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
+	/* if (hi < arg.hi) goto MATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+	/* if (hi != arg.hi) goto NOMATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo <= arg.lo) goto MATCH; */ \
 	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
-	BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
+	jt, \
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
+
+#define JLT64(lo, hi, jt) \
+	/* if (hi < arg.hi) goto MATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
+	/* if (hi != arg.hi) goto NOMATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
+	BPF_STMT(BPF_LD+BPF_MEM, 0), \
+	/* if (lo < arg.lo) goto MATCH; */ \
+	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
+	BPF_STMT(BPF_LD+BPF_MEM, 1), \
 	jt, \
-	BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
+	BPF_STMT(BPF_LD+BPF_MEM, 1)
 
 #define LOAD_SYSCALL_NR \
 	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
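
The bpf_load.c hunks exist so that load_bpf_file() can be invoked more than once in the same process: the license[], kern_version and processed_sec[] globals are zeroed on entry, and sections already handled earlier are skipped in the relocation pass. A minimal caller sketch, not part of the patch (the object file names are only illustrative):

#include <stdio.h>
#include "bpf_load.h"

int main(void)
{
	char obj1[] = "./tracex5_kern.o";
	char obj2[] = "./tracex1_kern.o";

	/* First call fills license[], kern_version and processed_sec[]. */
	if (load_bpf_file(obj1))
		return 1;
	/* Second call now starts from freshly zeroed globals. */
	if (load_bpf_file(obj2))
		return 1;
	printf("loaded both objects\n");
	return 0;
}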
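
For the bpf-helper.h change, the new comment spells out the invariant the 64-bit helpers rely on: after ARG_64(), the low word of the argument sits in scratch slot M[0] while the high word sits in both M[1] and the accumulator A, and every JXX64 macro restores A from M[1] on its fall-through path. Below is a minimal sketch of a raw filter using these helpers directly. It is not part of the patch: the compared value 0x100000002 and the use of argument 0 are arbitrary, the usual architecture and syscall-number checks are omitted, and a 64-bit target is assumed so that the 64-bit variants apply.

#include <linux/filter.h>
#include <linux/seccomp.h>
#include "bpf-helper.h"

static struct sock_filter filter[] = {
	/* Establish the invariant: lo -> M[0]; hi -> M[1] and A. */
	ARG_64(0),
	/* If arg0 == 0x0000000100000002, return ALLOW; otherwise fall
	 * through with A restored to hi, as the invariant promises. */
	JEQ64(0x2, 0x1, BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)),
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL),
};

static struct sock_fprog prog = {
	.len = sizeof(filter) / sizeof(filter[0]),
	.filter = filter,
};

In the seccomp samples such a program is installed with prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) followed by prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); normally the header's word-size wrappers (JEQ(), JGT() and friends) pick the 32- or 64-bit variant instead of spelling out JEQ64 by hand.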