-rw-r--r-- arch/arm64/include/asm/insn.h | 4
-rw-r--r-- arch/arm64/lib/insn.c | 6
-rw-r--r-- arch/arm64/net/bpf_jit.h | 12
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 91
-rw-r--r-- arch/riscv/net/bpf_jit.h | 30
-rw-r--r-- arch/riscv/net/bpf_jit_comp64.c | 102
-rw-r--r-- include/linux/bpf.h | 3
-rw-r--r-- include/linux/bpf_verifier.h | 2
-rw-r--r-- include/linux/trace_events.h | 6
-rw-r--r-- include/net/lwtunnel.h | 5
-rw-r--r-- include/uapi/linux/bpf.h | 22
-rw-r--r-- kernel/bpf/cpumap.c | 113
-rw-r--r-- kernel/bpf/helpers.c | 8
-rw-r--r-- kernel/bpf/syscall.c | 135
-rw-r--r-- kernel/bpf/verifier.c | 94
-rw-r--r-- kernel/trace/bpf_trace.c | 342
-rw-r--r-- lib/test_bpf.c | 12
-rw-r--r-- net/core/lwt_bpf.c | 7
-rw-r--r-- net/ipv4/ip_output.c | 2
-rw-r--r-- net/ipv6/ip6_output.c | 2
-rw-r--r-- samples/bpf/.gitignore | 12
-rw-r--r-- samples/bpf/Makefile | 68
-rw-r--r-- samples/bpf/README.rst | 6
-rw-r--r-- samples/bpf/net_shared.h | 2
-rw-r--r-- samples/bpf/offwaketime.bpf.c (renamed from samples/bpf/offwaketime_kern.c) | 39
-rw-r--r-- samples/bpf/offwaketime_user.c | 2
-rw-r--r-- samples/bpf/spintest.bpf.c (renamed from samples/bpf/spintest_kern.c) | 27
-rw-r--r-- samples/bpf/spintest_user.c | 24
-rw-r--r-- samples/bpf/test_map_in_map.bpf.c | 10
-rw-r--r-- samples/bpf/test_overhead_kprobe.bpf.c | 20
-rw-r--r-- samples/bpf/test_overhead_tp.bpf.c | 29
-rw-r--r-- samples/bpf/tracex1.bpf.c (renamed from samples/bpf/tracex1_kern.c) | 25
-rw-r--r-- samples/bpf/tracex1_user.c | 2
-rw-r--r-- samples/bpf/tracex3.bpf.c (renamed from samples/bpf/tracex3_kern.c) | 40
-rw-r--r-- samples/bpf/tracex3_user.c | 2
-rw-r--r-- samples/bpf/tracex4.bpf.c (renamed from samples/bpf/tracex4_kern.c) | 3
-rw-r--r-- samples/bpf/tracex4_user.c | 2
-rw-r--r-- samples/bpf/tracex5.bpf.c (renamed from samples/bpf/tracex5_kern.c) | 12
-rw-r--r-- samples/bpf/tracex5_user.c | 2
-rw-r--r-- samples/bpf/tracex6.bpf.c (renamed from samples/bpf/tracex6_kern.c) | 20
-rw-r--r-- samples/bpf/tracex6_user.c | 2
-rw-r--r-- samples/bpf/tracex7.bpf.c (renamed from samples/bpf/tracex7_kern.c) | 3
-rw-r--r-- samples/bpf/tracex7_user.c | 2
-rw-r--r-- samples/bpf/xdp1_kern.c | 100
-rw-r--r-- samples/bpf/xdp1_user.c | 166
-rw-r--r-- samples/bpf/xdp2_kern.c | 125
-rw-r--r-- samples/bpf/xdp_monitor.bpf.c | 8
-rw-r--r-- samples/bpf/xdp_monitor_user.c | 118
-rw-r--r-- samples/bpf/xdp_redirect.bpf.c | 49
-rw-r--r-- samples/bpf/xdp_redirect_cpu.bpf.c | 539
-rw-r--r-- samples/bpf/xdp_redirect_cpu_user.c | 559
-rw-r--r-- samples/bpf/xdp_redirect_map.bpf.c | 97
-rw-r--r-- samples/bpf/xdp_redirect_map_multi.bpf.c | 77
-rw-r--r-- samples/bpf/xdp_redirect_map_multi_user.c | 232
-rw-r--r-- samples/bpf/xdp_redirect_map_user.c | 228
-rw-r--r-- samples/bpf/xdp_redirect_user.c | 172
-rw-r--r-- samples/bpf/xdp_rxq_info_kern.c | 140
-rw-r--r-- samples/bpf/xdp_rxq_info_user.c | 614
-rw-r--r-- samples/bpf/xdp_sample_pkts_kern.c | 57
-rw-r--r-- samples/bpf/xdp_sample_pkts_user.c | 196
-rw-r--r-- tools/include/uapi/linux/bpf.h | 22
-rw-r--r-- tools/lib/bpf/Build | 2
-rw-r--r-- tools/lib/bpf/bpf.c | 11
-rw-r--r-- tools/lib/bpf/bpf.h | 11
-rw-r--r-- tools/lib/bpf/elf.c | 440
-rw-r--r-- tools/lib/bpf/libbpf.c | 424
-rw-r--r-- tools/lib/bpf/libbpf.h | 52
-rw-r--r-- tools/lib/bpf/libbpf.map | 2
-rw-r--r-- tools/lib/bpf/libbpf_internal.h | 21
-rw-r--r-- tools/lib/bpf/relo_core.c | 2
-rw-r--r-- tools/lib/bpf/usdt.c | 116
-rw-r--r-- tools/testing/selftests/bpf/.gitignore | 1
-rw-r--r-- tools/testing/selftests/bpf/Makefile | 5
-rw-r--r-- tools/testing/selftests/bpf/bench.h | 9
-rw-r--r-- tools/testing/selftests/bpf/config | 2
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/bpf_cookie.c | 78
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c | 8
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c | 33
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/lwt_helpers.h | 139
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/lwt_redirect.c | 330
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/lwt_reroute.c | 262
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c | 26
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/task_kfunc.c | 2
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/tc_bpf.c | 36
-rw-r--r-- tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c | 415
-rw-r--r-- tools/testing/selftests/bpf/progs/local_kptr_stash.c | 28
-rw-r--r-- tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c | 85
-rw-r--r-- tools/testing/selftests/bpf/progs/refcounted_kptr.c | 71
-rw-r--r-- tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c | 28
-rw-r--r-- tools/testing/selftests/bpf/progs/task_kfunc_success.c | 51
-rw-r--r-- tools/testing/selftests/bpf/progs/test_ldsx_insn.c | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/test_lwt_redirect.c | 90
-rw-r--r-- tools/testing/selftests/bpf/progs/test_lwt_reroute.c | 36
-rw-r--r-- tools/testing/selftests/bpf/progs/test_tc_bpf.c | 13
-rw-r--r-- tools/testing/selftests/bpf/progs/uprobe_multi.c | 101
-rw-r--r-- tools/testing/selftests/bpf/progs/uprobe_multi_bench.c | 15
-rw-r--r-- tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c | 16
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_bswap.c | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_gotol.c | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_ldsx.c | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_movsx.c | 3
-rw-r--r-- tools/testing/selftests/bpf/progs/verifier_sdiv.c | 3
-rw-r--r-- tools/testing/selftests/bpf/testing_helpers.h | 10
-rw-r--r-- tools/testing/selftests/bpf/uprobe_multi.c | 91
104 files changed, 3719 insertions, 4212 deletions
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 139a88e4e852..db1aeacd4cd9 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -186,6 +186,8 @@ enum aarch64_insn_ldst_type {
AARCH64_INSN_LDST_LOAD_ACQ_EX,
AARCH64_INSN_LDST_STORE_EX,
AARCH64_INSN_LDST_STORE_REL_EX,
+ AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET,
+ AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET,
};
enum aarch64_insn_adsb_type {
@@ -324,6 +326,7 @@ __AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(store_imm, 0x3FC00000, 0x39000000)
__AARCH64_INSN_FUNCS(load_imm, 0x3FC00000, 0x39400000)
+__AARCH64_INSN_FUNCS(signed_load_imm, 0X3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(store_pre, 0x3FE00C00, 0x38000C00)
__AARCH64_INSN_FUNCS(load_pre, 0x3FE00C00, 0x38400C00)
__AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
@@ -337,6 +340,7 @@ __AARCH64_INSN_FUNCS(ldset, 0x3F20FC00, 0x38203000)
__AARCH64_INSN_FUNCS(swp, 0x3F20FC00, 0x38208000)
__AARCH64_INSN_FUNCS(cas, 0x3FA07C00, 0x08A07C00)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
+__AARCH64_INSN_FUNCS(signed_ldr_reg, 0X3FE0FC00, 0x38A0E800)
__AARCH64_INSN_FUNCS(ldr_imm, 0x3FC00000, 0x39400000)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
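Each __AARCH64_INSN_FUNCS() entry above is a (mask, value) pair: an instruction word belongs to the class when the bits selected by the mask equal the value. A minimal sketch of the test those macros generate (helper name hypothetical, userspace types for brevity):

#include <stdbool.h>
#include <stdint.h>

static inline bool insn_is_class(uint32_t insn, uint32_t mask, uint32_t val)
{
	return (insn & mask) == val;
}

/*
 * e.g. the new signed_ldr_reg class checks
 * (insn & 0x3FE0FC00) == 0x38A0E800; the size field (bits 31:30) stays
 * outside the mask, so one entry covers the LDRSB/LDRSH/LDRSW
 * register-offset forms.
 */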
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index 924934cb85ee..a635ab83fee3 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -385,6 +385,9 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
insn = aarch64_insn_get_ldr_reg_value();
break;
+ case AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET:
+ insn = aarch64_insn_get_signed_ldr_reg_value();
+ break;
case AARCH64_INSN_LDST_STORE_REG_OFFSET:
insn = aarch64_insn_get_str_reg_value();
break;
@@ -430,6 +433,9 @@ u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
insn = aarch64_insn_get_ldr_imm_value();
break;
+ case AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET:
+ insn = aarch64_insn_get_signed_load_imm_value();
+ break;
case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
insn = aarch64_insn_get_str_imm_value();
break;
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index c2edadb8ec6a..23b1b34db088 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -59,10 +59,13 @@
AARCH64_INSN_LDST_##type##_REG_OFFSET)
#define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE)
#define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
+#define A64_LDRSB(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 8, SIGNED_LOAD)
#define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE)
#define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
+#define A64_LDRSH(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 16, SIGNED_LOAD)
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
+#define A64_LDRSW(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 32, SIGNED_LOAD)
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
@@ -73,10 +76,13 @@
AARCH64_INSN_LDST_##type##_IMM_OFFSET)
#define A64_STRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, STORE)
#define A64_LDRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, LOAD)
+#define A64_LDRSBI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 8, SIGNED_LOAD)
#define A64_STRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, STORE)
#define A64_LDRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, LOAD)
+#define A64_LDRSHI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 16, SIGNED_LOAD)
#define A64_STR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, STORE)
#define A64_LDR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, LOAD)
+#define A64_LDRSWI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 32, SIGNED_LOAD)
#define A64_STR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, STORE)
#define A64_LDR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, LOAD)
@@ -186,6 +192,11 @@
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)
+/* Sign extend */
+#define A64_SXTB(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 7)
+#define A64_SXTH(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 15)
+#define A64_SXTW(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 31)
+
/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
aarch64_insn_gen_movewide(Rd, imm16, shift, \
@@ -223,6 +234,7 @@
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
+#define A64_SDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, SDIV)
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)
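The new A64_SXTB/SXTH/SXTW macros are SBFM with immr = 0 and imms = width - 1: keep the low `width` bits and replicate the sign bit upward. A portable C model of that semantic (for widths 8/16/32; sketch only, not from the patch):

#include <stdint.h>

static inline int64_t sxt_model(uint64_t x, unsigned int width)
{
	uint64_t sign = 1ULL << (width - 1);

	x &= (1ULL << width) - 1;		/* keep the low `width` bits */
	return (int64_t)((x ^ sign) - sign);	/* flip+subtract spreads the sign */
}

/* sxt_model(0x80, 8) == -128, matching SXTB applied to a register holding 0x80 */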
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ec2174838f2a..150d1c6543f7 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -715,7 +715,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
/* First pass */
return 0;
- if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
+ if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
return 0;
if (!ctx->prog->aux->extable ||
@@ -779,12 +780,26 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
u8 dst_adj;
int off_adj;
int ret;
+ bool sign_extend;
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
- emit(A64_MOV(is64, dst, src), ctx);
+ switch (insn->off) {
+ case 0:
+ emit(A64_MOV(is64, dst, src), ctx);
+ break;
+ case 8:
+ emit(A64_SXTB(is64, dst, src), ctx);
+ break;
+ case 16:
+ emit(A64_SXTH(is64, dst, src), ctx);
+ break;
+ case 32:
+ emit(A64_SXTW(is64, dst, src), ctx);
+ break;
+ }
break;
/* dst = dst OP src */
case BPF_ALU | BPF_ADD | BPF_X:
@@ -813,11 +828,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
- emit(A64_UDIV(is64, dst, dst, src), ctx);
+ if (!off)
+ emit(A64_UDIV(is64, dst, dst, src), ctx);
+ else
+ emit(A64_SDIV(is64, dst, dst, src), ctx);
break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
- emit(A64_UDIV(is64, tmp, dst, src), ctx);
+ if (!off)
+ emit(A64_UDIV(is64, tmp, dst, src), ctx);
+ else
+ emit(A64_SDIV(is64, tmp, dst, src), ctx);
emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
break;
case BPF_ALU | BPF_LSH | BPF_X:
@@ -840,11 +861,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
/* dst = BSWAP##imm(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
case BPF_ALU | BPF_END | BPF_FROM_BE:
+ case BPF_ALU64 | BPF_END | BPF_FROM_LE:
#ifdef CONFIG_CPU_BIG_ENDIAN
- if (BPF_SRC(code) == BPF_FROM_BE)
+ if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_BE)
goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
- if (BPF_SRC(code) == BPF_FROM_LE)
+ if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
goto emit_bswap_uxt;
#endif
switch (imm) {
@@ -943,12 +965,18 @@ emit_bswap_uxt:
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_K:
emit_a64_mov_i(is64, tmp, imm, ctx);
- emit(A64_UDIV(is64, dst, dst, tmp), ctx);
+ if (!off)
+ emit(A64_UDIV(is64, dst, dst, tmp), ctx);
+ else
+ emit(A64_SDIV(is64, dst, dst, tmp), ctx);
break;
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_K:
emit_a64_mov_i(is64, tmp2, imm, ctx);
- emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
+ if (!off)
+ emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
+ else
+ emit(A64_SDIV(is64, tmp, dst, tmp2), ctx);
emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
break;
case BPF_ALU | BPF_LSH | BPF_K:
@@ -966,7 +994,11 @@ emit_bswap_uxt:
/* JUMP off */
case BPF_JMP | BPF_JA:
- jmp_offset = bpf2a64_offset(i, off, ctx);
+ case BPF_JMP32 | BPF_JA:
+ if (BPF_CLASS(code) == BPF_JMP)
+ jmp_offset = bpf2a64_offset(i, off, ctx);
+ else
+ jmp_offset = bpf2a64_offset(i, imm, ctx);
check_imm26(jmp_offset);
emit(A64_B(jmp_offset), ctx);
break;
@@ -1122,7 +1154,7 @@ emit_cond_jmp:
return 1;
}
- /* LDX: dst = *(size *)(src + off) */
+ /* LDX: dst = (u64)*(unsigned size *)(src + off) */
case BPF_LDX | BPF_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_B:
@@ -1131,6 +1163,13 @@ emit_cond_jmp:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+ /* LDXS: dst_reg = (s64)*(signed size *)(src_reg + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_B:
+ case BPF_LDX | BPF_MEMSX | BPF_H:
+ case BPF_LDX | BPF_MEMSX | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
if (ctx->fpb_offset > 0 && src == fp) {
src_adj = fpb;
off_adj = off + ctx->fpb_offset;
@@ -1138,29 +1177,49 @@ emit_cond_jmp:
src_adj = src;
off_adj = off;
}
+ sign_extend = (BPF_MODE(insn->code) == BPF_MEMSX ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEMSX);
switch (BPF_SIZE(code)) {
case BPF_W:
if (is_lsi_offset(off_adj, 2)) {
- emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
+ if (sign_extend)
+ emit(A64_LDRSWI(dst, src_adj, off_adj), ctx);
+ else
+ emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
} else {
emit_a64_mov_i(1, tmp, off, ctx);
- emit(A64_LDR32(dst, src, tmp), ctx);
+ if (sign_extend)
+ emit(A64_LDRSW(dst, src, tmp), ctx);
+ else
+ emit(A64_LDR32(dst, src, tmp), ctx);
}
break;
case BPF_H:
if (is_lsi_offset(off_adj, 1)) {
- emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
+ if (sign_extend)
+ emit(A64_LDRSHI(dst, src_adj, off_adj), ctx);
+ else
+ emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
} else {
emit_a64_mov_i(1, tmp, off, ctx);
- emit(A64_LDRH(dst, src, tmp), ctx);
+ if (sign_extend)
+ emit(A64_LDRSH(dst, src, tmp), ctx);
+ else
+ emit(A64_LDRH(dst, src, tmp), ctx);
}
break;
case BPF_B:
if (is_lsi_offset(off_adj, 0)) {
- emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
+ if (sign_extend)
+ emit(A64_LDRSBI(dst, src_adj, off_adj), ctx);
+ else
+ emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
} else {
emit_a64_mov_i(1, tmp, off, ctx);
- emit(A64_LDRB(dst, src, tmp), ctx);
+ if (sign_extend)
+ emit(A64_LDRSB(dst, src, tmp), ctx);
+ else
+ emit(A64_LDRB(dst, src, tmp), ctx);
}
break;
case BPF_DW:
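The BPF_MOD lowerings above (both the unsigned and, with insn->off != 0, the new signed variant) share one shape: AArch64 has no remainder instruction, so the JIT divides into a temporary and multiply-subtracts. A C model of the emitted pair (BPF's divide-by-zero semantics are handled elsewhere and not modeled here):

static long long bpf_mod_model(long long dst, long long src, int off)
{
	long long tmp;

	if (off)	/* A64_SDIV(is64, tmp, dst, src) */
		tmp = dst / src;
	else		/* A64_UDIV(is64, tmp, dst, src) */
		tmp = (long long)((unsigned long long)dst /
				  (unsigned long long)src);

	return dst - tmp * src;	/* A64_MSUB(is64, dst, dst, tmp, src) */
}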
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index 2717f5490428..d21c6c92a683 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -431,11 +431,21 @@ static inline u32 rv_mulhu(u8 rd, u8 rs1, u8 rs2)
return rv_r_insn(1, rs2, rs1, 3, rd, 0x33);
}
+static inline u32 rv_div(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 4, rd, 0x33);
+}
+
static inline u32 rv_divu(u8 rd, u8 rs1, u8 rs2)
{
return rv_r_insn(1, rs2, rs1, 5, rd, 0x33);
}
+static inline u32 rv_rem(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 6, rd, 0x33);
+}
+
static inline u32 rv_remu(u8 rd, u8 rs1, u8 rs2)
{
return rv_r_insn(1, rs2, rs1, 7, rd, 0x33);
@@ -501,6 +511,16 @@ static inline u32 rv_ble(u8 rs1, u8 rs2, u16 imm12_1)
return rv_bge(rs2, rs1, imm12_1);
}
+static inline u32 rv_lb(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 0, rd, 0x03);
+}
+
+static inline u32 rv_lh(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 1, rd, 0x03);
+}
+
static inline u32 rv_lw(u8 rd, u16 imm11_0, u8 rs1)
{
return rv_i_insn(imm11_0, rs1, 2, rd, 0x03);
@@ -766,11 +786,21 @@ static inline u32 rv_mulw(u8 rd, u8 rs1, u8 rs2)
return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b);
}
+static inline u32 rv_divw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 4, rd, 0x3b);
+}
+
static inline u32 rv_divuw(u8 rd, u8 rs1, u8 rs2)
{
return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b);
}
+static inline u32 rv_remw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 6, rd, 0x3b);
+}
+
static inline u32 rv_remuw(u8 rd, u8 rs1, u8 rs2)
{
return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b);
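All the new helpers above differ only in funct3/opcode, following the standard RISC-V R-type layout that rv_r_insn() presumably packs: funct7[31:25] rs2[24:20] rs1[19:15] funct3[14:12] rd[11:7] opcode[6:0]. funct7 = 1 selects the M extension; funct3 values 4/5/6/7 pick div/divu/rem/remu; opcode 0x33 is OP and 0x3b is OP-32 (the sign-extending *w word forms). A sketch of the packing:

#include <stdint.h>

static inline uint32_t rv_r_type(uint8_t f7, uint8_t rs2, uint8_t rs1,
				 uint8_t f3, uint8_t rd, uint8_t opc)
{
	return (uint32_t)f7 << 25 | (uint32_t)rs2 << 20 | (uint32_t)rs1 << 15 |
	       (uint32_t)f3 << 12 | (uint32_t)rd << 7 | opc;
}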
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 0ca4f5c0097c..8423f4ddf8f5 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -580,7 +580,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
unsigned long pc;
off_t offset;
- if (!ctx->insns || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
+ if (!ctx->insns || !ctx->prog->aux->extable ||
+ (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX))
return 0;
if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
@@ -1046,7 +1047,19 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_zext_32(rd, ctx);
break;
}
- emit_mv(rd, rs, ctx);
+ switch (insn->off) {
+ case 0:
+ emit_mv(rd, rs, ctx);
+ break;
+ case 8:
+ case 16:
+ emit_slli(RV_REG_T1, rs, 64 - insn->off, ctx);
+ emit_srai(rd, RV_REG_T1, 64 - insn->off, ctx);
+ break;
+ case 32:
+ emit_addiw(rd, rs, 0, ctx);
+ break;
+ }
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
@@ -1094,13 +1107,19 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
- emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
+ if (off)
+ emit(is64 ? rv_div(rd, rd, rs) : rv_divw(rd, rd, rs), ctx);
+ else
+ emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
- emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
+ if (off)
+ emit(is64 ? rv_rem(rd, rd, rs) : rv_remw(rd, rd, rs), ctx);
+ else
+ emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
@@ -1149,6 +1168,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
+ case BPF_ALU64 | BPF_END | BPF_FROM_LE:
emit_li(RV_REG_T2, 0, ctx);
emit_andi(RV_REG_T1, rd, 0xff, ctx);
@@ -1271,16 +1291,24 @@ out_be:
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_K:
emit_imm(RV_REG_T1, imm, ctx);
- emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
- rv_divuw(rd, rd, RV_REG_T1), ctx);
+ if (off)
+ emit(is64 ? rv_div(rd, rd, RV_REG_T1) :
+ rv_divw(rd, rd, RV_REG_T1), ctx);
+ else
+ emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
+ rv_divuw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_K:
emit_imm(RV_REG_T1, imm, ctx);
- emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
- rv_remuw(rd, rd, RV_REG_T1), ctx);
+ if (off)
+ emit(is64 ? rv_rem(rd, rd, RV_REG_T1) :
+ rv_remw(rd, rd, RV_REG_T1), ctx);
+ else
+ emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
+ rv_remuw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
@@ -1314,7 +1342,11 @@ out_be:
/* JUMP off */
case BPF_JMP | BPF_JA:
- rvoff = rv_offset(i, off, ctx);
+ case BPF_JMP32 | BPF_JA:
+ if (BPF_CLASS(code) == BPF_JMP)
+ rvoff = rv_offset(i, off, ctx);
+ else
+ rvoff = rv_offset(i, imm, ctx);
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
@@ -1486,7 +1518,7 @@ out_be:
return 1;
}
- /* LDX: dst = *(size *)(src + off) */
+ /* LDX: dst = *(unsigned size *)(src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W:
@@ -1495,14 +1527,28 @@ out_be:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ /* LDSX: dst = *(signed size *)(src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_B:
+ case BPF_LDX | BPF_MEMSX | BPF_H:
+ case BPF_LDX | BPF_MEMSX | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
{
int insn_len, insns_start;
+ bool sign_ext;
+
+ sign_ext = BPF_MODE(insn->code) == BPF_MEMSX ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
switch (BPF_SIZE(code)) {
case BPF_B:
if (is_12b_int(off)) {
insns_start = ctx->ninsns;
- emit(rv_lbu(rd, off, rs), ctx);
+ if (sign_ext)
+ emit(rv_lb(rd, off, rs), ctx);
+ else
+ emit(rv_lbu(rd, off, rs), ctx);
insn_len = ctx->ninsns - insns_start;
break;
}
@@ -1510,15 +1556,19 @@ out_be:
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
insns_start = ctx->ninsns;
- emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
+ if (sign_ext)
+ emit(rv_lb(rd, 0, RV_REG_T1), ctx);
+ else
+ emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
insn_len = ctx->ninsns - insns_start;
- if (insn_is_zext(&insn[1]))
- return 1;
break;
case BPF_H:
if (is_12b_int(off)) {
insns_start = ctx->ninsns;
- emit(rv_lhu(rd, off, rs), ctx);
+ if (sign_ext)
+ emit(rv_lh(rd, off, rs), ctx);
+ else
+ emit(rv_lhu(rd, off, rs), ctx);
insn_len = ctx->ninsns - insns_start;
break;
}
@@ -1526,15 +1576,19 @@ out_be:
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
insns_start = ctx->ninsns;
- emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
+ if (sign_ext)
+ emit(rv_lh(rd, 0, RV_REG_T1), ctx);
+ else
+ emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
insn_len = ctx->ninsns - insns_start;
- if (insn_is_zext(&insn[1]))
- return 1;
break;
case BPF_W:
if (is_12b_int(off)) {
insns_start = ctx->ninsns;
- emit(rv_lwu(rd, off, rs), ctx);
+ if (sign_ext)
+ emit(rv_lw(rd, off, rs), ctx);
+ else
+ emit(rv_lwu(rd, off, rs), ctx);
insn_len = ctx->ninsns - insns_start;
break;
}
@@ -1542,10 +1596,11 @@ out_be:
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
insns_start = ctx->ninsns;
- emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
+ if (sign_ext)
+ emit(rv_lw(rd, 0, RV_REG_T1), ctx);
+ else
+ emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
insn_len = ctx->ninsns - insns_start;
- if (insn_is_zext(&insn[1]))
- return 1;
break;
case BPF_DW:
if (is_12b_int(off)) {
@@ -1566,6 +1621,9 @@ out_be:
ret = add_exception_handler(insn, ctx, rd, insn_len);
if (ret)
return ret;
+
+ if (BPF_SIZE(code) != BPF_DW && insn_is_zext(&insn[1]))
+ return 1;
break;
}
/* speculation barrier */
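The BPF_MOV sign-extension (MOVSX) lowering above: RV64 lacks 8/16-bit sign-extending register moves (absent Zbb), so the value is shifted to the top of the register and arithmetic-shifted back down; the 32-bit case gets sign extension for free from addiw. A C model (srai is modeled with a signed right shift, which is arithmetic on the relevant compilers):

static inline long long movsx_model(long long rs, int off)
{
	if (off == 32)
		return (int)rs;		/* emit_addiw(rd, rs, 0) */

	/* emit_slli(t1, rs, 64 - off); emit_srai(rd, t1, 64 - off) */
	return (long long)((unsigned long long)rs << (64 - off)) >> (64 - off);
}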
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index eced6400f778..12596af59c00 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -653,7 +653,8 @@ enum bpf_type_flag {
MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS),
/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
- * Currently only valid for linked-list and rbtree nodes.
+ * Currently only valid for linked-list and rbtree nodes. If the nodes
+ * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
*/
NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS),
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index f70f9ac884d2..b6e58dab8e27 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -745,7 +745,7 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
}
}
-#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED)
+#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)
static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index e66d04dbe56a..5b85cf18c350 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -752,6 +752,7 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
u64 *probe_offset, u64 *probe_addr);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
@@ -798,6 +799,11 @@ bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
return -EOPNOTSUPP;
}
+static inline int
+bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
#endif
enum {
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 6f15e6fa154e..53bd2d02a4f0 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -16,9 +16,12 @@
#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
#define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2)
+/* LWTUNNEL_XMIT_CONTINUE should be distinguishable from dst_output return
+ * values (NET_XMIT_xxx and NETDEV_TX_xxx in linux/netdevice.h) for safety.
+ */
enum {
LWTUNNEL_XMIT_DONE,
- LWTUNNEL_XMIT_CONTINUE,
+ LWTUNNEL_XMIT_CONTINUE = 0x100,
};
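The renumbering matters because callers treat lwtunnel_xmit()'s return like a transmit status: NET_XMIT_* (0..3) and NETDEV_TX_* (0x00, 0x10) all sit below 0x100, so CONTINUE can no longer alias one of them. A hedged sketch of the caller shape being protected (cf. the ip_output.c and ip6_output.c one-liners in the diffstat; function name hypothetical):

static int lwt_xmit_one(struct sk_buff *skb)
{
	int ret = lwtunnel_xmit(skb);

	if (ret != LWTUNNEL_XMIT_CONTINUE)
		return ret;	/* DONE, an error, or a NET_XMIT_xxx code */

	/* otherwise fall through to the regular neighbour output path */
	return 0;
}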
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index d21deb46f49f..8790b3962e4b 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1039,6 +1039,7 @@ enum bpf_attach_type {
BPF_NETFILTER,
BPF_TCX_INGRESS,
BPF_TCX_EGRESS,
+ BPF_TRACE_UPROBE_MULTI,
__MAX_BPF_ATTACH_TYPE
};
@@ -1057,6 +1058,7 @@ enum bpf_link_type {
BPF_LINK_TYPE_STRUCT_OPS = 9,
BPF_LINK_TYPE_NETFILTER = 10,
BPF_LINK_TYPE_TCX = 11,
+ BPF_LINK_TYPE_UPROBE_MULTI = 12,
MAX_BPF_LINK_TYPE,
};
@@ -1186,7 +1188,16 @@ enum bpf_perf_event_type {
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
-#define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
+enum {
+ BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
+};
+
+/* link_create.uprobe_multi.flags used in LINK_CREATE command for
+ * BPF_TRACE_UPROBE_MULTI attach type to create return probe.
+ */
+enum {
+ BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
+};
/* link_create.netfilter.flags used in LINK_CREATE command for
* BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
@@ -1624,6 +1635,15 @@ union bpf_attr {
};
__u64 expected_revision;
} tcx;
+ struct {
+ __aligned_u64 path;
+ __aligned_u64 offsets;
+ __aligned_u64 ref_ctr_offsets;
+ __aligned_u64 cookies;
+ __u32 cnt;
+ __u32 flags;
+ __u32 pid;
+ } uprobe_multi;
};
} link_create;
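Shape of a raw LINK_CREATE call using the new uprobe_multi attrs, hedged: the field names are exactly those added above, but the wrapper function and values are illustrative.

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int uprobe_multi_link_create(int prog_fd, const char *path,
				    const unsigned long *offsets,
				    const __u64 *cookies, __u32 cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.attach_type = BPF_TRACE_UPROBE_MULTI;
	attr.link_create.uprobe_multi.path = (__u64)(unsigned long)path;
	attr.link_create.uprobe_multi.offsets = (__u64)(unsigned long)offsets;
	attr.link_create.uprobe_multi.cookies = (__u64)(unsigned long)cookies;
	attr.link_create.uprobe_multi.cnt = cnt;
	attr.link_create.uprobe_multi.pid = 0;	/* 0: no task filter */

	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}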
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ef28c64f1eb1..e42a1bdb7f53 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -68,11 +68,8 @@ struct bpf_cpu_map_entry {
struct bpf_cpumap_val value;
struct bpf_prog *prog;
- atomic_t refcnt; /* Control when this struct can be free'ed */
- struct rcu_head rcu;
-
- struct work_struct kthread_stop_wq;
struct completion kthread_running;
+ struct rcu_work free_work;
};
struct bpf_cpu_map {
@@ -117,11 +114,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
return &cmap->map;
}
-static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
-{
- atomic_inc(&rcpu->refcnt);
-}
-
static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
/* The tear-down procedure should have made sure that queue is
@@ -142,35 +134,6 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
}
}
-static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
-{
- if (atomic_dec_and_test(&rcpu->refcnt)) {
- if (rcpu->prog)
- bpf_prog_put(rcpu->prog);
- /* The queue should be empty at this point */
- __cpu_map_ring_cleanup(rcpu->queue);
- ptr_ring_cleanup(rcpu->queue, NULL);
- kfree(rcpu->queue);
- kfree(rcpu);
- }
-}
-
-/* called from workqueue, to workaround syscall using preempt_disable */
-static void cpu_map_kthread_stop(struct work_struct *work)
-{
- struct bpf_cpu_map_entry *rcpu;
-
- rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
-
- /* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
- * as it waits until all in-flight call_rcu() callbacks complete.
- */
- rcu_barrier();
-
- /* kthread_stop will wake_up_process and wait for it to complete */
- kthread_stop(rcpu->kthread);
-}
-
static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
struct list_head *listp,
struct xdp_cpumap_stats *stats)
@@ -395,7 +358,6 @@ static int cpu_map_kthread_run(void *data)
}
__set_current_state(TASK_RUNNING);
- put_cpu_map_entry(rcpu);
return 0;
}
@@ -472,9 +434,6 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
if (IS_ERR(rcpu->kthread))
goto free_prog;
- get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
- get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */
-
/* Make sure kthread runs on a single CPU */
kthread_bind(rcpu->kthread, cpu);
wake_up_process(rcpu->kthread);
@@ -501,40 +460,40 @@ free_rcu:
return NULL;
}
-static void __cpu_map_entry_free(struct rcu_head *rcu)
+static void __cpu_map_entry_free(struct work_struct *work)
{
struct bpf_cpu_map_entry *rcpu;
/* This cpu_map_entry have been disconnected from map and one
- * RCU grace-period have elapsed. Thus, XDP cannot queue any
+ * RCU grace-period have elapsed. Thus, XDP cannot queue any
* new packets and cannot change/set flush_needed that can
* find this entry.
*/
- rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);
+ rcpu = container_of(to_rcu_work(work), struct bpf_cpu_map_entry, free_work);
+ /* kthread_stop will wake_up_process and wait for it to complete.
+ * cpu_map_kthread_run() makes sure the pointer ring is empty
+ * before exiting.
+ */
+ kthread_stop(rcpu->kthread);
+
+ if (rcpu->prog)
+ bpf_prog_put(rcpu->prog);
+ /* The queue should be empty at this point */
+ __cpu_map_ring_cleanup(rcpu->queue);
+ ptr_ring_cleanup(rcpu->queue, NULL);
+ kfree(rcpu->queue);
free_percpu(rcpu->bulkq);
- /* Cannot kthread_stop() here, last put free rcpu resources */
- put_cpu_map_entry(rcpu);
+ kfree(rcpu);
}
-/* After xchg pointer to bpf_cpu_map_entry, use the call_rcu() to
- * ensure any driver rcu critical sections have completed, but this
- * does not guarantee a flush has happened yet. Because driver side
- * rcu_read_lock/unlock only protects the running XDP program. The
- * atomic xchg and NULL-ptr check in __cpu_map_flush() makes sure a
- * pending flush op doesn't fail.
- *
- * The bpf_cpu_map_entry is still used by the kthread, and there can
- * still be pending packets (in queue and percpu bulkq). A refcnt
- * makes sure to last user (kthread_stop vs. call_rcu) free memory
- * resources.
- *
- * The rcu callback __cpu_map_entry_free flush remaining packets in
- * percpu bulkq to queue. Due to caller map_delete_elem() disable
- * preemption, cannot call kthread_stop() to make sure queue is empty.
- * Instead a work_queue is started for stopping kthread,
- * cpu_map_kthread_stop, which waits for an RCU grace period before
- * stopping kthread, emptying the queue.
+/* After the xchg of the bpf_cpu_map_entry pointer, we need to make sure the old
+ * entry is no longer in use before freeing. We use queue_rcu_work() to call
+ * __cpu_map_entry_free() in a separate workqueue after waiting for an RCU grace
+ * period. This means that (a) all pending enqueue and flush operations have
+ * completed (because of the RCU callback), and (b) we are in a workqueue
+ * context where we can stop the kthread and wait for it to exit before freeing
+ * everything.
*/
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
@@ -543,9 +502,8 @@ static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
if (old_rcpu) {
- call_rcu(&old_rcpu->rcu, __cpu_map_entry_free);
- INIT_WORK(&old_rcpu->kthread_stop_wq, cpu_map_kthread_stop);
- schedule_work(&old_rcpu->kthread_stop_wq);
+ INIT_RCU_WORK(&old_rcpu->free_work, __cpu_map_entry_free);
+ queue_rcu_work(system_wq, &old_rcpu->free_work);
}
}
@@ -557,7 +515,7 @@ static long cpu_map_delete_elem(struct bpf_map *map, void *key)
if (key_cpu >= map->max_entries)
return -EINVAL;
- /* notice caller map_delete_elem() use preempt_disable() */
+ /* notice caller map_delete_elem() uses rcu_read_lock() */
__cpu_map_entry_replace(cmap, key_cpu, NULL);
return 0;
}
@@ -608,16 +566,15 @@ static void cpu_map_free(struct bpf_map *map)
/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
* so the bpf programs (can be more than one that used this map) were
* disconnected from events. Wait for outstanding critical sections in
- * these programs to complete. The rcu critical section only guarantees
- * no further "XDP/bpf-side" reads against bpf_cpu_map->cpu_map.
- * It does __not__ ensure pending flush operations (if any) are
- * complete.
+ * these programs to complete. synchronize_rcu() below not only
+ * guarantees no further "XDP/bpf-side" reads against
+ * bpf_cpu_map->cpu_map, but also ensures pending flush operations
+ * (if any) are completed.
*/
-
synchronize_rcu();
- /* For cpu_map the remote CPUs can still be using the entries
- * (struct bpf_cpu_map_entry).
+ /* The only possible user of bpf_cpu_map_entry is
+ * cpu_map_kthread_run().
*/
for (i = 0; i < cmap->map.max_entries; i++) {
struct bpf_cpu_map_entry *rcpu;
@@ -626,8 +583,8 @@ static void cpu_map_free(struct bpf_map *map)
if (!rcpu)
continue;
- /* bq flush and cleanup happens after RCU grace-period */
- __cpu_map_entry_replace(cmap, i, NULL); /* call_rcu */
+ /* Stop kthread and cleanup entry directly */
+ __cpu_map_entry_free(&rcpu->free_work.work);
}
bpf_map_area_free(cmap->cpu_map);
bpf_map_area_free(cmap);
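The cpumap teardown above is an instance of a general idiom: embed a struct rcu_work in the object and use queue_rcu_work(), which runs the handler in workqueue (process) context only after an RCU grace period has elapsed, so the handler may sleep (here, in kthread_stop()). A minimal sketch with a hypothetical object:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	struct rcu_work free_work;
	/* ... resources torn down in obj_free() ... */
};

static void obj_free(struct work_struct *work)
{
	struct obj *o = container_of(to_rcu_work(work), struct obj, free_work);

	/* the grace period has passed and we may sleep here */
	kfree(o);
}

static void obj_retire(struct obj *o)
{
	INIT_RCU_WORK(&o->free_work, obj_free);
	queue_rcu_work(system_wq, &o->free_work);
}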
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index eb91cae0612a..8bd3812fb8df 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -286,6 +286,7 @@ static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
+ preempt_disable();
arch_spin_lock(l);
}
@@ -294,6 +295,7 @@ static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
arch_spinlock_t *l = (void *)lock;
arch_spin_unlock(l);
+ preempt_enable();
}
#else
@@ -1913,7 +1915,11 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec)
if (rec)
bpf_obj_free_fields(rec, p);
- bpf_mem_free(&bpf_global_ma, p);
+
+ if (rec && rec->refcount_off >= 0)
+ bpf_mem_free_rcu(&bpf_global_ma, p);
+ else
+ bpf_mem_free(&bpf_global_ma, p);
}
__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
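Context for the __bpf_spin_lock() change above: arch_spin_lock() is the raw spinning primitive and, unlike spin_lock(), does not disable preemption itself, so a lock holder preempted by a waiter spinning on the same CPU could deadlock. The new pairing mirrors what spin_lock() does internally; a minimal sketch of the idiom (illustrative function, not from the patch):

#include <linux/preempt.h>
#include <linux/spinlock.h>

static void raw_locked_inc(arch_spinlock_t *l, int *counter)
{
	preempt_disable();	/* holder can no longer be scheduled out */
	arch_spin_lock(l);
	(*counter)++;		/* critical section */
	arch_spin_unlock(l);
	preempt_enable();
}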
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cb658543bdb4..ebeb0695305a 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -657,7 +657,6 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
if (!btf_is_kernel(field->kptr.btf)) {
pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
field->kptr.btf_id);
- WARN_ON_ONCE(!pointee_struct_meta);
migrate_disable();
__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
pointee_struct_meta->record :
@@ -2815,10 +2814,12 @@ static void bpf_link_free_id(int id)
/* Clean up bpf_link and corresponding anon_inode file and FD. After
* anon_inode is created, bpf_link can't be just kfree()'d due to deferred
- * anon_inode's release() call. This helper marksbpf_link as
+ * anon_inode's release() call. This helper marks bpf_link as
* defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
* is not decremented, it's the responsibility of a calling code that failed
* to complete bpf_link initialization.
+ * This helper eventually calls link's dealloc callback, but does not call
+ * link's release callback.
*/
void bpf_link_cleanup(struct bpf_link_primer *primer)
{
@@ -3655,34 +3656,6 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
return fd;
}
-static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
- enum bpf_attach_type attach_type)
-{
- switch (prog->type) {
- case BPF_PROG_TYPE_CGROUP_SOCK:
- case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
- case BPF_PROG_TYPE_CGROUP_SOCKOPT:
- case BPF_PROG_TYPE_SK_LOOKUP:
- return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
- case BPF_PROG_TYPE_CGROUP_SKB:
- if (!capable(CAP_NET_ADMIN))
- /* cg-skb progs can be loaded by unpriv user.
- * check permissions at attach time.
- */
- return -EPERM;
- return prog->enforce_expected_attach_type &&
- prog->expected_attach_type != attach_type ?
- -EINVAL : 0;
- case BPF_PROG_TYPE_KPROBE:
- if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
- attach_type != BPF_TRACE_KPROBE_MULTI)
- return -EINVAL;
- return 0;
- default:
- return 0;
- }
-}
-
static enum bpf_prog_type
attach_type_to_prog_type(enum bpf_attach_type attach_type)
{
@@ -3749,6 +3722,62 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
}
}
+static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
+ enum bpf_attach_type attach_type)
+{
+ enum bpf_prog_type ptype;
+
+ switch (prog->type) {
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ case BPF_PROG_TYPE_SK_LOOKUP:
+ return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ if (!capable(CAP_NET_ADMIN))
+ /* cg-skb progs can be loaded by unpriv user.
+ * check permissions at attach time.
+ */
+ return -EPERM;
+ return prog->enforce_expected_attach_type &&
+ prog->expected_attach_type != attach_type ?
+ -EINVAL : 0;
+ case BPF_PROG_TYPE_EXT:
+ return 0;
+ case BPF_PROG_TYPE_NETFILTER:
+ if (attach_type != BPF_NETFILTER)
+ return -EINVAL;
+ return 0;
+ case BPF_PROG_TYPE_PERF_EVENT:
+ case BPF_PROG_TYPE_TRACEPOINT:
+ if (attach_type != BPF_PERF_EVENT)
+ return -EINVAL;
+ return 0;
+ case BPF_PROG_TYPE_KPROBE:
+ if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
+ attach_type != BPF_TRACE_KPROBE_MULTI)
+ return -EINVAL;
+ if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
+ attach_type != BPF_TRACE_UPROBE_MULTI)
+ return -EINVAL;
+ if (attach_type != BPF_PERF_EVENT &&
+ attach_type != BPF_TRACE_KPROBE_MULTI &&
+ attach_type != BPF_TRACE_UPROBE_MULTI)
+ return -EINVAL;
+ return 0;
+ case BPF_PROG_TYPE_SCHED_CLS:
+ if (attach_type != BPF_TCX_INGRESS &&
+ attach_type != BPF_TCX_EGRESS)
+ return -EINVAL;
+ return 0;
+ default:
+ ptype = attach_type_to_prog_type(attach_type);
+ if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type)
+ return -EINVAL;
+ return 0;
+ }
+}
+
#define BPF_PROG_ATTACH_LAST_FIELD expected_revision
#define BPF_F_ATTACH_MASK_BASE \
@@ -4852,10 +4881,9 @@ err_put:
return err;
}
-#define BPF_LINK_CREATE_LAST_FIELD link_create.kprobe_multi.cookies
+#define BPF_LINK_CREATE_LAST_FIELD link_create.uprobe_multi.pid
static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{
- enum bpf_prog_type ptype;
struct bpf_prog *prog;
int ret;
@@ -4875,45 +4903,6 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
goto out;
switch (prog->type) {
- case BPF_PROG_TYPE_EXT:
- break;
- case BPF_PROG_TYPE_NETFILTER:
- if (attr->link_create.attach_type != BPF_NETFILTER) {
- ret = -EINVAL;
- goto out;
- }
- break;
- case BPF_PROG_TYPE_PERF_EVENT:
- case BPF_PROG_TYPE_TRACEPOINT:
- if (attr->link_create.attach_type != BPF_PERF_EVENT) {
- ret = -EINVAL;
- goto out;
- }
- break;
- case BPF_PROG_TYPE_KPROBE:
- if (attr->link_create.attach_type != BPF_PERF_EVENT &&
- attr->link_create.attach_type != BPF_TRACE_KPROBE_MULTI) {
- ret = -EINVAL;
- goto out;
- }
- break;
- case BPF_PROG_TYPE_SCHED_CLS:
- if (attr->link_create.attach_type != BPF_TCX_INGRESS &&
- attr->link_create.attach_type != BPF_TCX_EGRESS) {
- ret = -EINVAL;
- goto out;
- }
- break;
- default:
- ptype = attach_type_to_prog_type(attr->link_create.attach_type);
- if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
- ret = -EINVAL;
- goto out;
- }
- break;
- }
-
- switch (prog->type) {
case BPF_PROG_TYPE_CGROUP_SKB:
case BPF_PROG_TYPE_CGROUP_SOCK:
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
@@ -4969,8 +4958,10 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
case BPF_PROG_TYPE_KPROBE:
if (attr->link_create.attach_type == BPF_PERF_EVENT)
ret = bpf_perf_link_attach(attr, prog);
- else
+ else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI)
ret = bpf_kprobe_multi_link_attach(attr, prog);
+ else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
+ ret = bpf_uprobe_multi_link_attach(attr, prog);
break;
default:
ret = -EINVAL;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4ccca1f6c998..bb78212fa5b2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4990,20 +4990,22 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno)
{
const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id);
- int perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU;
+ int perm_flags;
const char *reg_name = "";
- /* Only unreferenced case accepts untrusted pointers */
- if (kptr_field->type == BPF_KPTR_UNREF)
- perm_flags |= PTR_UNTRUSTED;
+ if (btf_is_kernel(reg->btf)) {
+ perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU;
+
+ /* Only unreferenced case accepts untrusted pointers */
+ if (kptr_field->type == BPF_KPTR_UNREF)
+ perm_flags |= PTR_UNTRUSTED;
+ } else {
+ perm_flags = PTR_MAYBE_NULL | MEM_ALLOC;
+ }
if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
goto bad_type;
- if (!btf_is_kernel(reg->btf)) {
- verbose(env, "R%d must point to kernel BTF\n", regno);
- return -EINVAL;
- }
/* We need to verify reg->type and reg->btf, before accessing reg->btf */
reg_name = btf_type_name(reg->btf, reg->btf_id);
@@ -5016,7 +5018,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
if (__check_ptr_off_reg(env, reg, regno, true))
return -EACCES;
- /* A full type match is needed, as BTF can be vmlinux or module BTF, and
+ /* A full type match is needed, as BTF can be vmlinux, module or prog BTF, and
* we also need to take into account the reg->off.
*
* We want to support cases like:
@@ -5062,7 +5064,9 @@ bad_type:
*/
static bool in_rcu_cs(struct bpf_verifier_env *env)
{
- return env->cur_state->active_rcu_lock || !env->prog->aux->sleepable;
+ return env->cur_state->active_rcu_lock ||
+ env->cur_state->active_lock.ptr ||
+ !env->prog->aux->sleepable;
}
/* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
@@ -7916,7 +7920,10 @@ found:
verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
return -EFAULT;
}
- /* Handled by helper specific checks */
+ if (meta->func_id == BPF_FUNC_kptr_xchg) {
+ if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
+ return -EACCES;
+ }
break;
case PTR_TO_BTF_ID | MEM_PERCPU:
case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED:
@@ -7968,17 +7975,6 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
return 0;
- if ((type_is_ptr_alloc_obj(type) || type_is_non_owning_ref(type)) && reg->off) {
- if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT))
- return __check_ptr_off_reg(env, reg, regno, true);
-
- verbose(env, "R%d must have zero offset when passed to release func\n",
- regno);
- verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno,
- btf_type_name(reg->btf, reg->btf_id), reg->off);
- return -EINVAL;
- }
-
/* Doing check_ptr_off_reg check for the offset will catch this
* because fixed_off_ok is false, but checking here allows us
* to give the user a better error message.
@@ -8013,6 +8009,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
case PTR_TO_BTF_ID | PTR_TRUSTED:
case PTR_TO_BTF_ID | MEM_RCU:
case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
+ case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF | MEM_RCU:
/* When referenced PTR_TO_BTF_ID is passed to release function,
* its fixed offset must be 0. In the other cases, fixed offset
* can be non-zero. This was already checked above. So pass
@@ -10479,6 +10476,7 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
struct bpf_verifier_state *state = env->cur_state;
+ struct btf_record *rec = reg_btf_record(reg);
if (!state->active_lock.ptr) {
verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
@@ -10491,6 +10489,9 @@ static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state
}
reg->type |= NON_OWN_REF;
+ if (rec->refcount_off >= 0)
+ reg->type |= MEM_RCU;
+
return 0;
}
@@ -11223,10 +11224,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i);
return -EINVAL;
}
- if (rec->refcount_off >= 0) {
- verbose(env, "bpf_refcount_acquire calls are disabled for now\n");
- return -EINVAL;
- }
+
meta->arg_btf = reg->btf;
meta->arg_btf_id = reg->btf_id;
break;
@@ -11331,6 +11329,11 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
struct bpf_func_state *state;
struct bpf_reg_state *reg;
+ if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) {
+ verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n");
+ return -EACCES;
+ }
+
if (rcu_lock) {
verbose(env, "nested rcu read lock (kernel function %s)\n", func_name);
return -EINVAL;
@@ -14047,6 +14050,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return -EINVAL;
}
+ /* check src2 operand */
+ err = check_reg_arg(env, insn->dst_reg, SRC_OP);
+ if (err)
+ return err;
+
+ dst_reg = &regs[insn->dst_reg];
if (BPF_SRC(insn->code) == BPF_X) {
if (insn->imm != 0) {
verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
@@ -14058,12 +14067,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
if (err)
return err;
- if (is_pointer_value(env, insn->src_reg)) {
+ src_reg = &regs[insn->src_reg];
+ if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) &&
+ is_pointer_value(env, insn->src_reg)) {
verbose(env, "R%d pointer comparison prohibited\n",
insn->src_reg);
return -EACCES;
}
- src_reg = &regs[insn->src_reg];
} else {
if (insn->src_reg != BPF_REG_0) {
verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
@@ -14071,12 +14081,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
}
}
- /* check src2 operand */
- err = check_reg_arg(env, insn->dst_reg, SRC_OP);
- if (err)
- return err;
-
- dst_reg = &regs[insn->dst_reg];
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
if (BPF_SRC(insn->code) == BPF_K) {
@@ -16692,7 +16696,8 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
- if (env->cur_state->active_rcu_lock) {
+ if (env->cur_state->active_rcu_lock &&
+ !in_rbtree_lock_required_cb(env)) {
verbose(env, "bpf_rcu_read_unlock is missing\n");
return -EINVAL;
}
@@ -16972,11 +16977,6 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
return -EINVAL;
}
-
- if (prog->aux->sleepable) {
- verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
- return -EINVAL;
- }
}
if (btf_record_has_field(map->record, BPF_TIMER)) {
@@ -18281,6 +18281,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
+ if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
+ !kptr_struct_meta) {
+ verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",
+ insn_idx);
+ return -EFAULT;
+ }
+
insn_buf[0] = addr[0];
insn_buf[1] = addr[1];
insn_buf[2] = *insn;
@@ -18288,6 +18295,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
} else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+ struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
int struct_meta_reg = BPF_REG_3;
int node_offset_reg = BPF_REG_4;
@@ -18297,6 +18305,12 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
node_offset_reg = BPF_REG_5;
}
+ if (!kptr_struct_meta) {
+ verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n",
+ insn_idx);
+ return -EFAULT;
+ }
+
__fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg,
node_offset_reg, insn, insn_buf, cnt);
} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
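What the verifier changes above re-enable, as exercised by the refcounted_kptr selftests in the diffstat: bpf_refcount_acquire() on a local kptr, with non-owning references to refcounted nodes now also tagged MEM_RCU. A hedged sketch of the BPF-side pattern; `glock`, `groot` and the `less` callback are assumed declared as in those selftests, and the kfuncs come from bpf_experimental.h:

struct node_data {
	long key;
	struct bpf_refcount ref;
	struct bpf_rb_node r;
};

SEC("tc")
int add_refcounted_node(void *ctx)
{
	struct node_data *n, *m;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 0;

	m = bpf_refcount_acquire(n);	/* take a second owning reference */

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->r, less);	/* n becomes non-owning */
	bpf_spin_unlock(&glock);

	bpf_obj_drop(m);	/* drop the extra owning reference */
	return 0;
}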
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 792445e1f3f0..a7264b2c17ad 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -23,6 +23,7 @@
#include <linux/sort.h>
#include <linux/key.h>
#include <linux/verification.h>
+#include <linux/namei.h>
#include <net/bpf_sk_storage.h>
@@ -86,6 +87,9 @@ static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
static u64 bpf_kprobe_multi_cookie(struct bpf_run_ctx *ctx);
static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
+static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx);
+static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx);
+
/**
* trace_call_bpf - invoke BPF program
* @call: tracepoint event
@@ -1103,6 +1107,30 @@ static const struct bpf_func_proto bpf_get_attach_cookie_proto_kmulti = {
.arg1_type = ARG_PTR_TO_CTX,
};
+BPF_CALL_1(bpf_get_func_ip_uprobe_multi, struct pt_regs *, regs)
+{
+ return bpf_uprobe_multi_entry_ip(current->bpf_ctx);
+}
+
+static const struct bpf_func_proto bpf_get_func_ip_proto_uprobe_multi = {
+ .func = bpf_get_func_ip_uprobe_multi,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_attach_cookie_uprobe_multi, struct pt_regs *, regs)
+{
+ return bpf_uprobe_multi_cookie(current->bpf_ctx);
+}
+
+static const struct bpf_func_proto bpf_get_attach_cookie_proto_umulti = {
+ .func = bpf_get_attach_cookie_uprobe_multi,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
BPF_CALL_1(bpf_get_attach_cookie_trace, void *, ctx)
{
struct bpf_trace_run_ctx *run_ctx;
@@ -1545,13 +1573,17 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_override_return_proto;
#endif
case BPF_FUNC_get_func_ip:
- return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
- &bpf_get_func_ip_proto_kprobe_multi :
- &bpf_get_func_ip_proto_kprobe;
+ if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
+ return &bpf_get_func_ip_proto_kprobe_multi;
+ if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
+ return &bpf_get_func_ip_proto_uprobe_multi;
+ return &bpf_get_func_ip_proto_kprobe;
case BPF_FUNC_get_attach_cookie:
- return prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI ?
- &bpf_get_attach_cookie_proto_kmulti :
- &bpf_get_attach_cookie_proto_trace;
+ if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI)
+ return &bpf_get_attach_cookie_proto_kmulti;
+ if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
+ return &bpf_get_attach_cookie_proto_umulti;
+ return &bpf_get_attach_cookie_proto_trace;
default:
return bpf_tracing_func_proto(func_id, prog);
}
@@ -2970,3 +3002,301 @@ static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
return 0;
}
#endif
+
+#ifdef CONFIG_UPROBES
+struct bpf_uprobe_multi_link;
+
+struct bpf_uprobe {
+ struct bpf_uprobe_multi_link *link;
+ loff_t offset;
+ u64 cookie;
+ struct uprobe_consumer consumer;
+};
+
+struct bpf_uprobe_multi_link {
+ struct path path;
+ struct bpf_link link;
+ u32 cnt;
+ struct bpf_uprobe *uprobes;
+ struct task_struct *task;
+};
+
+struct bpf_uprobe_multi_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ unsigned long entry_ip;
+ struct bpf_uprobe *uprobe;
+};
+
+static void bpf_uprobe_unregister(struct path *path, struct bpf_uprobe *uprobes,
+ u32 cnt)
+{
+ u32 i;
+
+ for (i = 0; i < cnt; i++) {
+ uprobe_unregister(d_real_inode(path->dentry), uprobes[i].offset,
+ &uprobes[i].consumer);
+ }
+}
+
+static void bpf_uprobe_multi_link_release(struct bpf_link *link)
+{
+ struct bpf_uprobe_multi_link *umulti_link;
+
+ umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+ bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
+}
+
+static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
+{
+ struct bpf_uprobe_multi_link *umulti_link;
+
+ umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+ if (umulti_link->task)
+ put_task_struct(umulti_link->task);
+ path_put(&umulti_link->path);
+ kvfree(umulti_link->uprobes);
+ kfree(umulti_link);
+}
+
+static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
+ .release = bpf_uprobe_multi_link_release,
+ .dealloc = bpf_uprobe_multi_link_dealloc,
+};
+
+static int uprobe_prog_run(struct bpf_uprobe *uprobe,
+ unsigned long entry_ip,
+ struct pt_regs *regs)
+{
+ struct bpf_uprobe_multi_link *link = uprobe->link;
+ struct bpf_uprobe_multi_run_ctx run_ctx = {
+ .entry_ip = entry_ip,
+ .uprobe = uprobe,
+ };
+ struct bpf_prog *prog = link->link.prog;
+ bool sleepable = prog->aux->sleepable;
+ struct bpf_run_ctx *old_run_ctx;
+ int err = 0;
+
+ if (link->task && current != link->task)
+ return 0;
+
+ if (sleepable)
+ rcu_read_lock_trace();
+ else
+ rcu_read_lock();
+
+ migrate_disable();
+
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ err = bpf_prog_run(link->link.prog, regs);
+ bpf_reset_run_ctx(old_run_ctx);
+
+ migrate_enable();
+
+ if (sleepable)
+ rcu_read_unlock_trace();
+ else
+ rcu_read_unlock();
+ return err;
+}
+
+static bool
+uprobe_multi_link_filter(struct uprobe_consumer *con, enum uprobe_filter_ctx ctx,
+ struct mm_struct *mm)
+{
+ struct bpf_uprobe *uprobe;
+
+ uprobe = container_of(con, struct bpf_uprobe, consumer);
+ return uprobe->link->task->mm == mm;
+}
+
+static int
+uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs)
+{
+ struct bpf_uprobe *uprobe;
+
+ uprobe = container_of(con, struct bpf_uprobe, consumer);
+ return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
+}
+
+static int
+uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs)
+{
+ struct bpf_uprobe *uprobe;
+
+ uprobe = container_of(con, struct bpf_uprobe, consumer);
+ return uprobe_prog_run(uprobe, func, regs);
+}
+
+static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
+{
+ struct bpf_uprobe_multi_run_ctx *run_ctx;
+
+ run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
+ return run_ctx->entry_ip;
+}
+
+static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
+{
+ struct bpf_uprobe_multi_run_ctx *run_ctx;
+
+ run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
+ return run_ctx->uprobe->cookie;
+}
+
+int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ struct bpf_uprobe_multi_link *link = NULL;
+ unsigned long __user *uref_ctr_offsets;
+ unsigned long *ref_ctr_offsets = NULL;
+ struct bpf_link_primer link_primer;
+ struct bpf_uprobe *uprobes = NULL;
+ struct task_struct *task = NULL;
+ unsigned long __user *uoffsets;
+ u64 __user *ucookies;
+ void __user *upath;
+ u32 flags, cnt, i;
+ struct path path;
+ char *name;
+ pid_t pid;
+ int err;
+
+ /* no support for 32bit archs yet */
+ if (sizeof(u64) != sizeof(void *))
+ return -EOPNOTSUPP;
+
+ if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
+ return -EINVAL;
+
+ flags = attr->link_create.uprobe_multi.flags;
+ if (flags & ~BPF_F_UPROBE_MULTI_RETURN)
+ return -EINVAL;
+
+ /*
+ * path, offsets and cnt are mandatory,
+ * ref_ctr_offsets and cookies are optional
+ */
+ upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
+ uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
+ cnt = attr->link_create.uprobe_multi.cnt;
+
+ if (!upath || !uoffsets || !cnt)
+ return -EINVAL;
+
+ uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
+ ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
+
+ name = strndup_user(upath, PATH_MAX);
+ if (IS_ERR(name)) {
+ err = PTR_ERR(name);
+ return err;
+ }
+
+ err = kern_path(name, LOOKUP_FOLLOW, &path);
+ kfree(name);
+ if (err)
+ return err;
+
+ if (!d_is_reg(path.dentry)) {
+ err = -EBADF;
+ goto error_path_put;
+ }
+
+ pid = attr->link_create.uprobe_multi.pid;
+ if (pid) {
+ rcu_read_lock();
+ task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
+ rcu_read_unlock();
+ if (!task) {
+ err = -ESRCH;
+ goto error_path_put;
+ }
+ }
+
+ err = -ENOMEM;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ uprobes = kvcalloc(cnt, sizeof(*uprobes), GFP_KERNEL);
+
+ if (!uprobes || !link)
+ goto error_free;
+
+ if (uref_ctr_offsets) {
+ ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
+ if (!ref_ctr_offsets)
+ goto error_free;
+ }
+
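+ /* copy the user supplied arrays and wire up the consumers */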
+ for (i = 0; i < cnt; i++) {
+ if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
+ err = -EFAULT;
+ goto error_free;
+ }
+ if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) {
+ err = -EFAULT;
+ goto error_free;
+ }
+ if (__get_user(uprobes[i].offset, uoffsets + i)) {
+ err = -EFAULT;
+ goto error_free;
+ }
+
+ uprobes[i].link = link;
+
+ if (flags & BPF_F_UPROBE_MULTI_RETURN)
+ uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
+ else
+ uprobes[i].consumer.handler = uprobe_multi_link_handler;
+
+ if (pid)
+ uprobes[i].consumer.filter = uprobe_multi_link_filter;
+ }
+
+ link->cnt = cnt;
+ link->uprobes = uprobes;
+ link->path = path;
+ link->task = task;
+
+ bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
+ &bpf_uprobe_multi_link_lops, prog);
+
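+ /*
+ * Register one consumer per offset; on failure unregister the
+ * i consumers that were already registered before bailing out.
+ */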
+ for (i = 0; i < cnt; i++) {
+ err = uprobe_register_refctr(d_real_inode(link->path.dentry),
+ uprobes[i].offset,
+ ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
+ &uprobes[i].consumer);
+ if (err) {
+ bpf_uprobe_unregister(&path, uprobes, i);
+ goto error_free;
+ }
+ }
+
+ err = bpf_link_prime(&link->link, &link_primer);
+ if (err)
+ goto error_free;
+
+ kvfree(ref_ctr_offsets);
+ return bpf_link_settle(&link_primer);
+
+error_free:
+ kvfree(ref_ctr_offsets);
+ kvfree(uprobes);
+ kfree(link);
+ if (task)
+ put_task_struct(task);
+error_path_put:
+ path_put(&path);
+ return err;
+}
+#else /* !CONFIG_UPROBES */
+int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
+{
+ return 0;
+}
+static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
+{
+ return 0;
+}
+#endif /* CONFIG_UPROBES */
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 913a7a079239..ecde4216201e 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -596,8 +596,8 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
{
static const s64 regs[] = {
0x0123456789abcdefLL, /* dword > 0, word < 0 */
- 0xfedcba9876543210LL, /* dowrd < 0, word > 0 */
- 0xfedcba0198765432LL, /* dowrd < 0, word < 0 */
+ 0xfedcba9876543210LL, /* dword < 0, word > 0 */
+ 0xfedcba0198765432LL, /* dword < 0, word < 0 */
0x0123458967abcdefLL, /* dword > 0, word > 0 */
};
int bits = alu32 ? 32 : 64;
@@ -14567,8 +14567,10 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
if (ret == test->test[i].result) {
pr_cont("%lld ", duration);
} else {
- pr_cont("ret %d != %d ", ret,
- test->test[i].result);
+ s32 res = test->test[i].result;
+
+ pr_cont("ret %d != %d (%#x != %#x)",
+ ret, res, ret, res);
err_cnt++;
}
}
@@ -15045,7 +15047,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
struct bpf_array *progs;
int which, err;
- /* Allocate the table of programs to be used for tall calls */
+ /* Allocate the table of programs to be used for tail calls */
progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
if (!progs)
goto out_nomem;
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 8b6b5e72b217..4a0797f0a154 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -60,9 +60,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
ret = BPF_OK;
} else {
skb_reset_mac_header(skb);
- ret = skb_do_redirect(skb);
- if (ret == 0)
- ret = BPF_REDIRECT;
+ skb_do_redirect(skb);
+ ret = BPF_REDIRECT;
}
break;
@@ -255,7 +254,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
if (unlikely(err))
- return err;
+ return net_xmit_errno(err);
/* ip[6]_finish_output2 understand LWTUNNEL_XMIT_DONE */
return LWTUNNEL_XMIT_DONE;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ce6257860a40..43ba4b77b248 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -216,7 +216,7 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
if (lwtunnel_xmit_redirect(dst->lwtstate)) {
int res = lwtunnel_xmit(skb);
- if (res < 0 || res == LWTUNNEL_XMIT_DONE)
+ if (res != LWTUNNEL_XMIT_CONTINUE)
return res;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index f8a1f6bb3f87..0665e8b09968 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -113,7 +113,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
if (lwtunnel_xmit_redirect(dst->lwtstate)) {
int res = lwtunnel_xmit(skb);
- if (res < 0 || res == LWTUNNEL_XMIT_DONE)
+ if (res != LWTUNNEL_XMIT_CONTINUE)
return res;
}
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index 0e7bfdbff80a..0002cd359fb1 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -37,22 +37,10 @@ tracex4
tracex5
tracex6
tracex7
-xdp1
-xdp2
xdp_adjust_tail
xdp_fwd
-xdp_monitor
-xdp_redirect
-xdp_redirect_cpu
-xdp_redirect_map
-xdp_redirect_map_multi
xdp_router_ipv4
-xdp_rxq_info
-xdp_sample_pkts
xdp_tx_iptunnel
-xdpsock
-xdpsock_ctrl_proc
-xsk_fwd
testfile.img
hbm_out.log
iperf.*
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 595b98d825ce..4ccf4236031c 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -30,8 +30,6 @@ tprogs-y += test_cgrp2_array_pin
tprogs-y += test_cgrp2_attach
tprogs-y += test_cgrp2_sock
tprogs-y += test_cgrp2_sock2
-tprogs-y += xdp1
-tprogs-y += xdp2
tprogs-y += xdp_router_ipv4
tprogs-y += test_current_task_under_cgroup
tprogs-y += trace_event
@@ -41,22 +39,14 @@ tprogs-y += lwt_len_hist
tprogs-y += xdp_tx_iptunnel
tprogs-y += test_map_in_map
tprogs-y += per_socket_stats_example
-tprogs-y += xdp_rxq_info
tprogs-y += syscall_tp
tprogs-y += cpustat
tprogs-y += xdp_adjust_tail
tprogs-y += xdp_fwd
tprogs-y += task_fd_query
-tprogs-y += xdp_sample_pkts
tprogs-y += ibumad
tprogs-y += hbm
-tprogs-y += xdp_redirect_cpu
-tprogs-y += xdp_redirect_map_multi
-tprogs-y += xdp_redirect_map
-tprogs-y += xdp_redirect
-tprogs-y += xdp_monitor
-
# Libbpf dependencies
LIBBPF_SRC = $(TOOLS_PATH)/lib/bpf
LIBBPF_OUTPUT = $(abspath $(BPF_SAMPLES_PATH))/libbpf
@@ -90,9 +80,6 @@ test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
test_cgrp2_attach-objs := test_cgrp2_attach.o
test_cgrp2_sock-objs := test_cgrp2_sock.o
test_cgrp2_sock2-objs := test_cgrp2_sock2.o
-xdp1-objs := xdp1_user.o
-# reuse xdp1 source intentionally
-xdp2-objs := xdp1_user.o
test_current_task_under_cgroup-objs := $(CGROUP_HELPERS) \
test_current_task_under_cgroup_user.o
trace_event-objs := trace_event_user.o $(TRACE_HELPERS)
@@ -102,21 +89,14 @@ lwt_len_hist-objs := lwt_len_hist_user.o
xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
test_map_in_map-objs := test_map_in_map_user.o
per_socket_stats_example-objs := cookie_uid_helper_example.o
-xdp_rxq_info-objs := xdp_rxq_info_user.o
syscall_tp-objs := syscall_tp_user.o
cpustat-objs := cpustat_user.o
xdp_adjust_tail-objs := xdp_adjust_tail_user.o
xdp_fwd-objs := xdp_fwd_user.o
task_fd_query-objs := task_fd_query_user.o $(TRACE_HELPERS)
-xdp_sample_pkts-objs := xdp_sample_pkts_user.o
ibumad-objs := ibumad_user.o
hbm-objs := hbm.o $(CGROUP_HELPERS)
-xdp_redirect_map_multi-objs := xdp_redirect_map_multi_user.o $(XDP_SAMPLE)
-xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o $(XDP_SAMPLE)
-xdp_redirect_map-objs := xdp_redirect_map_user.o $(XDP_SAMPLE)
-xdp_redirect-objs := xdp_redirect_user.o $(XDP_SAMPLE)
-xdp_monitor-objs := xdp_monitor_user.o $(XDP_SAMPLE)
xdp_router_ipv4-objs := xdp_router_ipv4_user.o $(XDP_SAMPLE)
# Tell kbuild to always build the programs
@@ -124,29 +104,27 @@ always-y := $(tprogs-y)
always-y += sockex1_kern.o
always-y += sockex2_kern.o
always-y += sockex3_kern.o
-always-y += tracex1_kern.o
+always-y += tracex1.bpf.o
always-y += tracex2.bpf.o
-always-y += tracex3_kern.o
-always-y += tracex4_kern.o
-always-y += tracex5_kern.o
-always-y += tracex6_kern.o
-always-y += tracex7_kern.o
+always-y += tracex3.bpf.o
+always-y += tracex4.bpf.o
+always-y += tracex5.bpf.o
+always-y += tracex6.bpf.o
+always-y += tracex7.bpf.o
always-y += sock_flags.bpf.o
always-y += test_probe_write_user.bpf.o
always-y += trace_output.bpf.o
always-y += tcbpf1_kern.o
always-y += tc_l2_redirect_kern.o
always-y += lathist_kern.o
-always-y += offwaketime_kern.o
-always-y += spintest_kern.o
+always-y += offwaketime.bpf.o
+always-y += spintest.bpf.o
always-y += map_perf_test.bpf.o
always-y += test_overhead_tp.bpf.o
always-y += test_overhead_raw_tp.bpf.o
always-y += test_overhead_kprobe.bpf.o
always-y += parse_varlen.o parse_simple.o parse_ldabs.o
always-y += test_cgrp2_tc.bpf.o
-always-y += xdp1_kern.o
-always-y += xdp2_kern.o
always-y += test_current_task_under_cgroup.bpf.o
always-y += trace_event_kern.o
always-y += sampleip_kern.o
@@ -162,14 +140,12 @@ always-y += tcp_clamp_kern.o
always-y += tcp_basertt_kern.o
always-y += tcp_tos_reflect_kern.o
always-y += tcp_dumpstats_kern.o
-always-y += xdp_rxq_info_kern.o
always-y += xdp2skb_meta_kern.o
always-y += syscall_tp_kern.o
always-y += cpustat_kern.o
always-y += xdp_adjust_tail_kern.o
always-y += xdp_fwd_kern.o
always-y += task_fd_query_kern.o
-always-y += xdp_sample_pkts_kern.o
always-y += ibumad_kern.o
always-y += hbm_out_kern.o
always-y += hbm_edt_kern.o
@@ -207,11 +183,6 @@ TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib
endif
TPROGS_LDLIBS += $(LIBBPF) -lelf -lz
-TPROGLDLIBS_xdp_monitor += -lm
-TPROGLDLIBS_xdp_redirect += -lm
-TPROGLDLIBS_xdp_redirect_cpu += -lm
-TPROGLDLIBS_xdp_redirect_map += -lm
-TPROGLDLIBS_xdp_redirect_map_multi += -lm
TPROGLDLIBS_xdp_router_ipv4 += -lm -pthread
TPROGLDLIBS_tracex4 += -lrt
TPROGLDLIBS_trace_output += -lrt
@@ -326,14 +297,9 @@ $(obj)/$(TRACE_HELPERS) $(obj)/$(CGROUP_HELPERS) $(obj)/$(XDP_SAMPLE): | libbpf_
.PHONY: libbpf_hdrs
-$(obj)/xdp_redirect_cpu_user.o: $(obj)/xdp_redirect_cpu.skel.h
-$(obj)/xdp_redirect_map_multi_user.o: $(obj)/xdp_redirect_map_multi.skel.h
-$(obj)/xdp_redirect_map_user.o: $(obj)/xdp_redirect_map.skel.h
-$(obj)/xdp_redirect_user.o: $(obj)/xdp_redirect.skel.h
-$(obj)/xdp_monitor_user.o: $(obj)/xdp_monitor.skel.h
$(obj)/xdp_router_ipv4_user.o: $(obj)/xdp_router_ipv4.skel.h
-$(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h
+$(obj)/tracex5.bpf.o: $(obj)/syscall_nrs.h
$(obj)/hbm_out_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
$(obj)/hbm.o: $(src)/hbm.h
$(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
@@ -383,11 +349,6 @@ endef
CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
-$(obj)/xdp_redirect_cpu.bpf.o: $(obj)/xdp_sample.bpf.o
-$(obj)/xdp_redirect_map_multi.bpf.o: $(obj)/xdp_sample.bpf.o
-$(obj)/xdp_redirect_map.bpf.o: $(obj)/xdp_sample.bpf.o
-$(obj)/xdp_redirect.bpf.o: $(obj)/xdp_sample.bpf.o
-$(obj)/xdp_monitor.bpf.o: $(obj)/xdp_sample.bpf.o
$(obj)/xdp_router_ipv4.bpf.o: $(obj)/xdp_sample.bpf.o
$(obj)/%.bpf.o: $(src)/%.bpf.c $(obj)/vmlinux.h $(src)/xdp_sample.bpf.h $(src)/xdp_sample_shared.h
@@ -398,16 +359,9 @@ $(obj)/%.bpf.o: $(src)/%.bpf.c $(obj)/vmlinux.h $(src)/xdp_sample.bpf.h $(src)/x
-I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES) \
-c $(filter %.bpf.c,$^) -o $@
-LINKED_SKELS := xdp_redirect_cpu.skel.h xdp_redirect_map_multi.skel.h \
- xdp_redirect_map.skel.h xdp_redirect.skel.h xdp_monitor.skel.h \
- xdp_router_ipv4.skel.h
+LINKED_SKELS := xdp_router_ipv4.skel.h
clean-files += $(LINKED_SKELS)
-xdp_redirect_cpu.skel.h-deps := xdp_redirect_cpu.bpf.o xdp_sample.bpf.o
-xdp_redirect_map_multi.skel.h-deps := xdp_redirect_map_multi.bpf.o xdp_sample.bpf.o
-xdp_redirect_map.skel.h-deps := xdp_redirect_map.bpf.o xdp_sample.bpf.o
-xdp_redirect.skel.h-deps := xdp_redirect.bpf.o xdp_sample.bpf.o
-xdp_monitor.skel.h-deps := xdp_monitor.bpf.o xdp_sample.bpf.o
xdp_router_ipv4.skel.h-deps := xdp_router_ipv4.bpf.o xdp_sample.bpf.o
LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.bpf.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
@@ -440,7 +394,7 @@ $(obj)/%.o: $(src)/%.c
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
- -fno-asynchronous-unwind-tables \
+ -fno-asynchronous-unwind-tables -fcf-protection \
-I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
-O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \
$(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \
diff --git a/samples/bpf/README.rst b/samples/bpf/README.rst
index f16fc48e55a5..cabe2d216997 100644
--- a/samples/bpf/README.rst
+++ b/samples/bpf/README.rst
@@ -4,6 +4,12 @@ eBPF sample programs
This directory contains test stubs, a verifier test-suite and examples
for using eBPF. The examples use libbpf from tools/lib/bpf.
+Note that the XDP-specific samples have been removed from this directory and
+moved to the xdp-tools repository: https://github.com/xdp-project/xdp-tools
+See the commit messages that removed each tool from this directory for
+details on how to convert specific command invocations from the old samples
+to the utilities in xdp-tools.
+
Build dependencies
==================
diff --git a/samples/bpf/net_shared.h b/samples/bpf/net_shared.h
index e9429af9aa44..88cc52461c98 100644
--- a/samples/bpf/net_shared.h
+++ b/samples/bpf/net_shared.h
@@ -17,6 +17,8 @@
#define TC_ACT_OK 0
#define TC_ACT_SHOT 2
+#define IFNAMSIZ 16
+
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define bpf_ntohs(x) __builtin_bswap16(x)
diff --git a/samples/bpf/offwaketime_kern.c b/samples/bpf/offwaketime.bpf.c
index 23f12b47e9e5..4a65ba76c1b1 100644
--- a/samples/bpf/offwaketime_kern.c
+++ b/samples/bpf/offwaketime.bpf.c
@@ -4,20 +4,15 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <uapi/linux/bpf.h>
-#include <uapi/linux/ptrace.h>
-#include <uapi/linux/perf_event.h>
+#include "vmlinux.h"
#include <linux/version.h>
-#include <linux/sched.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
-#define _(P) \
- ({ \
- typeof(P) val; \
- bpf_probe_read_kernel(&val, sizeof(val), &(P)); \
- val; \
- })
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH 127
+#endif
#define MINBLOCK_US 1
#define MAX_ENTRIES 10000
@@ -67,11 +62,9 @@ struct {
SEC("kprobe/try_to_wake_up")
int waker(struct pt_regs *ctx)
{
- struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
+ struct task_struct *p = (void *)PT_REGS_PARM1_CORE(ctx);
+ u32 pid = BPF_CORE_READ(p, pid);
struct wokeby_t woke;
- u32 pid;
-
- pid = _(p->pid);
bpf_get_current_comm(&woke.name, sizeof(woke.name));
woke.ret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);
@@ -111,28 +104,18 @@ static inline int update_counts(void *ctx, u32 pid, u64 delta)
#if 1
/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */
-struct sched_switch_args {
- unsigned long long pad;
- char prev_comm[TASK_COMM_LEN];
- int prev_pid;
- int prev_prio;
- long long prev_state;
- char next_comm[TASK_COMM_LEN];
- int next_pid;
- int next_prio;
-};
SEC("tracepoint/sched/sched_switch")
-int oncpu(struct sched_switch_args *ctx)
+int oncpu(struct trace_event_raw_sched_switch *ctx)
{
/* record previous thread sleep time */
u32 pid = ctx->prev_pid;
#else
-SEC("kprobe/finish_task_switch")
+SEC("kprobe.multi/finish_task_switch*")
int oncpu(struct pt_regs *ctx)
{
- struct task_struct *p = (void *) PT_REGS_PARM1(ctx);
+ struct task_struct *p = (void *)PT_REGS_PARM1_CORE(ctx);
/* record previous thread sleep time */
- u32 pid = _(p->pid);
+ u32 pid = BPF_CORE_READ(p, pid);
#endif
u64 delta, ts, *tsp;
diff --git a/samples/bpf/offwaketime_user.c b/samples/bpf/offwaketime_user.c
index b6eedcb98fb9..5557b5393642 100644
--- a/samples/bpf/offwaketime_user.c
+++ b/samples/bpf/offwaketime_user.c
@@ -105,7 +105,7 @@ int main(int argc, char **argv)
return 2;
}
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest.bpf.c
index 455da77319d9..cba5a9d50783 100644
--- a/samples/bpf/spintest_kern.c
+++ b/samples/bpf/spintest.bpf.c
@@ -4,14 +4,15 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
+#include "vmlinux.h"
#include <linux/version.h>
-#include <uapi/linux/bpf.h>
-#include <uapi/linux/perf_event.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH 127
+#endif
+
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, long);
@@ -46,20 +47,10 @@ int foo(struct pt_regs *ctx) \
}
/* add kprobes to all possible *spin* functions */
-SEC("kprobe/spin_unlock")PROG(p1)
-SEC("kprobe/spin_lock")PROG(p2)
-SEC("kprobe/mutex_spin_on_owner")PROG(p3)
-SEC("kprobe/rwsem_spin_on_owner")PROG(p4)
-SEC("kprobe/spin_unlock_irqrestore")PROG(p5)
-SEC("kprobe/_raw_spin_unlock_irqrestore")PROG(p6)
-SEC("kprobe/_raw_spin_unlock_bh")PROG(p7)
-SEC("kprobe/_raw_spin_unlock")PROG(p8)
-SEC("kprobe/_raw_spin_lock_irqsave")PROG(p9)
-SEC("kprobe/_raw_spin_trylock_bh")PROG(p10)
-SEC("kprobe/_raw_spin_lock_irq")PROG(p11)
-SEC("kprobe/_raw_spin_trylock")PROG(p12)
-SEC("kprobe/_raw_spin_lock")PROG(p13)
-SEC("kprobe/_raw_spin_lock_bh")PROG(p14)
+SEC("kprobe.multi/spin_*lock*")PROG(spin_lock)
+SEC("kprobe.multi/*_spin_on_owner")PROG(spin_on_owner)
+SEC("kprobe.multi/_raw_spin_*lock*")PROG(raw_spin_lock)
+
/* and to inner bpf helpers */
SEC("kprobe/htab_map_update_elem")PROG(p15)
SEC("kprobe/__htab_percpu_map_update_elem")PROG(p16)
diff --git a/samples/bpf/spintest_user.c b/samples/bpf/spintest_user.c
index aadac14f748a..55971edb1088 100644
--- a/samples/bpf/spintest_user.c
+++ b/samples/bpf/spintest_user.c
@@ -9,13 +9,12 @@
int main(int ac, char **argv)
{
- char filename[256], symbol[256];
struct bpf_object *obj = NULL;
struct bpf_link *links[20];
long key, next_key, value;
struct bpf_program *prog;
int map_fd, i, j = 0;
- const char *section;
+ char filename[256];
struct ksym *sym;
if (load_kallsyms()) {
@@ -23,7 +22,7 @@ int main(int ac, char **argv)
return 2;
}
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
@@ -44,20 +43,13 @@ int main(int ac, char **argv)
}
bpf_object__for_each_program(prog, obj) {
- section = bpf_program__section_name(prog);
- if (sscanf(section, "kprobe/%s", symbol) != 1)
- continue;
-
- /* Attach prog only when symbol exists */
- if (ksym_get_addr(symbol)) {
- links[j] = bpf_program__attach(prog);
- if (libbpf_get_error(links[j])) {
- fprintf(stderr, "bpf_program__attach failed\n");
- links[j] = NULL;
- goto cleanup;
- }
- j++;
+ links[j] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[j])) {
+ fprintf(stderr, "bpf_program__attach failed\n");
+ links[j] = NULL;
+ goto cleanup;
}
+ j++;
}
for (i = 0; i < 5; i++) {
diff --git a/samples/bpf/test_map_in_map.bpf.c b/samples/bpf/test_map_in_map.bpf.c
index 1883559e5977..9f030f9c4e1b 100644
--- a/samples/bpf/test_map_in_map.bpf.c
+++ b/samples/bpf/test_map_in_map.bpf.c
@@ -103,19 +103,15 @@ static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port)
return result ? *result : -ENOENT;
}
-SEC("kprobe/__sys_connect")
-int trace_sys_connect(struct pt_regs *ctx)
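+/* The ksyscall section and the BPF_KSYSCALL macro let libbpf resolve the
+ * arch-specific syscall entry point and unpack the syscall arguments,
+ * replacing the manual PT_REGS_PARM*_CORE() reads removed below.
+ */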
+SEC("ksyscall/connect")
+int BPF_KSYSCALL(trace_sys_connect, unsigned int fd, struct sockaddr_in6 *in6, int addrlen)
{
- struct sockaddr_in6 *in6;
u16 test_case, port, dst6[8];
- int addrlen, ret, inline_ret, ret_key = 0;
+ int ret, inline_ret, ret_key = 0;
u32 port_key;
void *outer_map, *inner_map;
bool inline_hash = false;
- in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(ctx);
- addrlen = (int)PT_REGS_PARM3_CORE(ctx);
-
if (addrlen != sizeof(*in6))
return 0;
diff --git a/samples/bpf/test_overhead_kprobe.bpf.c b/samples/bpf/test_overhead_kprobe.bpf.c
index c3528731e0e1..668cf5259c60 100644
--- a/samples/bpf/test_overhead_kprobe.bpf.c
+++ b/samples/bpf/test_overhead_kprobe.bpf.c
@@ -8,13 +8,7 @@
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-
-#define _(P) \
- ({ \
- typeof(P) val = 0; \
- bpf_probe_read_kernel(&val, sizeof(val), &(P)); \
- val; \
- })
+#include <bpf/bpf_core_read.h>
SEC("kprobe/__set_task_comm")
int prog(struct pt_regs *ctx)
@@ -26,14 +20,14 @@ int prog(struct pt_regs *ctx)
u16 oom_score_adj;
u32 pid;
- tsk = (void *)PT_REGS_PARM1(ctx);
+ tsk = (void *)PT_REGS_PARM1_CORE(ctx);
- pid = _(tsk->pid);
- bpf_probe_read_kernel_str(oldcomm, sizeof(oldcomm), &tsk->comm);
- bpf_probe_read_kernel_str(newcomm, sizeof(newcomm),
+ pid = BPF_CORE_READ(tsk, pid);
+ bpf_core_read_str(oldcomm, sizeof(oldcomm), &tsk->comm);
+ bpf_core_read_str(newcomm, sizeof(newcomm),
(void *)PT_REGS_PARM2(ctx));
- signal = _(tsk->signal);
- oom_score_adj = _(signal->oom_score_adj);
+ signal = BPF_CORE_READ(tsk, signal);
+ oom_score_adj = BPF_CORE_READ(signal, oom_score_adj);
return 0;
}
diff --git a/samples/bpf/test_overhead_tp.bpf.c b/samples/bpf/test_overhead_tp.bpf.c
index 8b498328e961..5dc08b587978 100644
--- a/samples/bpf/test_overhead_tp.bpf.c
+++ b/samples/bpf/test_overhead_tp.bpf.c
@@ -8,40 +8,15 @@
#include <bpf/bpf_helpers.h>
/* from /sys/kernel/tracing/events/task/task_rename/format */
-struct task_rename {
- __u64 pad;
- __u32 pid;
- char oldcomm[TASK_COMM_LEN];
- char newcomm[TASK_COMM_LEN];
- __u16 oom_score_adj;
-};
SEC("tracepoint/task/task_rename")
-int prog(struct task_rename *ctx)
+int prog(struct trace_event_raw_task_rename *ctx)
{
return 0;
}
/* from /sys/kernel/tracing/events/fib/fib_table_lookup/format */
-struct fib_table_lookup {
- __u64 pad;
- __u32 tb_id;
- int err;
- int oif;
- int iif;
- __u8 proto;
- __u8 tos;
- __u8 scope;
- __u8 flags;
- __u8 src[4];
- __u8 dst[4];
- __u8 gw4[4];
- __u8 gw6[16];
- __u16 sport;
- __u16 dport;
- char name[16];
-};
SEC("tracepoint/fib/fib_table_lookup")
-int prog2(struct fib_table_lookup *ctx)
+int prog2(struct trace_event_raw_fib_table_lookup *ctx)
{
return 0;
}
diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1.bpf.c
index ef30d2b353b0..0ab39d76ff8f 100644
--- a/samples/bpf/tracex1_kern.c
+++ b/samples/bpf/tracex1.bpf.c
@@ -4,42 +4,35 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <uapi/linux/bpf.h>
+#include "vmlinux.h"
+#include "net_shared.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
-#define _(P) \
- ({ \
- typeof(P) val = 0; \
- bpf_probe_read_kernel(&val, sizeof(val), &(P)); \
- val; \
- })
-
/* kprobe is NOT a stable ABI
* kernel functions can be removed, renamed or completely change semantics.
* Number of arguments and their positions can change, etc.
* In such case this bpf+kprobe example will no longer be meaningful
*/
-SEC("kprobe/__netif_receive_skb_core")
+SEC("kprobe.multi/__netif_receive_skb_core*")
int bpf_prog1(struct pt_regs *ctx)
{
/* attaches to kprobe __netif_receive_skb_core,
* looks for packets on loopback device and prints them
+ * (wildcard is used for avoiding symbol mismatch due to optimization)
*/
char devname[IFNAMSIZ];
struct net_device *dev;
struct sk_buff *skb;
int len;
- /* non-portable! works for the given kernel only */
- bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
- dev = _(skb->dev);
- len = _(skb->len);
+ bpf_core_read(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
+ dev = BPF_CORE_READ(skb, dev);
+ len = BPF_CORE_READ(skb, len);
- bpf_probe_read_kernel(devname, sizeof(devname), dev->name);
+ BPF_CORE_READ_STR_INTO(&devname, dev, name);
if (devname[0] == 'l' && devname[1] == 'o') {
char fmt[] = "skb %p len %d\n";
diff --git a/samples/bpf/tracex1_user.c b/samples/bpf/tracex1_user.c
index 9d4adb7fd834..8c3d9043a2b6 100644
--- a/samples/bpf/tracex1_user.c
+++ b/samples/bpf/tracex1_user.c
@@ -12,7 +12,7 @@ int main(int ac, char **argv)
char filename[256];
FILE *f;
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3.bpf.c
index bde6591cb20c..41f37966f5f5 100644
--- a/samples/bpf/tracex3_kern.c
+++ b/samples/bpf/tracex3.bpf.c
@@ -4,13 +4,17 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
+#include "vmlinux.h"
#include <linux/version.h>
-#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
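+/* key for my_map: a request is identified by (dev, sector); the explicit
+ * _pad keeps the key layout well-defined between the 32-bit dev and the
+ * 64-bit sector fields.
+ */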
+struct start_key {
+ dev_t dev;
+ u32 _pad;
+ sector_t sector;
+};
+
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, long);
@@ -18,16 +22,17 @@ struct {
__uint(max_entries, 4096);
} my_map SEC(".maps");
-/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
- * example will no longer be meaningful
- */
-SEC("kprobe/blk_mq_start_request")
-int bpf_prog1(struct pt_regs *ctx)
+/* from /sys/kernel/tracing/events/block/block_io_start/format */
+SEC("tracepoint/block/block_io_start")
+int bpf_prog1(struct trace_event_raw_block_rq *ctx)
{
- long rq = PT_REGS_PARM1(ctx);
u64 val = bpf_ktime_get_ns();
+ struct start_key key = {
+ .dev = ctx->dev,
+ .sector = ctx->sector
+ };
- bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
+ bpf_map_update_elem(&my_map, &key, &val, BPF_ANY);
return 0;
}
@@ -49,21 +54,26 @@ struct {
__uint(max_entries, SLOTS);
} lat_map SEC(".maps");
-SEC("kprobe/__blk_account_io_done")
-int bpf_prog2(struct pt_regs *ctx)
+/* from /sys/kernel/tracing/events/block/block_io_done/format */
+SEC("tracepoint/block/block_io_done")
+int bpf_prog2(struct trace_event_raw_block_rq *ctx)
{
- long rq = PT_REGS_PARM1(ctx);
+ struct start_key key = {
+ .dev = ctx->dev,
+ .sector = ctx->sector
+ };
+
u64 *value, l, base;
u32 index;
- value = bpf_map_lookup_elem(&my_map, &rq);
+ value = bpf_map_lookup_elem(&my_map, &key);
if (!value)
return 0;
u64 cur_time = bpf_ktime_get_ns();
u64 delta = cur_time - *value;
- bpf_map_delete_elem(&my_map, &rq);
+ bpf_map_delete_elem(&my_map, &key);
/* the lines below are computing index = log10(delta)*10
* using integer arithmetic
diff --git a/samples/bpf/tracex3_user.c b/samples/bpf/tracex3_user.c
index d5eebace31e6..1002eb0323b4 100644
--- a/samples/bpf/tracex3_user.c
+++ b/samples/bpf/tracex3_user.c
@@ -125,7 +125,7 @@ int main(int ac, char **argv)
}
}
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4.bpf.c
index eb0f8fdd14bf..ca826750901a 100644
--- a/samples/bpf/tracex4_kern.c
+++ b/samples/bpf/tracex4.bpf.c
@@ -4,9 +4,8 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <linux/ptrace.h>
+#include "vmlinux.h"
#include <linux/version.h>
-#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/samples/bpf/tracex4_user.c b/samples/bpf/tracex4_user.c
index dee8f0a091ba..a5145ad72cbf 100644
--- a/samples/bpf/tracex4_user.c
+++ b/samples/bpf/tracex4_user.c
@@ -53,7 +53,7 @@ int main(int ac, char **argv)
char filename[256];
int map_fd, j = 0;
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5.bpf.c
index 64a1f7550d7e..4d3d6c9b25fa 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5.bpf.c
@@ -4,15 +4,15 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
-#include <linux/ptrace.h>
+#include "vmlinux.h"
+#include "syscall_nrs.h"
#include <linux/version.h>
-#include <uapi/linux/bpf.h>
-#include <uapi/linux/seccomp.h>
#include <uapi/linux/unistd.h>
-#include "syscall_nrs.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#define __stringify(x) #x
#define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
struct {
@@ -47,7 +47,7 @@ PROG(SYS__NR_write)(struct pt_regs *ctx)
{
struct seccomp_data sd;
- bpf_probe_read_kernel(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
+ bpf_core_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
if (sd.args[2] == 512) {
char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
bpf_trace_printk(fmt, sizeof(fmt),
@@ -60,7 +60,7 @@ PROG(SYS__NR_read)(struct pt_regs *ctx)
{
struct seccomp_data sd;
- bpf_probe_read_kernel(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
+ bpf_core_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
if (sd.args[2] > 128 && sd.args[2] <= 1024) {
char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
bpf_trace_printk(fmt, sizeof(fmt),
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index 9d7d79f0d47d..7e2d8397fb98 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -42,7 +42,7 @@ int main(int ac, char **argv)
char filename[256];
FILE *f;
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6.bpf.c
index acad5712d8b4..9b23b4737cfb 100644
--- a/samples/bpf/tracex6_kern.c
+++ b/samples/bpf/tracex6.bpf.c
@@ -1,7 +1,8 @@
-#include <linux/ptrace.h>
+#include "vmlinux.h"
#include <linux/version.h>
-#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -45,13 +46,24 @@ int bpf_prog1(struct pt_regs *ctx)
return 0;
}
-SEC("kprobe/htab_map_lookup_elem")
-int bpf_prog2(struct pt_regs *ctx)
+/*
+ * Since *_map_lookup_elem can't be expected to trigger bpf programs
+ * due to potential deadlocks (bpf_disable_instrumentation), this bpf
+ * program will be attached to bpf_map_copy_value (which is called
+ * from map_lookup_elem) and will only filter the hashtable type.
+ */
+SEC("kprobe/bpf_map_copy_value")
+int BPF_KPROBE(bpf_prog2, struct bpf_map *map)
{
u32 key = bpf_get_smp_processor_id();
struct bpf_perf_event_value *val, buf;
+ enum bpf_map_type type;
int error;
+ type = BPF_CORE_READ(map, map_type);
+ if (type != BPF_MAP_TYPE_HASH)
+ return 0;
+
error = bpf_perf_event_read_value(&counters, key, &buf, sizeof(buf));
if (error)
return 0;
diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c
index 8e83bf2a84a4..ae811ac83bc2 100644
--- a/samples/bpf/tracex6_user.c
+++ b/samples/bpf/tracex6_user.c
@@ -180,7 +180,7 @@ int main(int argc, char **argv)
char filename[256];
int i = 0;
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/tracex7_kern.c b/samples/bpf/tracex7.bpf.c
index c5a92df8ac31..ab8d6704a5a4 100644
--- a/samples/bpf/tracex7_kern.c
+++ b/samples/bpf/tracex7.bpf.c
@@ -1,5 +1,4 @@
-#include <uapi/linux/ptrace.h>
-#include <uapi/linux/bpf.h>
+#include "vmlinux.h"
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
index 8be7ce18d3ba..b10b5e03a226 100644
--- a/samples/bpf/tracex7_user.c
+++ b/samples/bpf/tracex7_user.c
@@ -19,7 +19,7 @@ int main(int argc, char **argv)
return 0;
}
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
obj = bpf_object__open_file(filename, NULL);
if (libbpf_get_error(obj)) {
fprintf(stderr, "ERROR: opening BPF object file failed\n");
diff --git a/samples/bpf/xdp1_kern.c b/samples/bpf/xdp1_kern.c
deleted file mode 100644
index d91f27cbcfa9..000000000000
--- a/samples/bpf/xdp1_kern.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/* Copyright (c) 2016 PLUMgrid
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-#define KBUILD_MODNAME "foo"
-#include <uapi/linux/bpf.h>
-#include <linux/in.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <bpf/bpf_helpers.h>
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, long);
- __uint(max_entries, 256);
-} rxcnt SEC(".maps");
-
-static int parse_ipv4(void *data, u64 nh_off, void *data_end)
-{
- struct iphdr *iph = data + nh_off;
-
- if (iph + 1 > data_end)
- return 0;
- return iph->protocol;
-}
-
-static int parse_ipv6(void *data, u64 nh_off, void *data_end)
-{
- struct ipv6hdr *ip6h = data + nh_off;
-
- if (ip6h + 1 > data_end)
- return 0;
- return ip6h->nexthdr;
-}
-
-#define XDPBUFSIZE 60
-SEC("xdp.frags")
-int xdp_prog1(struct xdp_md *ctx)
-{
- __u8 pkt[XDPBUFSIZE] = {};
- void *data_end = &pkt[XDPBUFSIZE-1];
- void *data = pkt;
- struct ethhdr *eth = data;
- int rc = XDP_DROP;
- long *value;
- u16 h_proto;
- u64 nh_off;
- u32 ipproto;
-
- if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt)))
- return rc;
-
- nh_off = sizeof(*eth);
- if (data + nh_off > data_end)
- return rc;
-
- h_proto = eth->h_proto;
-
- /* Handle VLAN tagged packet */
- if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
- struct vlan_hdr *vhdr;
-
- vhdr = data + nh_off;
- nh_off += sizeof(struct vlan_hdr);
- if (data + nh_off > data_end)
- return rc;
- h_proto = vhdr->h_vlan_encapsulated_proto;
- }
- /* Handle double VLAN tagged packet */
- if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
- struct vlan_hdr *vhdr;
-
- vhdr = data + nh_off;
- nh_off += sizeof(struct vlan_hdr);
- if (data + nh_off > data_end)
- return rc;
- h_proto = vhdr->h_vlan_encapsulated_proto;
- }
-
- if (h_proto == htons(ETH_P_IP))
- ipproto = parse_ipv4(data, nh_off, data_end);
- else if (h_proto == htons(ETH_P_IPV6))
- ipproto = parse_ipv6(data, nh_off, data_end);
- else
- ipproto = 0;
-
- value = bpf_map_lookup_elem(&rxcnt, &ipproto);
- if (value)
- *value += 1;
-
- return rc;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
deleted file mode 100644
index f05e797013e9..000000000000
--- a/samples/bpf/xdp1_user.c
+++ /dev/null
@@ -1,166 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2016 PLUMgrid
- */
-#include <linux/bpf.h>
-#include <linux/if_link.h>
-#include <assert.h>
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <libgen.h>
-#include <net/if.h>
-
-#include "bpf_util.h"
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-static int ifindex;
-static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
-static __u32 prog_id;
-
-static void int_exit(int sig)
-{
- __u32 curr_prog_id = 0;
-
- if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) {
- printf("bpf_xdp_query_id failed\n");
- exit(1);
- }
- if (prog_id == curr_prog_id)
- bpf_xdp_detach(ifindex, xdp_flags, NULL);
- else if (!curr_prog_id)
- printf("couldn't find a prog id on a given interface\n");
- else
- printf("program on interface changed, not removing\n");
- exit(0);
-}
-
-/* simple per-protocol drop counter
- */
-static void poll_stats(int map_fd, int interval)
-{
- unsigned int nr_cpus = bpf_num_possible_cpus();
- __u64 values[nr_cpus], prev[UINT8_MAX] = { 0 };
- int i;
-
- while (1) {
- __u32 key = UINT32_MAX;
-
- sleep(interval);
-
- while (bpf_map_get_next_key(map_fd, &key, &key) == 0) {
- __u64 sum = 0;
-
- assert(bpf_map_lookup_elem(map_fd, &key, values) == 0);
- for (i = 0; i < nr_cpus; i++)
- sum += values[i];
- if (sum > prev[key])
- printf("proto %u: %10llu pkt/s\n",
- key, (sum - prev[key]) / interval);
- prev[key] = sum;
- }
- }
-}
-
-static void usage(const char *prog)
-{
- fprintf(stderr,
- "usage: %s [OPTS] IFACE\n\n"
- "OPTS:\n"
- " -S use skb-mode\n"
- " -N enforce native mode\n"
- " -F force loading prog\n",
- prog);
-}
-
-int main(int argc, char **argv)
-{
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- const char *optstr = "FSN";
- int prog_fd, map_fd, opt;
- struct bpf_program *prog;
- struct bpf_object *obj;
- struct bpf_map *map;
- char filename[256];
- int err;
-
- while ((opt = getopt(argc, argv, optstr)) != -1) {
- switch (opt) {
- case 'S':
- xdp_flags |= XDP_FLAGS_SKB_MODE;
- break;
- case 'N':
- /* default, set below */
- break;
- case 'F':
- xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
- break;
- default:
- usage(basename(argv[0]));
- return 1;
- }
- }
-
- if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
- xdp_flags |= XDP_FLAGS_DRV_MODE;
-
- if (optind == argc) {
- usage(basename(argv[0]));
- return 1;
- }
-
- ifindex = if_nametoindex(argv[optind]);
- if (!ifindex) {
- perror("if_nametoindex");
- return 1;
- }
-
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
- obj = bpf_object__open_file(filename, NULL);
- if (libbpf_get_error(obj))
- return 1;
-
- prog = bpf_object__next_program(obj, NULL);
- bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
-
- err = bpf_object__load(obj);
- if (err)
- return 1;
-
- prog_fd = bpf_program__fd(prog);
-
- map = bpf_object__next_map(obj, NULL);
- if (!map) {
- printf("finding a map in obj file failed\n");
- return 1;
- }
- map_fd = bpf_map__fd(map);
-
- if (!prog_fd) {
- printf("bpf_prog_load_xattr: %s\n", strerror(errno));
- return 1;
- }
-
- signal(SIGINT, int_exit);
- signal(SIGTERM, int_exit);
-
- if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) {
- printf("link set xdp fd failed\n");
- return 1;
- }
-
- err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
- if (err) {
- printf("can't get prog info - %s\n", strerror(errno));
- return err;
- }
- prog_id = info.id;
-
- poll_stats(map_fd, 1);
-
- return 0;
-}
diff --git a/samples/bpf/xdp2_kern.c b/samples/bpf/xdp2_kern.c
deleted file mode 100644
index 8bca674451ed..000000000000
--- a/samples/bpf/xdp2_kern.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/* Copyright (c) 2016 PLUMgrid
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-#define KBUILD_MODNAME "foo"
-#include <uapi/linux/bpf.h>
-#include <linux/in.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <bpf/bpf_helpers.h>
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, long);
- __uint(max_entries, 256);
-} rxcnt SEC(".maps");
-
-static void swap_src_dst_mac(void *data)
-{
- unsigned short *p = data;
- unsigned short dst[3];
-
- dst[0] = p[0];
- dst[1] = p[1];
- dst[2] = p[2];
- p[0] = p[3];
- p[1] = p[4];
- p[2] = p[5];
- p[3] = dst[0];
- p[4] = dst[1];
- p[5] = dst[2];
-}
-
-static int parse_ipv4(void *data, u64 nh_off, void *data_end)
-{
- struct iphdr *iph = data + nh_off;
-
- if (iph + 1 > data_end)
- return 0;
- return iph->protocol;
-}
-
-static int parse_ipv6(void *data, u64 nh_off, void *data_end)
-{
- struct ipv6hdr *ip6h = data + nh_off;
-
- if (ip6h + 1 > data_end)
- return 0;
- return ip6h->nexthdr;
-}
-
-#define XDPBUFSIZE 60
-SEC("xdp.frags")
-int xdp_prog1(struct xdp_md *ctx)
-{
- __u8 pkt[XDPBUFSIZE] = {};
- void *data_end = &pkt[XDPBUFSIZE-1];
- void *data = pkt;
- struct ethhdr *eth = data;
- int rc = XDP_DROP;
- long *value;
- u16 h_proto;
- u64 nh_off;
- u32 ipproto;
-
- if (bpf_xdp_load_bytes(ctx, 0, pkt, sizeof(pkt)))
- return rc;
-
- nh_off = sizeof(*eth);
- if (data + nh_off > data_end)
- return rc;
-
- h_proto = eth->h_proto;
-
- /* Handle VLAN tagged packet */
- if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
- struct vlan_hdr *vhdr;
-
- vhdr = data + nh_off;
- nh_off += sizeof(struct vlan_hdr);
- if (data + nh_off > data_end)
- return rc;
- h_proto = vhdr->h_vlan_encapsulated_proto;
- }
- /* Handle double VLAN tagged packet */
- if (h_proto == htons(ETH_P_8021Q) || h_proto == htons(ETH_P_8021AD)) {
- struct vlan_hdr *vhdr;
-
- vhdr = data + nh_off;
- nh_off += sizeof(struct vlan_hdr);
- if (data + nh_off > data_end)
- return rc;
- h_proto = vhdr->h_vlan_encapsulated_proto;
- }
-
- if (h_proto == htons(ETH_P_IP))
- ipproto = parse_ipv4(data, nh_off, data_end);
- else if (h_proto == htons(ETH_P_IPV6))
- ipproto = parse_ipv6(data, nh_off, data_end);
- else
- ipproto = 0;
-
- value = bpf_map_lookup_elem(&rxcnt, &ipproto);
- if (value)
- *value += 1;
-
- if (ipproto == IPPROTO_UDP) {
- swap_src_dst_mac(data);
-
- if (bpf_xdp_store_bytes(ctx, 0, pkt, sizeof(pkt)))
- return rc;
-
- rc = XDP_TX;
- }
-
- return rc;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_monitor.bpf.c b/samples/bpf/xdp_monitor.bpf.c
deleted file mode 100644
index cfb41e2205f4..000000000000
--- a/samples/bpf/xdp_monitor.bpf.c
+++ /dev/null
@@ -1,8 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
- *
- * XDP monitor tool, based on tracepoints
- */
-#include "xdp_sample.bpf.h"
-
-char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
deleted file mode 100644
index 58015eb2ffae..000000000000
--- a/samples/bpf/xdp_monitor_user.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc. */
-static const char *__doc__=
-"XDP monitor tool, based on tracepoints\n";
-
-static const char *__doc_err_only__=
-" NOTICE: Only tracking XDP redirect errors\n"
-" Enable redirect success stats via '-s/--stats'\n"
-" (which comes with a per packet processing overhead)\n";
-
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <string.h>
-#include <ctype.h>
-#include <unistd.h>
-#include <locale.h>
-#include <getopt.h>
-#include <net/if.h>
-#include <time.h>
-#include <signal.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-#include "bpf_util.h"
-#include "xdp_sample_user.h"
-#include "xdp_monitor.skel.h"
-
-static int mask = SAMPLE_REDIRECT_ERR_CNT | SAMPLE_CPUMAP_ENQUEUE_CNT |
- SAMPLE_CPUMAP_KTHREAD_CNT | SAMPLE_EXCEPTION_CNT |
- SAMPLE_DEVMAP_XMIT_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI;
-
-DEFINE_SAMPLE_INIT(xdp_monitor);
-
-static const struct option long_options[] = {
- { "help", no_argument, NULL, 'h' },
- { "stats", no_argument, NULL, 's' },
- { "interval", required_argument, NULL, 'i' },
- { "verbose", no_argument, NULL, 'v' },
- {}
-};
-
-int main(int argc, char **argv)
-{
- unsigned long interval = 2;
- int ret = EXIT_FAIL_OPTION;
- struct xdp_monitor *skel;
- bool errors_only = true;
- int longindex = 0, opt;
- bool error = true;
-
- /* Parse commands line args */
- while ((opt = getopt_long(argc, argv, "si:vh",
- long_options, &longindex)) != -1) {
- switch (opt) {
- case 's':
- errors_only = false;
- mask |= SAMPLE_REDIRECT_CNT;
- break;
- case 'i':
- interval = strtoul(optarg, NULL, 0);
- break;
- case 'v':
- sample_switch_mode();
- break;
- case 'h':
- error = false;
- default:
- sample_usage(argv, long_options, __doc__, mask, error);
- return ret;
- }
- }
-
- skel = xdp_monitor__open();
- if (!skel) {
- fprintf(stderr, "Failed to xdp_monitor__open: %s\n",
- strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end;
- }
-
- ret = sample_init_pre_load(skel);
- if (ret < 0) {
- fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- ret = xdp_monitor__load(skel);
- if (ret < 0) {
- fprintf(stderr, "Failed to xdp_monitor__load: %s\n", strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- ret = sample_init(skel, mask);
- if (ret < 0) {
- fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- if (errors_only)
- printf("%s", __doc_err_only__);
-
- ret = sample_run(interval, NULL, NULL);
- if (ret < 0) {
- fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
- ret = EXIT_FAIL;
- goto end_destroy;
- }
- ret = EXIT_OK;
-end_destroy:
- xdp_monitor__destroy(skel);
-end:
- sample_exit(ret);
-}
diff --git a/samples/bpf/xdp_redirect.bpf.c b/samples/bpf/xdp_redirect.bpf.c
deleted file mode 100644
index 7c02bacfe96b..000000000000
--- a/samples/bpf/xdp_redirect.bpf.c
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2016 John Fastabend <john.r.fastabend@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#include "vmlinux.h"
-#include "xdp_sample.bpf.h"
-#include "xdp_sample_shared.h"
-
-const volatile int ifindex_out;
-
-SEC("xdp")
-int xdp_redirect_prog(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- u32 key = bpf_get_smp_processor_id();
- struct ethhdr *eth = data;
- struct datarec *rec;
- u64 nh_off;
-
- nh_off = sizeof(*eth);
- if (data + nh_off > data_end)
- return XDP_DROP;
-
- rec = bpf_map_lookup_elem(&rx_cnt, &key);
- if (!rec)
- return XDP_PASS;
- NO_TEAR_INC(rec->processed);
-
- swap_src_dst_mac(data);
- return bpf_redirect(ifindex_out, 0);
-}
-
-/* Redirect require an XDP bpf_prog loaded on the TX device */
-SEC("xdp")
-int xdp_redirect_dummy_prog(struct xdp_md *ctx)
-{
- return XDP_PASS;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_redirect_cpu.bpf.c b/samples/bpf/xdp_redirect_cpu.bpf.c
deleted file mode 100644
index 87c54bfdbb70..000000000000
--- a/samples/bpf/xdp_redirect_cpu.bpf.c
+++ /dev/null
@@ -1,539 +0,0 @@
-/* XDP redirect to CPUs via cpumap (BPF_MAP_TYPE_CPUMAP)
- *
- * GPLv2, Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
- */
-#include "vmlinux.h"
-#include "xdp_sample.bpf.h"
-#include "xdp_sample_shared.h"
-#include "hash_func01.h"
-
-/* Special map type that can XDP_REDIRECT frames to another CPU */
-struct {
- __uint(type, BPF_MAP_TYPE_CPUMAP);
- __uint(key_size, sizeof(u32));
- __uint(value_size, sizeof(struct bpf_cpumap_val));
-} cpu_map SEC(".maps");
-
-/* Set of maps controlling available CPU, and for iterating through
- * selectable redirect CPUs.
- */
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __type(key, u32);
- __type(value, u32);
-} cpus_available SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __type(key, u32);
- __type(value, u32);
- __uint(max_entries, 1);
-} cpus_count SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, u32);
- __uint(max_entries, 1);
-} cpus_iterator SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_DEVMAP);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(struct bpf_devmap_val));
- __uint(max_entries, 1);
-} tx_port SEC(".maps");
-
-char tx_mac_addr[ETH_ALEN];
-
-/* Helper parse functions */
-
-static __always_inline
-bool parse_eth(struct ethhdr *eth, void *data_end,
- u16 *eth_proto, u64 *l3_offset)
-{
- u16 eth_type;
- u64 offset;
-
- offset = sizeof(*eth);
- if ((void *)eth + offset > data_end)
- return false;
-
- eth_type = eth->h_proto;
-
- /* Skip non 802.3 Ethertypes */
- if (__builtin_expect(bpf_ntohs(eth_type) < ETH_P_802_3_MIN, 0))
- return false;
-
- /* Handle VLAN tagged packet */
- if (eth_type == bpf_htons(ETH_P_8021Q) ||
- eth_type == bpf_htons(ETH_P_8021AD)) {
- struct vlan_hdr *vlan_hdr;
-
- vlan_hdr = (void *)eth + offset;
- offset += sizeof(*vlan_hdr);
- if ((void *)eth + offset > data_end)
- return false;
- eth_type = vlan_hdr->h_vlan_encapsulated_proto;
- }
- /* Handle double VLAN tagged packet */
- if (eth_type == bpf_htons(ETH_P_8021Q) ||
- eth_type == bpf_htons(ETH_P_8021AD)) {
- struct vlan_hdr *vlan_hdr;
-
- vlan_hdr = (void *)eth + offset;
- offset += sizeof(*vlan_hdr);
- if ((void *)eth + offset > data_end)
- return false;
- eth_type = vlan_hdr->h_vlan_encapsulated_proto;
- }
-
- *eth_proto = bpf_ntohs(eth_type);
- *l3_offset = offset;
- return true;
-}
-
-static __always_inline
-u16 get_dest_port_ipv4_udp(struct xdp_md *ctx, u64 nh_off)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- struct iphdr *iph = data + nh_off;
- struct udphdr *udph;
-
- if (iph + 1 > data_end)
- return 0;
- if (!(iph->protocol == IPPROTO_UDP))
- return 0;
-
- udph = (void *)(iph + 1);
- if (udph + 1 > data_end)
- return 0;
-
- return bpf_ntohs(udph->dest);
-}
-
-static __always_inline
-int get_proto_ipv4(struct xdp_md *ctx, u64 nh_off)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- struct iphdr *iph = data + nh_off;
-
- if (iph + 1 > data_end)
- return 0;
- return iph->protocol;
-}
-
-static __always_inline
-int get_proto_ipv6(struct xdp_md *ctx, u64 nh_off)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- struct ipv6hdr *ip6h = data + nh_off;
-
- if (ip6h + 1 > data_end)
- return 0;
- return ip6h->nexthdr;
-}
-
-SEC("xdp")
-int xdp_prognum0_no_touch(struct xdp_md *ctx)
-{
- u32 key = bpf_get_smp_processor_id();
- struct datarec *rec;
- u32 *cpu_selected;
- u32 cpu_dest = 0;
- u32 key0 = 0;
-
- /* Only use first entry in cpus_available */
- cpu_selected = bpf_map_lookup_elem(&cpus_available, &key0);
- if (!cpu_selected)
- return XDP_ABORTED;
- cpu_dest = *cpu_selected;
-
- rec = bpf_map_lookup_elem(&rx_cnt, &key);
- if (!rec)
- return XDP_PASS;
- NO_TEAR_INC(rec->processed);
-
- if (cpu_dest >= nr_cpus) {
- NO_TEAR_INC(rec->issue);
- return XDP_ABORTED;
- }
- return bpf_redirect_map(&cpu_map, cpu_dest, 0);
-}
-
-SEC("xdp")
-int xdp_prognum1_touch_data(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- u32 key = bpf_get_smp_processor_id();
- struct ethhdr *eth = data;
- struct datarec *rec;
- u32 *cpu_selected;
- u32 cpu_dest = 0;
- u32 key0 = 0;
- u16 eth_type;
-
- /* Only use first entry in cpus_available */
- cpu_selected = bpf_map_lookup_elem(&cpus_available, &key0);
- if (!cpu_selected)
- return XDP_ABORTED;
- cpu_dest = *cpu_selected;
-
- /* Validate packet length is minimum Eth header size */
- if (eth + 1 > data_end)
- return XDP_ABORTED;
-
- rec = bpf_map_lookup_elem(&rx_cnt, &key);
- if (!rec)
- return XDP_PASS;
- NO_TEAR_INC(rec->processed);
-
- /* Read packet data, and use it (drop non 802.3 Ethertypes) */
- eth_type = eth->h_proto;
- if (bpf_ntohs(eth_type) < ETH_P_802_3_MIN) {
- NO_TEAR_INC(rec->dropped);
- return XDP_DROP;
- }
-
- if (cpu_dest >= nr_cpus) {
- NO_TEAR_INC(rec->issue);
- return XDP_ABORTED;
- }
- return bpf_redirect_map(&cpu_map, cpu_dest, 0);
-}
-
-SEC("xdp")
-int xdp_prognum2_round_robin(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- u32 key = bpf_get_smp_processor_id();
- struct datarec *rec;
- u32 cpu_dest = 0;
- u32 key0 = 0;
-
- u32 *cpu_selected;
- u32 *cpu_iterator;
- u32 *cpu_max;
- u32 cpu_idx;
-
- cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
- if (!cpu_max)
- return XDP_ABORTED;
-
- cpu_iterator = bpf_map_lookup_elem(&cpus_iterator, &key0);
- if (!cpu_iterator)
- return XDP_ABORTED;
- cpu_idx = *cpu_iterator;
-
- *cpu_iterator += 1;
- if (*cpu_iterator == *cpu_max)
- *cpu_iterator = 0;
-
- cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
- if (!cpu_selected)
- return XDP_ABORTED;
- cpu_dest = *cpu_selected;
-
- rec = bpf_map_lookup_elem(&rx_cnt, &key);
- if (!rec)
- return XDP_PASS;
- NO_TEAR_INC(rec->processed);
-
- if (cpu_dest >= nr_cpus) {
- NO_TEAR_INC(rec->issue);
- return XDP_ABORTED;
- }
- return bpf_redirect_map(&cpu_map, cpu_dest, 0);
-}
-
-SEC("xdp")
-int xdp_prognum3_proto_separate(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- u32 key = bpf_get_smp_processor_id();
- struct ethhdr *eth = data;
- u8 ip_proto = IPPROTO_UDP;
- struct datarec *rec;
- u16 eth_proto = 0;
- u64 l3_offset = 0;
- u32 cpu_dest = 0;
- u32 *cpu_lookup;
- u32 cpu_idx = 0;
-
- rec = bpf_map_lookup_elem(&rx_cnt, &key);
- if (!rec)
- return XDP_PASS;
- NO_TEAR_INC(rec->processed);
-
- if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset)))
- return XDP_PASS; /* Just skip */
-
- /* Extract L4 protocol */
- switch (eth_proto) {
- case ETH_P_IP:
- ip_proto = get_proto_ipv4(ctx, l3_offset);
- break;
- case ETH_P_IPV6:
- ip_proto = get_proto_ipv6(ctx, l3_offset);
- break;
- case ETH_P_ARP:
- cpu_idx = 0; /* ARP packet handled on separate CPU */
- break;
- default:
- cpu_idx = 0;
- }
-
- /* Choose CPU based on L4 protocol */
- switch (ip_proto) {
- case IPPROTO_ICMP:
- case IPPROTO_ICMPV6:
- cpu_idx = 2;
- break;
- case IPPROTO_TCP:
- cpu_idx = 0;
- break;
- case IPPROTO_UDP:
- cpu_idx = 1;
- break;
- default:
- cpu_idx = 0;
- }
-
- cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
- if (!cpu_lookup)
- return XDP_ABORTED;
- cpu_dest = *cpu_lookup;
-
- if (cpu_dest >= nr_cpus) {
- NO_TEAR_INC(rec->issue);
- return XDP_ABORTED;
- }
- return bpf_redirect_map(&cpu_map, cpu_dest, 0);
-}
-
-SEC("xdp")
-int xdp_prognum4_ddos_filter_pktgen(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- u32 key = bpf_get_smp_processor_id();
- struct ethhdr *eth = data;
- u8 ip_proto = IPPROTO_UDP;
- struct datarec *rec;
- u16 eth_proto = 0;
- u64 l3_offset = 0;
- u32 cpu_dest = 0;
- u32 *cpu_lookup;
- u32 cpu_idx = 0;
- u16 dest_port;
-
- rec = bpf_map_lookup_elem(&rx_cnt, &key);
- if (!rec)
- return XDP_PASS;
- NO_TEAR_INC(rec->processed);
-
- if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset)))
- return XDP_PASS; /* Just skip */
-
- /* Extract L4 protocol */
- switch (eth_proto) {
- case ETH_P_IP:
- ip_proto = get_proto_ipv4(ctx, l3_offset);
- break;
- case ETH_P_IPV6:
- ip_proto = get_proto_ipv6(ctx, l3_offset);
- break;
- case ETH_P_ARP:
- cpu_idx = 0; /* ARP packet handled on separate CPU */
- break;
- default:
- cpu_idx = 0;
- }
-
- /* Choose CPU based on L4 protocol */
- switch (ip_proto) {
- case IPPROTO_ICMP:
- case IPPROTO_ICMPV6:
- cpu_idx = 2;
- break;
- case IPPROTO_TCP:
- cpu_idx = 0;
- break;
- case IPPROTO_UDP:
- cpu_idx = 1;
- /* DDoS filter UDP port 9 (pktgen) */
- dest_port = get_dest_port_ipv4_udp(ctx, l3_offset);
- if (dest_port == 9) {
- NO_TEAR_INC(rec->dropped);
- return XDP_DROP;
- }
- break;
- default:
- cpu_idx = 0;
- }
-
- cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
- if (!cpu_lookup)
- return XDP_ABORTED;
- cpu_dest = *cpu_lookup;
-
- if (cpu_dest >= nr_cpus) {
- NO_TEAR_INC(rec->issue);
- return XDP_ABORTED;
- }
- return bpf_redirect_map(&cpu_map, cpu_dest, 0);
-}
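/* Editor's note: get_dest_port_ipv4_udp() used above is defined earlier
 * in this file (outside this hunk). The sketch below is a hypothetical
 * reconstruction of what such a helper has to do to satisfy the
 * verifier, not the exact deleted code; bpf_ntohs() is assumed to be
 * available via bpf_endian.h / xdp_sample.bpf.h.
 */
static __always_inline
u16 get_dest_port_ipv4_udp_sketch(struct xdp_md *ctx, u64 nh_off)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct iphdr *iph = data + nh_off;
	struct udphdr *udph;

	if (iph + 1 > data_end)
		return 0;
	if (iph->protocol != IPPROTO_UDP)
		return 0;
	/* ihl counts 32-bit words: skip the variable-length IPv4 header */
	udph = (void *)iph + iph->ihl * 4;
	if (udph + 1 > data_end)
		return 0;
	return bpf_ntohs(udph->dest); /* dest port in host byte order */
}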
-
-/* Hashing initval */
-#define INITVAL 15485863
-
-static __always_inline
-u32 get_ipv4_hash_ip_pair(struct xdp_md *ctx, u64 nh_off)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- struct iphdr *iph = data + nh_off;
- u32 cpu_hash;
-
- if (iph + 1 > data_end)
- return 0;
-
- cpu_hash = iph->saddr + iph->daddr;
- cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + iph->protocol);
-
- return cpu_hash;
-}
-
-static __always_inline
-u32 get_ipv6_hash_ip_pair(struct xdp_md *ctx, u64 nh_off)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- struct ipv6hdr *ip6h = data + nh_off;
- u32 cpu_hash;
-
- if (ip6h + 1 > data_end)
- return 0;
-
- cpu_hash = ip6h->saddr.in6_u.u6_addr32[0] + ip6h->daddr.in6_u.u6_addr32[0];
- cpu_hash += ip6h->saddr.in6_u.u6_addr32[1] + ip6h->daddr.in6_u.u6_addr32[1];
- cpu_hash += ip6h->saddr.in6_u.u6_addr32[2] + ip6h->daddr.in6_u.u6_addr32[2];
- cpu_hash += ip6h->saddr.in6_u.u6_addr32[3] + ip6h->daddr.in6_u.u6_addr32[3];
- cpu_hash = SuperFastHash((char *)&cpu_hash, 4, INITVAL + ip6h->nexthdr);
-
- return cpu_hash;
-}
-
-/* Load-balance traffic based on hashing the IP addresses + L4 protocol.
- * The hashing scheme is symmetric, meaning that swapping IP src/dest
- * still hits the same CPU.
- */
-SEC("xdp")
-int xdp_prognum5_lb_hash_ip_pairs(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- u32 key = bpf_get_smp_processor_id();
- struct ethhdr *eth = data;
- struct datarec *rec;
- u16 eth_proto = 0;
- u64 l3_offset = 0;
- u32 cpu_dest = 0;
- u32 cpu_idx = 0;
- u32 *cpu_lookup;
- u32 key0 = 0;
- u32 *cpu_max;
- u32 cpu_hash;
-
- rec = bpf_map_lookup_elem(&rx_cnt, &key);
- if (!rec)
- return XDP_PASS;
- NO_TEAR_INC(rec->processed);
-
- cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
- if (!cpu_max)
- return XDP_ABORTED;
-
- if (!(parse_eth(eth, data_end, &eth_proto, &l3_offset)))
- return XDP_PASS; /* Just skip */
-
- /* Hash for IPv4 and IPv6 */
- switch (eth_proto) {
- case ETH_P_IP:
- cpu_hash = get_ipv4_hash_ip_pair(ctx, l3_offset);
- break;
- case ETH_P_IPV6:
- cpu_hash = get_ipv6_hash_ip_pair(ctx, l3_offset);
- break;
- case ETH_P_ARP: /* ARP packet handled on CPU idx 0 */
- default:
- cpu_hash = 0;
- }
-
- /* Choose CPU based on hash */
- cpu_idx = cpu_hash % *cpu_max;
-
- cpu_lookup = bpf_map_lookup_elem(&cpus_available, &cpu_idx);
- if (!cpu_lookup)
- return XDP_ABORTED;
- cpu_dest = *cpu_lookup;
-
- if (cpu_dest >= nr_cpus) {
- NO_TEAR_INC(rec->issue);
- return XDP_ABORTED;
- }
- return bpf_redirect_map(&cpu_map, cpu_dest, 0);
-}
-
-SEC("xdp/cpumap")
-int xdp_redirect_cpu_devmap(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- struct ethhdr *eth = data;
- u64 nh_off;
-
- nh_off = sizeof(*eth);
- if (data + nh_off > data_end)
- return XDP_DROP;
-
- swap_src_dst_mac(data);
- return bpf_redirect_map(&tx_port, 0, 0);
-}
-
-SEC("xdp/cpumap")
-int xdp_redirect_cpu_pass(struct xdp_md *ctx)
-{
- return XDP_PASS;
-}
-
-SEC("xdp/cpumap")
-int xdp_redirect_cpu_drop(struct xdp_md *ctx)
-{
- return XDP_DROP;
-}
-
-SEC("xdp/devmap")
-int xdp_redirect_egress_prog(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- struct ethhdr *eth = data;
- u64 nh_off;
-
- nh_off = sizeof(*eth);
- if (data + nh_off > data_end)
- return XDP_DROP;
-
- __builtin_memcpy(eth->h_source, (const char *)tx_mac_addr, ETH_ALEN);
-
- return XDP_PASS;
-}
-
-char _license[] SEC("license") = "GPL";
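The symmetry claimed for xdp_prognum5_lb_hash_ip_pairs above comes from the commutative saddr + daddr sum: both directions of a flow feed SuperFastHash the same input and the same INITVAL + protocol seed. A stand-alone sanity check of that property (plain userspace C; the stub hash merely stands in for SuperFastHash, which lives in the sample headers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Any hash works here: the symmetry comes from the commutative sum
 * below, not from the hash function itself. */
static uint32_t stub_hash(uint32_t key, uint32_t seed)
{
	return (key ^ seed) * 2654435761u; /* stand-in for SuperFastHash */
}

static uint32_t pair_hash(uint32_t saddr, uint32_t daddr, uint8_t proto)
{
	uint32_t sum = saddr + daddr; /* unchanged by a src/dest swap */

	return stub_hash(sum, 15485863 + proto); /* INITVAL + protocol */
}

int main(void)
{
	uint32_t a = 0x0a000001, b = 0xc0a80101; /* 10.0.0.1, 192.168.1.1 */

	assert(pair_hash(a, b, 6) == pair_hash(b, a, 6)); /* TCP flow */
	printf("symmetric hash: 0x%08x\n", (unsigned)pair_hash(a, b, 6));
	return 0;
}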
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
deleted file mode 100644
index e1458405e2ba..000000000000
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ /dev/null
@@ -1,559 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
- */
-static const char *__doc__ =
-"XDP CPU redirect tool, using BPF_MAP_TYPE_CPUMAP\n"
-"Usage: xdp_redirect_cpu -d <IFINDEX|IFNAME> -c 0 ... -c N\n"
-"Valid specification for CPUMAP BPF program:\n"
-" --mprog-name/-e pass (use built-in XDP_PASS program)\n"
-" --mprog-name/-e drop (use built-in XDP_DROP program)\n"
-" --redirect-device/-r <ifindex|ifname> (use built-in DEVMAP redirect program)\n"
-" Custom CPUMAP BPF program:\n"
-" --mprog-filename/-f <filename> --mprog-name/-e <program>\n"
-" Optionally, also pass --redirect-map/-m and --redirect-device/-r together\n"
-" to configure DEVMAP in BPF object <filename>\n";
-
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <string.h>
-#include <unistd.h>
-#include <locale.h>
-#include <sys/sysinfo.h>
-#include <getopt.h>
-#include <net/if.h>
-#include <time.h>
-#include <linux/limits.h>
-#include <arpa/inet.h>
-#include <linux/if_link.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-#include "bpf_util.h"
-#include "xdp_sample_user.h"
-#include "xdp_redirect_cpu.skel.h"
-
-static int map_fd;
-static int avail_fd;
-static int count_fd;
-
-static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT |
- SAMPLE_CPUMAP_ENQUEUE_CNT | SAMPLE_CPUMAP_KTHREAD_CNT |
- SAMPLE_EXCEPTION_CNT;
-
-DEFINE_SAMPLE_INIT(xdp_redirect_cpu);
-
-static const struct option long_options[] = {
- { "help", no_argument, NULL, 'h' },
- { "dev", required_argument, NULL, 'd' },
- { "skb-mode", no_argument, NULL, 'S' },
- { "progname", required_argument, NULL, 'p' },
- { "qsize", required_argument, NULL, 'q' },
- { "cpu", required_argument, NULL, 'c' },
- { "stress-mode", no_argument, NULL, 'x' },
- { "force", no_argument, NULL, 'F' },
- { "interval", required_argument, NULL, 'i' },
- { "verbose", no_argument, NULL, 'v' },
- { "stats", no_argument, NULL, 's' },
- { "mprog-name", required_argument, NULL, 'e' },
- { "mprog-filename", required_argument, NULL, 'f' },
- { "redirect-device", required_argument, NULL, 'r' },
- { "redirect-map", required_argument, NULL, 'm' },
- {}
-};
-
-static void print_avail_progs(struct bpf_object *obj)
-{
- struct bpf_program *pos;
-
- printf(" Programs to be used for -p/--progname:\n");
- bpf_object__for_each_program(pos, obj) {
- if (bpf_program__type(pos) == BPF_PROG_TYPE_XDP) {
- if (!strncmp(bpf_program__name(pos), "xdp_prognum",
- sizeof("xdp_prognum") - 1))
- printf(" %s\n", bpf_program__name(pos));
- }
- }
-}
-
-static void usage(char *argv[], const struct option *long_options,
- const char *doc, int mask, bool error, struct bpf_object *obj)
-{
- sample_usage(argv, long_options, doc, mask, error);
- print_avail_progs(obj);
-}
-
-static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
- __u32 avail_idx, bool new)
-{
- __u32 curr_cpus_count = 0;
- __u32 key = 0;
- int ret;
-
-	/* Add a CPU entry to the cpumap, as this allocates a cpu entry in
-	 * the kernel for the cpu.
-	 */
- ret = bpf_map_update_elem(map_fd, &cpu, value, 0);
- if (ret < 0) {
- fprintf(stderr, "Create CPU entry failed: %s\n", strerror(errno));
- return ret;
- }
-
-	/* Inform the bpf_progs that a new CPU is available to select
-	 * from via the control maps.
-	 */
- ret = bpf_map_update_elem(avail_fd, &avail_idx, &cpu, 0);
- if (ret < 0) {
- fprintf(stderr, "Add to avail CPUs failed: %s\n", strerror(errno));
- return ret;
- }
-
- /* When not replacing/updating existing entry, bump the count */
- ret = bpf_map_lookup_elem(count_fd, &key, &curr_cpus_count);
- if (ret < 0) {
- fprintf(stderr, "Failed reading curr cpus_count: %s\n",
- strerror(errno));
- return ret;
- }
- if (new) {
- curr_cpus_count++;
- ret = bpf_map_update_elem(count_fd, &key,
- &curr_cpus_count, 0);
- if (ret < 0) {
-			fprintf(stderr, "Failed writing curr cpus_count: %s\n",
- strerror(errno));
- return ret;
- }
- }
-
- printf("%s CPU: %u as idx: %u qsize: %d cpumap_prog_fd: %d (cpus_count: %u)\n",
- new ? "Add new" : "Replace", cpu, avail_idx,
- value->qsize, value->bpf_prog.fd, curr_cpus_count);
-
- return 0;
-}
-
-/* CPUs are zero-indexed. Thus, add a special sentinel default value
- * in map cpus_available to mark CPU indexes that are not configured
- */
-static int mark_cpus_unavailable(void)
-{
- int ret, i, n_cpus = libbpf_num_possible_cpus();
- __u32 invalid_cpu = n_cpus;
-
- for (i = 0; i < n_cpus; i++) {
- ret = bpf_map_update_elem(avail_fd, &i,
- &invalid_cpu, 0);
- if (ret < 0) {
- fprintf(stderr, "Failed marking CPU unavailable: %s\n",
- strerror(errno));
- return ret;
- }
- }
-
- return 0;
-}
-
-/* Stress cpumap management code by concurrently changing underlying cpumap */
-static void stress_cpumap(void *ctx)
-{
- struct bpf_cpumap_val *value = ctx;
-
-	/* Changing qsize will cause the kernel to free and allocate a new
-	 * bpf_cpu_map_entry, with an associated/complicated tear-down
-	 * procedure.
-	 */
- value->qsize = 1024;
- create_cpu_entry(1, value, 0, false);
- value->qsize = 8;
- create_cpu_entry(1, value, 0, false);
- value->qsize = 16000;
- create_cpu_entry(1, value, 0, false);
-}
-
-static int set_cpumap_prog(struct xdp_redirect_cpu *skel,
- const char *redir_interface, const char *redir_map,
- const char *mprog_filename, const char *mprog_name)
-{
- if (mprog_filename) {
- struct bpf_program *prog;
- struct bpf_object *obj;
- int ret;
-
- if (!mprog_name) {
- fprintf(stderr, "BPF program not specified for file %s\n",
- mprog_filename);
- goto end;
- }
- if ((redir_interface && !redir_map) || (!redir_interface && redir_map)) {
- fprintf(stderr, "--redirect-%s specified but --redirect-%s not specified\n",
- redir_interface ? "device" : "map", redir_interface ? "map" : "device");
- goto end;
- }
-
- /* Custom BPF program */
- obj = bpf_object__open_file(mprog_filename, NULL);
- if (!obj) {
- ret = -errno;
-		fprintf(stderr, "Failed to bpf_object__open_file: %s\n",
- strerror(errno));
- return ret;
- }
-
- ret = bpf_object__load(obj);
- if (ret < 0) {
- ret = -errno;
- fprintf(stderr, "Failed to bpf_object__load: %s\n",
- strerror(errno));
- return ret;
- }
-
- if (redir_map) {
- int err, redir_map_fd, ifindex_out, key = 0;
-
- redir_map_fd = bpf_object__find_map_fd_by_name(obj, redir_map);
- if (redir_map_fd < 0) {
- fprintf(stderr, "Failed to bpf_object__find_map_fd_by_name: %s\n",
- strerror(errno));
- return redir_map_fd;
- }
-
- ifindex_out = if_nametoindex(redir_interface);
- if (!ifindex_out)
- ifindex_out = strtoul(redir_interface, NULL, 0);
- if (!ifindex_out) {
- fprintf(stderr, "Bad interface name or index\n");
- return -EINVAL;
- }
-
- err = bpf_map_update_elem(redir_map_fd, &key, &ifindex_out, 0);
- if (err < 0)
- return err;
- }
-
- prog = bpf_object__find_program_by_name(obj, mprog_name);
- if (!prog) {
- ret = -errno;
- fprintf(stderr, "Failed to bpf_object__find_program_by_name: %s\n",
- strerror(errno));
- return ret;
- }
-
- return bpf_program__fd(prog);
- } else {
- if (mprog_name) {
- if (redir_interface || redir_map) {
- fprintf(stderr, "Need to specify --mprog-filename/-f\n");
- goto end;
- }
- if (!strcmp(mprog_name, "pass") || !strcmp(mprog_name, "drop")) {
- /* Use built-in pass/drop programs */
- return *mprog_name == 'p' ? bpf_program__fd(skel->progs.xdp_redirect_cpu_pass)
- : bpf_program__fd(skel->progs.xdp_redirect_cpu_drop);
- } else {
- fprintf(stderr, "Unknown name \"%s\" for built-in BPF program\n",
- mprog_name);
- goto end;
- }
- } else {
- if (redir_map) {
- fprintf(stderr, "Need to specify --mprog-filename, --mprog-name and"
- " --redirect-device with --redirect-map\n");
- goto end;
- }
- if (redir_interface) {
- /* Use built-in devmap redirect */
- struct bpf_devmap_val val = {};
- int ifindex_out, err;
- __u32 key = 0;
-
- ifindex_out = if_nametoindex(redir_interface);
- if (!ifindex_out)
- ifindex_out = strtoul(redir_interface, NULL, 0);
- if (!ifindex_out) {
- fprintf(stderr, "Bad interface name or index\n");
- return -EINVAL;
- }
-
- if (get_mac_addr(ifindex_out, skel->bss->tx_mac_addr) < 0) {
- printf("Get interface %d mac failed\n", ifindex_out);
- return -EINVAL;
- }
-
- val.ifindex = ifindex_out;
- val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_redirect_egress_prog);
- err = bpf_map_update_elem(bpf_map__fd(skel->maps.tx_port), &key, &val, 0);
- if (err < 0)
- return -errno;
-
- return bpf_program__fd(skel->progs.xdp_redirect_cpu_devmap);
- }
- }
- }
-
- /* Disabled */
- return 0;
-end:
- fprintf(stderr, "Invalid options for CPUMAP BPF program\n");
- return -EINVAL;
-}
-
-int main(int argc, char **argv)
-{
- const char *redir_interface = NULL, *redir_map = NULL;
- const char *mprog_filename = NULL, *mprog_name = NULL;
- struct xdp_redirect_cpu *skel;
- struct bpf_map_info info = {};
- struct bpf_cpumap_val value;
- __u32 infosz = sizeof(info);
- int ret = EXIT_FAIL_OPTION;
- unsigned long interval = 2;
- bool stress_mode = false;
- struct bpf_program *prog;
- const char *prog_name;
- bool generic = false;
- bool force = false;
- int added_cpus = 0;
- bool error = true;
- int longindex = 0;
- int add_cpu = -1;
- int ifindex = -1;
- int *cpu, i, opt;
- __u32 qsize;
- int n_cpus;
-
- n_cpus = libbpf_num_possible_cpus();
-
-	/* Notice: Choosing the queue size is very important when the CPU is
-	 * configured with power-saving states.
-	 *
-	 * If the deepest sleep state takes 133 usec to wake up from
-	 * (133/10^6 sec) and the link speed is 10Gbit/s ((10*10^9/8)
-	 * bytes/sec), then (10*10^9/8)*(133/10^6) = 166250 bytes can arrive
-	 * within 133 usec at this speed. With MTU-size packets this is 110
-	 * packets, and with minimum-size Ethernet frames (incl. MAC-preamble
-	 * + intergap, 84 bytes) it is 1979 packets.
-	 *
-	 * The default cpumap queue is set to 2048: the worst case (small
-	 * packets) plus up to 64 packets of kthread-wakeup slack (due to
-	 * xdp_do_flush) is 1979 + 64 = 2043 packets, which still fits.
-	 *
-	 * A sysadmin can configure the system to avoid deep sleep via:
-	 *   tuned-adm profile network-latency
-	 */
- qsize = 2048;
-
- skel = xdp_redirect_cpu__open();
- if (!skel) {
- fprintf(stderr, "Failed to xdp_redirect_cpu__open: %s\n",
- strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end;
- }
-
- ret = sample_init_pre_load(skel);
- if (ret < 0) {
- fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- if (bpf_map__set_max_entries(skel->maps.cpu_map, n_cpus) < 0) {
- fprintf(stderr, "Failed to set max entries for cpu_map map: %s",
- strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- if (bpf_map__set_max_entries(skel->maps.cpus_available, n_cpus) < 0) {
- fprintf(stderr, "Failed to set max entries for cpus_available map: %s",
- strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- cpu = calloc(n_cpus, sizeof(int));
- if (!cpu) {
- fprintf(stderr, "Failed to allocate cpu array\n");
- goto end_destroy;
- }
-
- prog = skel->progs.xdp_prognum5_lb_hash_ip_pairs;
- while ((opt = getopt_long(argc, argv, "d:si:Sxp:f:e:r:m:c:q:Fvh",
- long_options, &longindex)) != -1) {
- switch (opt) {
- case 'd':
- if (strlen(optarg) >= IF_NAMESIZE) {
- fprintf(stderr, "-d/--dev name too long\n");
- usage(argv, long_options, __doc__, mask, true, skel->obj);
- goto end_cpu;
- }
- ifindex = if_nametoindex(optarg);
- if (!ifindex)
- ifindex = strtoul(optarg, NULL, 0);
- if (!ifindex) {
- fprintf(stderr, "Bad interface index or name (%d): %s\n",
- errno, strerror(errno));
- usage(argv, long_options, __doc__, mask, true, skel->obj);
- goto end_cpu;
- }
- break;
- case 's':
- mask |= SAMPLE_REDIRECT_MAP_CNT;
- break;
- case 'i':
- interval = strtoul(optarg, NULL, 0);
- break;
- case 'S':
- generic = true;
- break;
- case 'x':
- stress_mode = true;
- break;
- case 'p':
- /* Selecting eBPF prog to load */
- prog_name = optarg;
- prog = bpf_object__find_program_by_name(skel->obj,
- prog_name);
- if (!prog) {
- fprintf(stderr,
- "Failed to find program %s specified by"
- " option -p/--progname\n",
- prog_name);
- print_avail_progs(skel->obj);
- goto end_cpu;
- }
- break;
- case 'f':
- mprog_filename = optarg;
- break;
- case 'e':
- mprog_name = optarg;
- break;
- case 'r':
- redir_interface = optarg;
- mask |= SAMPLE_DEVMAP_XMIT_CNT_MULTI;
- break;
- case 'm':
- redir_map = optarg;
- break;
- case 'c':
- /* Add multiple CPUs */
- add_cpu = strtoul(optarg, NULL, 0);
- if (add_cpu >= n_cpus) {
-				fprintf(stderr,
-					"--cpu %d too large for cpumap (max %d)\n",
-					add_cpu, n_cpus - 1);
- usage(argv, long_options, __doc__, mask, true, skel->obj);
- goto end_cpu;
- }
- cpu[added_cpus++] = add_cpu;
- break;
- case 'q':
- qsize = strtoul(optarg, NULL, 0);
- break;
- case 'F':
- force = true;
- break;
- case 'v':
- sample_switch_mode();
- break;
- case 'h':
- error = false;
- default:
- usage(argv, long_options, __doc__, mask, error, skel->obj);
- goto end_cpu;
- }
- }
-
- ret = EXIT_FAIL_OPTION;
- if (ifindex == -1) {
- fprintf(stderr, "Required option --dev missing\n");
- usage(argv, long_options, __doc__, mask, true, skel->obj);
- goto end_cpu;
- }
-
- if (add_cpu == -1) {
-		fprintf(stderr, "Required option --cpu missing\n"
-			"Specify the --cpu option multiple times to add more\n");
- usage(argv, long_options, __doc__, mask, true, skel->obj);
- goto end_cpu;
- }
-
- skel->rodata->from_match[0] = ifindex;
- if (redir_interface)
- skel->rodata->to_match[0] = if_nametoindex(redir_interface);
-
- ret = xdp_redirect_cpu__load(skel);
- if (ret < 0) {
- fprintf(stderr, "Failed to xdp_redirect_cpu__load: %s\n",
- strerror(errno));
- goto end_cpu;
- }
-
- ret = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.cpu_map), &info, &infosz);
- if (ret < 0) {
- fprintf(stderr, "Failed bpf_map_get_info_by_fd for cpumap: %s\n",
- strerror(errno));
- goto end_cpu;
- }
-
- skel->bss->cpumap_map_id = info.id;
-
- map_fd = bpf_map__fd(skel->maps.cpu_map);
- avail_fd = bpf_map__fd(skel->maps.cpus_available);
- count_fd = bpf_map__fd(skel->maps.cpus_count);
-
- ret = mark_cpus_unavailable();
- if (ret < 0) {
- fprintf(stderr, "Unable to mark CPUs as unavailable\n");
- goto end_cpu;
- }
-
- ret = sample_init(skel, mask);
- if (ret < 0) {
- fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
- ret = EXIT_FAIL;
- goto end_cpu;
- }
-
- value.bpf_prog.fd = set_cpumap_prog(skel, redir_interface, redir_map,
- mprog_filename, mprog_name);
- if (value.bpf_prog.fd < 0) {
- fprintf(stderr, "Failed to set CPUMAP BPF program: %s\n",
- strerror(-value.bpf_prog.fd));
- usage(argv, long_options, __doc__, mask, true, skel->obj);
- ret = EXIT_FAIL_BPF;
- goto end_cpu;
- }
- value.qsize = qsize;
-
- for (i = 0; i < added_cpus; i++) {
- if (create_cpu_entry(cpu[i], &value, i, true) < 0) {
- fprintf(stderr, "Cannot proceed, exiting\n");
- usage(argv, long_options, __doc__, mask, true, skel->obj);
- goto end_cpu;
- }
- }
-
- ret = EXIT_FAIL_XDP;
- if (sample_install_xdp(prog, ifindex, generic, force) < 0)
- goto end_cpu;
-
- ret = sample_run(interval, stress_mode ? stress_cpumap : NULL, &value);
- if (ret < 0) {
- fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
- ret = EXIT_FAIL;
- goto end_cpu;
- }
- ret = EXIT_OK;
-end_cpu:
- free(cpu);
-end_destroy:
- xdp_redirect_cpu__destroy(skel);
-end:
- sample_exit(ret);
-}
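The queue-sizing comment in main() above can be replayed as a few lines of arithmetic. A small check program, assuming 1514 bytes for an MTU-sized frame on the wire and 84 bytes for a minimum-sized frame including MAC preamble and inter-frame gap (both per the comment):

#include <stdio.h>

int main(void)
{
	double link_Bps = 10e9 / 8;   /* 10 Gbit/s in bytes/sec */
	double wakeup_s = 133e-6;     /* deepest C-state exit latency */
	double bytes = link_Bps * wakeup_s;

	printf("bytes in wakeup window: %.0f\n", bytes);        /* 166250 */
	printf("MTU-size packets      : %.0f\n", bytes / 1514); /* ~110 */
	printf("min-size packets      : %.0f\n", bytes / 84);   /* ~1979 */
	/* 1979 + 64 packets of xdp_do_flush kthread-wakeup slack = 2043,
	 * which still fits in the default qsize of 2048. */
	return 0;
}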
diff --git a/samples/bpf/xdp_redirect_map.bpf.c b/samples/bpf/xdp_redirect_map.bpf.c
deleted file mode 100644
index 8557c278df77..000000000000
--- a/samples/bpf/xdp_redirect_map.bpf.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#define KBUILD_MODNAME "foo"
-
-#include "vmlinux.h"
-#include "xdp_sample.bpf.h"
-#include "xdp_sample_shared.h"
-
-/* The 2nd xdp prog on egress does not support skb mode, so we define two
- * maps, tx_port_general and tx_port_native.
- */
-struct {
- __uint(type, BPF_MAP_TYPE_DEVMAP);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
- __uint(max_entries, 1);
-} tx_port_general SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_DEVMAP);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(struct bpf_devmap_val));
- __uint(max_entries, 1);
-} tx_port_native SEC(".maps");
-
-/* store egress interface mac address */
-const volatile __u8 tx_mac_addr[ETH_ALEN];
-
-static __always_inline int xdp_redirect_map(struct xdp_md *ctx, void *redirect_map)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- u32 key = bpf_get_smp_processor_id();
- struct ethhdr *eth = data;
- struct datarec *rec;
- u64 nh_off;
-
- nh_off = sizeof(*eth);
- if (data + nh_off > data_end)
- return XDP_DROP;
-
- rec = bpf_map_lookup_elem(&rx_cnt, &key);
- if (!rec)
- return XDP_PASS;
- NO_TEAR_INC(rec->processed);
- swap_src_dst_mac(data);
- return bpf_redirect_map(redirect_map, 0, 0);
-}
-
-SEC("xdp")
-int xdp_redirect_map_general(struct xdp_md *ctx)
-{
- return xdp_redirect_map(ctx, &tx_port_general);
-}
-
-SEC("xdp")
-int xdp_redirect_map_native(struct xdp_md *ctx)
-{
- return xdp_redirect_map(ctx, &tx_port_native);
-}
-
-SEC("xdp/devmap")
-int xdp_redirect_map_egress(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- u8 *mac_addr = (u8 *) tx_mac_addr;
- struct ethhdr *eth = data;
- u64 nh_off;
-
- nh_off = sizeof(*eth);
- if (data + nh_off > data_end)
- return XDP_DROP;
-
- barrier_var(mac_addr); /* prevent optimizing out memcpy */
- __builtin_memcpy(eth->h_source, mac_addr, ETH_ALEN);
-
- return XDP_PASS;
-}
-
-/* Redirect requires an XDP bpf_prog loaded on the TX device */
-SEC("xdp")
-int xdp_redirect_dummy_prog(struct xdp_md *ctx)
-{
- return XDP_PASS;
-}
-
-char _license[] SEC("license") = "GPL";
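The reason for keeping both tx_port_general and tx_port_native above is the devmap value type: generic (skb) mode on older kernels only accepts a plain int ifindex, while native mode can carry a struct bpf_devmap_val with an optional egress program fd. A compile-only userspace sketch of the two update calls (the map and program fds are hypothetical; xdp_redirect_map_user.c below does this in full):

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* tx_port_general: the value is a plain ifindex (works in skb mode) */
static int set_general(int map_fd, int ifindex_out)
{
	int key = 0;

	return bpf_map_update_elem(map_fd, &key, &ifindex_out, 0);
}

/* tx_port_native: the value can also carry an egress program fd */
static int set_native(int map_fd, int ifindex_out, int egress_prog_fd)
{
	struct bpf_devmap_val val = {
		.ifindex = ifindex_out,
		.bpf_prog.fd = egress_prog_fd, /* or -1 for no egress prog */
	};
	int key = 0;

	return bpf_map_update_elem(map_fd, &key, &val, 0);
}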
diff --git a/samples/bpf/xdp_redirect_map_multi.bpf.c b/samples/bpf/xdp_redirect_map_multi.bpf.c
deleted file mode 100644
index 8b2fd4ec2c76..000000000000
--- a/samples/bpf/xdp_redirect_map_multi.bpf.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define KBUILD_MODNAME "foo"
-
-#include "vmlinux.h"
-#include "xdp_sample.bpf.h"
-#include "xdp_sample_shared.h"
-
-struct {
- __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(int));
- __uint(max_entries, 32);
-} forward_map_general SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(struct bpf_devmap_val));
- __uint(max_entries, 32);
-} forward_map_native SEC(".maps");
-
-/* map to store egress interfaces mac addresses */
-struct {
- __uint(type, BPF_MAP_TYPE_HASH);
- __type(key, u32);
- __type(value, __be64);
- __uint(max_entries, 32);
-} mac_map SEC(".maps");
-
-static int xdp_redirect_map(struct xdp_md *ctx, void *forward_map)
-{
- u32 key = bpf_get_smp_processor_id();
- struct datarec *rec;
-
- rec = bpf_map_lookup_elem(&rx_cnt, &key);
- if (!rec)
- return XDP_PASS;
- NO_TEAR_INC(rec->processed);
-
- return bpf_redirect_map(forward_map, 0,
- BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
-}
-
-SEC("xdp")
-int xdp_redirect_map_general(struct xdp_md *ctx)
-{
- return xdp_redirect_map(ctx, &forward_map_general);
-}
-
-SEC("xdp")
-int xdp_redirect_map_native(struct xdp_md *ctx)
-{
- return xdp_redirect_map(ctx, &forward_map_native);
-}
-
-SEC("xdp/devmap")
-int xdp_devmap_prog(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- u32 key = ctx->egress_ifindex;
- struct ethhdr *eth = data;
- __be64 *mac;
- u64 nh_off;
-
- nh_off = sizeof(*eth);
- if (data + nh_off > data_end)
- return XDP_DROP;
-
- mac = bpf_map_lookup_elem(&mac_map, &key);
- if (mac)
- __builtin_memcpy(eth->h_source, mac, ETH_ALEN);
-
- return XDP_PASS;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_redirect_map_multi_user.c b/samples/bpf/xdp_redirect_map_multi_user.c
deleted file mode 100644
index 9e24f2705b67..000000000000
--- a/samples/bpf/xdp_redirect_map_multi_user.c
+++ /dev/null
@@ -1,232 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-static const char *__doc__ =
-"XDP multi redirect tool, using BPF_MAP_TYPE_DEVMAP and BPF_F_BROADCAST flag for bpf_redirect_map\n"
-"Usage: xdp_redirect_map_multi <IFINDEX|IFNAME> <IFINDEX|IFNAME> ... <IFINDEX|IFNAME>\n";
-
-#include <linux/bpf.h>
-#include <linux/if_link.h>
-#include <assert.h>
-#include <getopt.h>
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <net/if.h>
-#include <unistd.h>
-#include <libgen.h>
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <linux/if_ether.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-#include "bpf_util.h"
-#include "xdp_sample_user.h"
-#include "xdp_redirect_map_multi.skel.h"
-
-#define MAX_IFACE_NUM 32
-static int ifaces[MAX_IFACE_NUM] = {};
-
-static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT |
- SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT |
- SAMPLE_DEVMAP_XMIT_CNT_MULTI | SAMPLE_SKIP_HEADING;
-
-DEFINE_SAMPLE_INIT(xdp_redirect_map_multi);
-
-static const struct option long_options[] = {
- { "help", no_argument, NULL, 'h' },
- { "skb-mode", no_argument, NULL, 'S' },
- { "force", no_argument, NULL, 'F' },
- { "load-egress", no_argument, NULL, 'X' },
- { "stats", no_argument, NULL, 's' },
- { "interval", required_argument, NULL, 'i' },
- { "verbose", no_argument, NULL, 'v' },
- {}
-};
-
-static int update_mac_map(struct bpf_map *map)
-{
- int mac_map_fd = bpf_map__fd(map);
- unsigned char mac_addr[6];
- unsigned int ifindex;
- int i, ret = -1;
-
- for (i = 0; ifaces[i] > 0; i++) {
- ifindex = ifaces[i];
-
- ret = get_mac_addr(ifindex, mac_addr);
- if (ret < 0) {
- fprintf(stderr, "get interface %d mac failed\n",
- ifindex);
- return ret;
- }
-
- ret = bpf_map_update_elem(mac_map_fd, &ifindex, mac_addr, 0);
- if (ret < 0) {
- fprintf(stderr, "Failed to update mac address for ifindex %d\n",
- ifindex);
- return ret;
- }
- }
-
- return 0;
-}
-
-int main(int argc, char **argv)
-{
- struct bpf_devmap_val devmap_val = {};
- struct xdp_redirect_map_multi *skel;
- struct bpf_program *ingress_prog;
- bool xdp_devmap_attached = false;
- struct bpf_map *forward_map;
- int ret = EXIT_FAIL_OPTION;
- unsigned long interval = 2;
- char ifname[IF_NAMESIZE];
- unsigned int ifindex;
- bool generic = false;
- bool force = false;
- bool tried = false;
- bool error = true;
- int i, opt;
-
- while ((opt = getopt_long(argc, argv, "hSFXi:vs",
- long_options, NULL)) != -1) {
- switch (opt) {
- case 'S':
- generic = true;
- /* devmap_xmit tracepoint not available */
- mask &= ~(SAMPLE_DEVMAP_XMIT_CNT |
- SAMPLE_DEVMAP_XMIT_CNT_MULTI);
- break;
- case 'F':
- force = true;
- break;
- case 'X':
- xdp_devmap_attached = true;
- break;
- case 'i':
- interval = strtoul(optarg, NULL, 0);
- break;
- case 'v':
- sample_switch_mode();
- break;
- case 's':
- mask |= SAMPLE_REDIRECT_MAP_CNT;
- break;
- case 'h':
- error = false;
- default:
- sample_usage(argv, long_options, __doc__, mask, error);
- return ret;
- }
- }
-
- if (argc <= optind + 1) {
- sample_usage(argv, long_options, __doc__, mask, error);
- return ret;
- }
-
- skel = xdp_redirect_map_multi__open();
- if (!skel) {
- fprintf(stderr, "Failed to xdp_redirect_map_multi__open: %s\n",
- strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end;
- }
-
- ret = sample_init_pre_load(skel);
- if (ret < 0) {
- fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- ret = EXIT_FAIL_OPTION;
- for (i = 0; i < MAX_IFACE_NUM && argv[optind + i]; i++) {
- ifaces[i] = if_nametoindex(argv[optind + i]);
- if (!ifaces[i])
- ifaces[i] = strtoul(argv[optind + i], NULL, 0);
- if (!if_indextoname(ifaces[i], ifname)) {
- fprintf(stderr, "Bad interface index or name\n");
- sample_usage(argv, long_options, __doc__, mask, true);
- goto end_destroy;
- }
-
- skel->rodata->from_match[i] = ifaces[i];
- skel->rodata->to_match[i] = ifaces[i];
- }
-
- ret = xdp_redirect_map_multi__load(skel);
- if (ret < 0) {
- fprintf(stderr, "Failed to xdp_redirect_map_multi__load: %s\n",
- strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- if (xdp_devmap_attached) {
- /* Update mac_map with all egress interfaces' mac addr */
- if (update_mac_map(skel->maps.mac_map) < 0) {
- fprintf(stderr, "Updating mac address failed\n");
- ret = EXIT_FAIL;
- goto end_destroy;
- }
- }
-
- ret = sample_init(skel, mask);
- if (ret < 0) {
- fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
- ret = EXIT_FAIL;
- goto end_destroy;
- }
-
- ingress_prog = skel->progs.xdp_redirect_map_native;
- forward_map = skel->maps.forward_map_native;
-
- for (i = 0; ifaces[i] > 0; i++) {
- ifindex = ifaces[i];
-
- ret = EXIT_FAIL_XDP;
-restart:
- /* bind prog_fd to each interface */
- if (sample_install_xdp(ingress_prog, ifindex, generic, force) < 0) {
- if (generic && !tried) {
- fprintf(stderr,
- "Trying fallback to sizeof(int) as value_size for devmap in generic mode\n");
- ingress_prog = skel->progs.xdp_redirect_map_general;
- forward_map = skel->maps.forward_map_general;
- tried = true;
- goto restart;
- }
- goto end_destroy;
- }
-
-		/* Add all the interfaces to the forward group, and attach
-		 * the egress devmap program if one exists
-		 */
- devmap_val.ifindex = ifindex;
- if (xdp_devmap_attached)
- devmap_val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_devmap_prog);
- ret = bpf_map_update_elem(bpf_map__fd(forward_map), &ifindex, &devmap_val, 0);
- if (ret < 0) {
- fprintf(stderr, "Failed to update devmap value: %s\n",
- strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
- }
-
- ret = sample_run(interval, NULL, NULL);
- if (ret < 0) {
- fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
- ret = EXIT_FAIL;
- goto end_destroy;
- }
- ret = EXIT_OK;
-end_destroy:
- xdp_redirect_map_multi__destroy(skel);
-end:
- sample_exit(ret);
-}
diff --git a/samples/bpf/xdp_redirect_map_user.c b/samples/bpf/xdp_redirect_map_user.c
deleted file mode 100644
index c889a1394dc1..000000000000
--- a/samples/bpf/xdp_redirect_map_user.c
+++ /dev/null
@@ -1,228 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
- */
-static const char *__doc__ =
-"XDP redirect tool, using BPF_MAP_TYPE_DEVMAP\n"
-"Usage: xdp_redirect_map <IFINDEX|IFNAME>_IN <IFINDEX|IFNAME>_OUT\n";
-
-#include <linux/bpf.h>
-#include <linux/if_link.h>
-#include <assert.h>
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <string.h>
-#include <net/if.h>
-#include <unistd.h>
-#include <libgen.h>
-#include <getopt.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-#include "bpf_util.h"
-#include "xdp_sample_user.h"
-#include "xdp_redirect_map.skel.h"
-
-static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_MAP_CNT |
- SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI;
-
-DEFINE_SAMPLE_INIT(xdp_redirect_map);
-
-static const struct option long_options[] = {
- { "help", no_argument, NULL, 'h' },
- { "skb-mode", no_argument, NULL, 'S' },
- { "force", no_argument, NULL, 'F' },
- { "load-egress", no_argument, NULL, 'X' },
- { "stats", no_argument, NULL, 's' },
- { "interval", required_argument, NULL, 'i' },
- { "verbose", no_argument, NULL, 'v' },
- {}
-};
-
-static int verbose = 0;
-
-int main(int argc, char **argv)
-{
- struct bpf_devmap_val devmap_val = {};
- bool xdp_devmap_attached = false;
- struct xdp_redirect_map *skel;
- char str[2 * IF_NAMESIZE + 1];
- char ifname_out[IF_NAMESIZE];
- struct bpf_map *tx_port_map;
- char ifname_in[IF_NAMESIZE];
- int ifindex_in, ifindex_out;
- unsigned long interval = 2;
- int ret = EXIT_FAIL_OPTION;
- struct bpf_program *prog;
- bool generic = false;
- bool force = false;
- bool tried = false;
- bool error = true;
- int opt, key = 0;
-
- while ((opt = getopt_long(argc, argv, "hSFXi:vs",
- long_options, NULL)) != -1) {
- switch (opt) {
- case 'S':
- generic = true;
- /* devmap_xmit tracepoint not available */
- mask &= ~(SAMPLE_DEVMAP_XMIT_CNT |
- SAMPLE_DEVMAP_XMIT_CNT_MULTI);
- break;
- case 'F':
- force = true;
- break;
- case 'X':
- xdp_devmap_attached = true;
- break;
- case 'i':
- interval = strtoul(optarg, NULL, 0);
- break;
- case 'v':
- sample_switch_mode();
- verbose = 1;
- break;
- case 's':
- mask |= SAMPLE_REDIRECT_MAP_CNT;
- break;
- case 'h':
- error = false;
- default:
- sample_usage(argv, long_options, __doc__, mask, error);
- return ret;
- }
- }
-
- if (argc <= optind + 1) {
- sample_usage(argv, long_options, __doc__, mask, true);
- goto end;
- }
-
- ifindex_in = if_nametoindex(argv[optind]);
- if (!ifindex_in)
- ifindex_in = strtoul(argv[optind], NULL, 0);
-
- ifindex_out = if_nametoindex(argv[optind + 1]);
- if (!ifindex_out)
- ifindex_out = strtoul(argv[optind + 1], NULL, 0);
-
- if (!ifindex_in || !ifindex_out) {
- fprintf(stderr, "Bad interface index or name\n");
- sample_usage(argv, long_options, __doc__, mask, true);
- goto end;
- }
-
- skel = xdp_redirect_map__open();
- if (!skel) {
- fprintf(stderr, "Failed to xdp_redirect_map__open: %s\n",
- strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end;
- }
-
- ret = sample_init_pre_load(skel);
- if (ret < 0) {
- fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- /* Load 2nd xdp prog on egress. */
- if (xdp_devmap_attached) {
- ret = get_mac_addr(ifindex_out, skel->rodata->tx_mac_addr);
- if (ret < 0) {
- fprintf(stderr, "Failed to get interface %d mac address: %s\n",
- ifindex_out, strerror(-ret));
- ret = EXIT_FAIL;
- goto end_destroy;
- }
- if (verbose)
- printf("Egress ifindex:%d using src MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
- ifindex_out,
- skel->rodata->tx_mac_addr[0], skel->rodata->tx_mac_addr[1],
- skel->rodata->tx_mac_addr[2], skel->rodata->tx_mac_addr[3],
- skel->rodata->tx_mac_addr[4], skel->rodata->tx_mac_addr[5]);
- }
-
- skel->rodata->from_match[0] = ifindex_in;
- skel->rodata->to_match[0] = ifindex_out;
-
- ret = xdp_redirect_map__load(skel);
- if (ret < 0) {
- fprintf(stderr, "Failed to xdp_redirect_map__load: %s\n",
- strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- ret = sample_init(skel, mask);
- if (ret < 0) {
- fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
- ret = EXIT_FAIL;
- goto end_destroy;
- }
-
- prog = skel->progs.xdp_redirect_map_native;
- tx_port_map = skel->maps.tx_port_native;
-restart:
- if (sample_install_xdp(prog, ifindex_in, generic, force) < 0) {
-		/* First try with struct bpf_devmap_val as value for generic
-		 * mode, then fall back to sizeof(int) for older kernels.
-		 */
-		if (generic && !tried) {
-			fprintf(stderr,
-				"Trying fallback to sizeof(int) as value_size for devmap in generic mode\n");
-			prog = skel->progs.xdp_redirect_map_general;
-			tx_port_map = skel->maps.tx_port_general;
-			tried = true;
-			goto restart;
-		}
- ret = EXIT_FAIL_XDP;
- goto end_destroy;
- }
-
- /* Loading dummy XDP prog on out-device */
- sample_install_xdp(skel->progs.xdp_redirect_dummy_prog, ifindex_out, generic, force);
-
- devmap_val.ifindex = ifindex_out;
- if (xdp_devmap_attached)
- devmap_val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_redirect_map_egress);
- ret = bpf_map_update_elem(bpf_map__fd(tx_port_map), &key, &devmap_val, 0);
- if (ret < 0) {
- fprintf(stderr, "Failed to update devmap value: %s\n",
- strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- ret = EXIT_FAIL;
- if (!if_indextoname(ifindex_in, ifname_in)) {
- fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_in,
- strerror(errno));
- goto end_destroy;
- }
-
- if (!if_indextoname(ifindex_out, ifname_out)) {
- fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_out,
- strerror(errno));
- goto end_destroy;
- }
-
- safe_strncpy(str, get_driver_name(ifindex_in), sizeof(str));
- printf("Redirecting from %s (ifindex %d; driver %s) to %s (ifindex %d; driver %s)\n",
- ifname_in, ifindex_in, str, ifname_out, ifindex_out, get_driver_name(ifindex_out));
- snprintf(str, sizeof(str), "%s->%s", ifname_in, ifname_out);
-
- ret = sample_run(interval, NULL, NULL);
- if (ret < 0) {
- fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
- ret = EXIT_FAIL;
- goto end_destroy;
- }
- ret = EXIT_OK;
-end_destroy:
- xdp_redirect_map__destroy(skel);
-end:
- sample_exit(ret);
-}
diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c
deleted file mode 100644
index 8663dd631b6e..000000000000
--- a/samples/bpf/xdp_redirect_user.c
+++ /dev/null
@@ -1,172 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2016 John Fastabend <john.r.fastabend@intel.com>
- */
-static const char *__doc__ =
-"XDP redirect tool, using bpf_redirect helper\n"
-"Usage: xdp_redirect <IFINDEX|IFNAME>_IN <IFINDEX|IFNAME>_OUT\n";
-
-#include <linux/bpf.h>
-#include <linux/if_link.h>
-#include <assert.h>
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <string.h>
-#include <net/if.h>
-#include <unistd.h>
-#include <libgen.h>
-#include <getopt.h>
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-#include "bpf_util.h"
-#include "xdp_sample_user.h"
-#include "xdp_redirect.skel.h"
-
-static int mask = SAMPLE_RX_CNT | SAMPLE_REDIRECT_ERR_CNT |
- SAMPLE_EXCEPTION_CNT | SAMPLE_DEVMAP_XMIT_CNT_MULTI;
-
-DEFINE_SAMPLE_INIT(xdp_redirect);
-
-static const struct option long_options[] = {
- {"help", no_argument, NULL, 'h' },
- {"skb-mode", no_argument, NULL, 'S' },
- {"force", no_argument, NULL, 'F' },
- {"stats", no_argument, NULL, 's' },
- {"interval", required_argument, NULL, 'i' },
- {"verbose", no_argument, NULL, 'v' },
- {}
-};
-
-int main(int argc, char **argv)
-{
- int ifindex_in, ifindex_out, opt;
- char str[2 * IF_NAMESIZE + 1];
- char ifname_out[IF_NAMESIZE];
- char ifname_in[IF_NAMESIZE];
- int ret = EXIT_FAIL_OPTION;
- unsigned long interval = 2;
- struct xdp_redirect *skel;
- bool generic = false;
- bool force = false;
- bool error = true;
-
- while ((opt = getopt_long(argc, argv, "hSFi:vs",
- long_options, NULL)) != -1) {
- switch (opt) {
- case 'S':
- generic = true;
- mask &= ~(SAMPLE_DEVMAP_XMIT_CNT |
- SAMPLE_DEVMAP_XMIT_CNT_MULTI);
- break;
- case 'F':
- force = true;
- break;
- case 'i':
- interval = strtoul(optarg, NULL, 0);
- break;
- case 'v':
- sample_switch_mode();
- break;
- case 's':
- mask |= SAMPLE_REDIRECT_CNT;
- break;
- case 'h':
- error = false;
- default:
- sample_usage(argv, long_options, __doc__, mask, error);
- return ret;
- }
- }
-
- if (argc <= optind + 1) {
- sample_usage(argv, long_options, __doc__, mask, true);
- return ret;
- }
-
- ifindex_in = if_nametoindex(argv[optind]);
- if (!ifindex_in)
- ifindex_in = strtoul(argv[optind], NULL, 0);
-
- ifindex_out = if_nametoindex(argv[optind + 1]);
- if (!ifindex_out)
- ifindex_out = strtoul(argv[optind + 1], NULL, 0);
-
- if (!ifindex_in || !ifindex_out) {
- fprintf(stderr, "Bad interface index or name\n");
- sample_usage(argv, long_options, __doc__, mask, true);
- goto end;
- }
-
- skel = xdp_redirect__open();
- if (!skel) {
- fprintf(stderr, "Failed to xdp_redirect__open: %s\n", strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end;
- }
-
- ret = sample_init_pre_load(skel);
- if (ret < 0) {
- fprintf(stderr, "Failed to sample_init_pre_load: %s\n", strerror(-ret));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- skel->rodata->from_match[0] = ifindex_in;
- skel->rodata->to_match[0] = ifindex_out;
- skel->rodata->ifindex_out = ifindex_out;
-
- ret = xdp_redirect__load(skel);
- if (ret < 0) {
- fprintf(stderr, "Failed to xdp_redirect__load: %s\n", strerror(errno));
- ret = EXIT_FAIL_BPF;
- goto end_destroy;
- }
-
- ret = sample_init(skel, mask);
- if (ret < 0) {
- fprintf(stderr, "Failed to initialize sample: %s\n", strerror(-ret));
- ret = EXIT_FAIL;
- goto end_destroy;
- }
-
- ret = EXIT_FAIL_XDP;
- if (sample_install_xdp(skel->progs.xdp_redirect_prog, ifindex_in,
- generic, force) < 0)
- goto end_destroy;
-
- /* Loading dummy XDP prog on out-device */
- sample_install_xdp(skel->progs.xdp_redirect_dummy_prog, ifindex_out,
- generic, force);
-
- ret = EXIT_FAIL;
- if (!if_indextoname(ifindex_in, ifname_in)) {
- fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_in,
- strerror(errno));
- goto end_destroy;
- }
-
- if (!if_indextoname(ifindex_out, ifname_out)) {
- fprintf(stderr, "Failed to if_indextoname for %d: %s\n", ifindex_out,
- strerror(errno));
- goto end_destroy;
- }
-
- safe_strncpy(str, get_driver_name(ifindex_in), sizeof(str));
- printf("Redirecting from %s (ifindex %d; driver %s) to %s (ifindex %d; driver %s)\n",
- ifname_in, ifindex_in, str, ifname_out, ifindex_out, get_driver_name(ifindex_out));
- snprintf(str, sizeof(str), "%s->%s", ifname_in, ifname_out);
-
- ret = sample_run(interval, NULL, NULL);
- if (ret < 0) {
- fprintf(stderr, "Failed during sample run: %s\n", strerror(-ret));
- ret = EXIT_FAIL;
- goto end_destroy;
- }
- ret = EXIT_OK;
-end_destroy:
- xdp_redirect__destroy(skel);
-end:
- sample_exit(ret);
-}
diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c
deleted file mode 100644
index 5e7459f9bf3e..000000000000
--- a/samples/bpf/xdp_rxq_info_kern.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
- *
- * Example of how to extract XDP RX-queue info
- */
-#include <uapi/linux/bpf.h>
-#include <uapi/linux/if_ether.h>
-#include <uapi/linux/in.h>
-#include <bpf/bpf_helpers.h>
-
-/* Config setup from userspace
- *
- * Userspace sets the ifindex in config_map, to verify that
- * ctx->ingress_ifindex is correct (matches the configured ifindex)
- */
-struct config {
- __u32 action;
- int ifindex;
- __u32 options;
-};
-enum cfg_options_flags {
- NO_TOUCH = 0x0U,
- READ_MEM = 0x1U,
- SWAP_MAC = 0x2U,
-};
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __type(key, int);
- __type(value, struct config);
- __uint(max_entries, 1);
-} config_map SEC(".maps");
-
-/* Common stats data record (shared with userspace) */
-struct datarec {
- __u64 processed;
- __u64 issue;
-};
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, struct datarec);
- __uint(max_entries, 1);
-} stats_global_map SEC(".maps");
-
-#define MAX_RXQs 64
-
-/* Stats per rx_queue_index (per CPU) */
-struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __type(key, u32);
- __type(value, struct datarec);
- __uint(max_entries, MAX_RXQs + 1);
-} rx_queue_index_map SEC(".maps");
-
-static __always_inline
-void swap_src_dst_mac(void *data)
-{
- unsigned short *p = data;
- unsigned short dst[3];
-
- dst[0] = p[0];
- dst[1] = p[1];
- dst[2] = p[2];
- p[0] = p[3];
- p[1] = p[4];
- p[2] = p[5];
- p[3] = dst[0];
- p[4] = dst[1];
- p[5] = dst[2];
-}
-
-SEC("xdp_prog0")
-int xdp_prognum0(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
- struct datarec *rec, *rxq_rec;
- int ingress_ifindex;
- struct config *config;
- u32 key = 0;
-
- /* Global stats record */
- rec = bpf_map_lookup_elem(&stats_global_map, &key);
- if (!rec)
- return XDP_ABORTED;
- rec->processed++;
-
-	/* Accessing ctx->ingress_ifindex causes BPF to rewrite the BPF
-	 * instructions inside the kernel to access xdp_rxq->dev->ifindex
-	 */
- ingress_ifindex = ctx->ingress_ifindex;
-
- config = bpf_map_lookup_elem(&config_map, &key);
- if (!config)
- return XDP_ABORTED;
-
- /* Simple test: check ctx provided ifindex is as expected */
- if (ingress_ifindex != config->ifindex) {
- /* count this error case */
- rec->issue++;
- return XDP_ABORTED;
- }
-
-	/* Update stats per rx_queue_index. Handle the case where
-	 * rx_queue_index is larger than what the stats map can hold.
-	 */
- key = ctx->rx_queue_index;
- if (key >= MAX_RXQs)
- key = MAX_RXQs;
- rxq_rec = bpf_map_lookup_elem(&rx_queue_index_map, &key);
- if (!rxq_rec)
- return XDP_ABORTED;
- rxq_rec->processed++;
- if (key == MAX_RXQs)
- rxq_rec->issue++;
-
- /* Default: Don't touch packet data, only count packets */
- if (unlikely(config->options & (READ_MEM|SWAP_MAC))) {
- struct ethhdr *eth = data;
-
- if (eth + 1 > data_end)
- return XDP_ABORTED;
-
-		/* Avoid the compiler removing this: drop non-802.3 Ethertypes */
- if (ntohs(eth->h_proto) < ETH_P_802_3_MIN)
- return XDP_ABORTED;
-
- /* XDP_TX requires changing MAC-addrs, else HW may drop.
- * Can also be enabled with --swapmac (for test purposes)
- */
- if (unlikely(config->options & SWAP_MAC))
- swap_src_dst_mac(data);
- }
-
- return config->action;
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
deleted file mode 100644
index b95e0ef61f06..000000000000
--- a/samples/bpf/xdp_rxq_info_user.c
+++ /dev/null
@@ -1,614 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
- */
-static const char *__doc__ = " XDP RX-queue info extraction example\n\n"
- "Monitor how many packets per sec (pps) are received\n"
- "per NIC RX queue index and which CPU processed the packet\n"
- ;
-
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <string.h>
-#include <unistd.h>
-#include <locale.h>
-#include <getopt.h>
-#include <net/if.h>
-#include <time.h>
-#include <limits.h>
-#include <arpa/inet.h>
-#include <linux/if_link.h>
-
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-#include "bpf_util.h"
-
-static int ifindex = -1;
-static char ifname_buf[IF_NAMESIZE];
-static char *ifname;
-static __u32 prog_id;
-
-static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
-
-static struct bpf_map *stats_global_map;
-static struct bpf_map *rx_queue_index_map;
-
-/* Exit return codes */
-#define EXIT_OK 0
-#define EXIT_FAIL 1
-#define EXIT_FAIL_OPTION 2
-#define EXIT_FAIL_XDP 3
-#define EXIT_FAIL_BPF 4
-#define EXIT_FAIL_MEM 5
-
-#define FAIL_MEM_SIG INT_MAX
-#define FAIL_STAT_SIG (INT_MAX - 1)
-
-static const struct option long_options[] = {
- {"help", no_argument, NULL, 'h' },
- {"dev", required_argument, NULL, 'd' },
- {"skb-mode", no_argument, NULL, 'S' },
- {"sec", required_argument, NULL, 's' },
- {"no-separators", no_argument, NULL, 'z' },
- {"action", required_argument, NULL, 'a' },
- {"readmem", no_argument, NULL, 'r' },
- {"swapmac", no_argument, NULL, 'm' },
- {"force", no_argument, NULL, 'F' },
- {0, 0, NULL, 0 }
-};
-
-static void int_exit(int sig)
-{
- __u32 curr_prog_id = 0;
-
- if (ifindex > -1) {
- if (bpf_xdp_query_id(ifindex, xdp_flags, &curr_prog_id)) {
- printf("bpf_xdp_query_id failed\n");
- exit(EXIT_FAIL);
- }
- if (prog_id == curr_prog_id) {
- fprintf(stderr,
- "Interrupted: Removing XDP program on ifindex:%d device:%s\n",
- ifindex, ifname);
- bpf_xdp_detach(ifindex, xdp_flags, NULL);
- } else if (!curr_prog_id) {
-			printf("couldn't find a prog id on the given iface\n");
- } else {
- printf("program on interface changed, not removing\n");
- }
- }
-
- if (sig == FAIL_MEM_SIG)
- exit(EXIT_FAIL_MEM);
- else if (sig == FAIL_STAT_SIG)
- exit(EXIT_FAIL);
-
- exit(EXIT_OK);
-}
-
-struct config {
- __u32 action;
- int ifindex;
- __u32 options;
-};
-enum cfg_options_flags {
- NO_TOUCH = 0x0U,
- READ_MEM = 0x1U,
- SWAP_MAC = 0x2U,
-};
-#define XDP_ACTION_MAX (XDP_TX + 1)
-#define XDP_ACTION_MAX_STRLEN 11
-static const char *xdp_action_names[XDP_ACTION_MAX] = {
- [XDP_ABORTED] = "XDP_ABORTED",
- [XDP_DROP] = "XDP_DROP",
- [XDP_PASS] = "XDP_PASS",
- [XDP_TX] = "XDP_TX",
-};
-
-static const char *action2str(int action)
-{
- if (action < XDP_ACTION_MAX)
- return xdp_action_names[action];
- return NULL;
-}
-
-static int parse_xdp_action(char *action_str)
-{
- size_t maxlen;
- __u64 action = -1;
- int i;
-
- for (i = 0; i < XDP_ACTION_MAX; i++) {
- maxlen = XDP_ACTION_MAX_STRLEN;
- if (strncmp(xdp_action_names[i], action_str, maxlen) == 0) {
- action = i;
- break;
- }
- }
- return action;
-}
-
-static void list_xdp_actions(void)
-{
- int i;
-
- printf("Available XDP --action <options>\n");
- for (i = 0; i < XDP_ACTION_MAX; i++)
- printf("\t%s\n", xdp_action_names[i]);
- printf("\n");
-}
-
-static char *options2str(enum cfg_options_flags flag)
-{
- if (flag == NO_TOUCH)
- return "no_touch";
- if (flag & SWAP_MAC)
- return "swapmac";
- if (flag & READ_MEM)
- return "read";
-	fprintf(stderr, "ERR: Unknown config option flags\n");
- int_exit(FAIL_STAT_SIG);
- return "unknown";
-}
-
-static void usage(char *argv[])
-{
- int i;
-
- printf("\nDOCUMENTATION:\n%s\n", __doc__);
- printf(" Usage: %s (options-see-below)\n", argv[0]);
- printf(" Listing options:\n");
- for (i = 0; long_options[i].name != 0; i++) {
- printf(" --%-12s", long_options[i].name);
- if (long_options[i].flag != NULL)
- printf(" flag (internal value:%d)",
- *long_options[i].flag);
- else
- printf(" short-option: -%c",
- long_options[i].val);
- printf("\n");
- }
- printf("\n");
- list_xdp_actions();
-}
-
-#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
-static __u64 gettime(void)
-{
- struct timespec t;
- int res;
-
- res = clock_gettime(CLOCK_MONOTONIC, &t);
- if (res < 0) {
-		fprintf(stderr, "Error with clock_gettime! (%i)\n", res);
- int_exit(FAIL_STAT_SIG);
- }
- return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
-}
-
-/* Common stats data record shared with _kern.c */
-struct datarec {
- __u64 processed;
- __u64 issue;
-};
-struct record {
- __u64 timestamp;
- struct datarec total;
- struct datarec *cpu;
-};
-struct stats_record {
- struct record stats;
- struct record *rxq;
-};
-
-static struct datarec *alloc_record_per_cpu(void)
-{
- unsigned int nr_cpus = bpf_num_possible_cpus();
- struct datarec *array;
-
- array = calloc(nr_cpus, sizeof(struct datarec));
- if (!array) {
- fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
- int_exit(FAIL_MEM_SIG);
- }
- return array;
-}
-
-static struct record *alloc_record_per_rxq(void)
-{
- unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
- struct record *array;
-
- array = calloc(nr_rxqs, sizeof(struct record));
- if (!array) {
- fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
- int_exit(FAIL_MEM_SIG);
- }
- return array;
-}
-
-static struct stats_record *alloc_stats_record(void)
-{
- unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
- struct stats_record *rec;
- int i;
-
- rec = calloc(1, sizeof(struct stats_record));
- if (!rec) {
- fprintf(stderr, "Mem alloc error\n");
- int_exit(FAIL_MEM_SIG);
- }
- rec->rxq = alloc_record_per_rxq();
- for (i = 0; i < nr_rxqs; i++)
- rec->rxq[i].cpu = alloc_record_per_cpu();
-
- rec->stats.cpu = alloc_record_per_cpu();
- return rec;
-}
-
-static void free_stats_record(struct stats_record *r)
-{
- unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
- int i;
-
- for (i = 0; i < nr_rxqs; i++)
- free(r->rxq[i].cpu);
-
- free(r->rxq);
- free(r->stats.cpu);
- free(r);
-}
-
-static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
-{
- /* For percpu maps, userspace gets a value per possible CPU */
- unsigned int nr_cpus = bpf_num_possible_cpus();
- struct datarec values[nr_cpus];
- __u64 sum_processed = 0;
- __u64 sum_issue = 0;
- int i;
-
- if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
- fprintf(stderr,
- "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
- return false;
- }
- /* Get time as close as possible to reading map contents */
- rec->timestamp = gettime();
-
- /* Record and sum values from each CPU */
- for (i = 0; i < nr_cpus; i++) {
- rec->cpu[i].processed = values[i].processed;
- sum_processed += values[i].processed;
- rec->cpu[i].issue = values[i].issue;
- sum_issue += values[i].issue;
- }
- rec->total.processed = sum_processed;
- rec->total.issue = sum_issue;
- return true;
-}
-
-static void stats_collect(struct stats_record *rec)
-{
- int fd, i, max_rxqs;
-
- fd = bpf_map__fd(stats_global_map);
- map_collect_percpu(fd, 0, &rec->stats);
-
- fd = bpf_map__fd(rx_queue_index_map);
- max_rxqs = bpf_map__max_entries(rx_queue_index_map);
- for (i = 0; i < max_rxqs; i++)
- map_collect_percpu(fd, i, &rec->rxq[i]);
-}
-
-static double calc_period(struct record *r, struct record *p)
-{
- double period_ = 0;
- __u64 period = 0;
-
- period = r->timestamp - p->timestamp;
- if (period > 0)
- period_ = ((double) period / NANOSEC_PER_SEC);
-
- return period_;
-}
-
-static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
-{
- __u64 packets = 0;
- __u64 pps = 0;
-
- if (period_ > 0) {
- packets = r->processed - p->processed;
- pps = packets / period_;
- }
- return pps;
-}
-
-static __u64 calc_errs_pps(struct datarec *r,
- struct datarec *p, double period_)
-{
- __u64 packets = 0;
- __u64 pps = 0;
-
- if (period_ > 0) {
- packets = r->issue - p->issue;
- pps = packets / period_;
- }
- return pps;
-}
-
-static void stats_print(struct stats_record *stats_rec,
- struct stats_record *stats_prev,
- int action, __u32 cfg_opt)
-{
- unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
- unsigned int nr_cpus = bpf_num_possible_cpus();
- double pps = 0, err = 0;
- struct record *rec, *prev;
- double t;
- int rxq;
- int i;
-
- /* Header */
- printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s options:%s\n",
- ifname, ifindex, action2str(action), options2str(cfg_opt));
-
- /* stats_global_map */
- {
- char *fmt_rx = "%-15s %-7d %'-11.0f %'-10.0f %s\n";
- char *fm2_rx = "%-15s %-7s %'-11.0f\n";
- char *errstr = "";
-
- printf("%-15s %-7s %-11s %-11s\n",
- "XDP stats", "CPU", "pps", "issue-pps");
-
- rec = &stats_rec->stats;
- prev = &stats_prev->stats;
- t = calc_period(rec, prev);
- for (i = 0; i < nr_cpus; i++) {
- struct datarec *r = &rec->cpu[i];
- struct datarec *p = &prev->cpu[i];
-
- pps = calc_pps (r, p, t);
- err = calc_errs_pps(r, p, t);
- if (err > 0)
- errstr = "invalid-ifindex";
- if (pps > 0)
- printf(fmt_rx, "XDP-RX CPU",
- i, pps, err, errstr);
- }
- pps = calc_pps (&rec->total, &prev->total, t);
- err = calc_errs_pps(&rec->total, &prev->total, t);
- printf(fm2_rx, "XDP-RX CPU", "total", pps, err);
- }
-
- /* rx_queue_index_map */
- printf("\n%-15s %-7s %-11s %-11s\n",
- "RXQ stats", "RXQ:CPU", "pps", "issue-pps");
-
- for (rxq = 0; rxq < nr_rxqs; rxq++) {
- char *fmt_rx = "%-15s %3d:%-3d %'-11.0f %'-10.0f %s\n";
- char *fm2_rx = "%-15s %3d:%-3s %'-11.0f\n";
- char *errstr = "";
- int rxq_ = rxq;
-
-		/* The last RXQ in the map catches overflows */
- if (rxq_ == nr_rxqs - 1)
- rxq_ = -1;
-
- rec = &stats_rec->rxq[rxq];
- prev = &stats_prev->rxq[rxq];
- t = calc_period(rec, prev);
- for (i = 0; i < nr_cpus; i++) {
- struct datarec *r = &rec->cpu[i];
- struct datarec *p = &prev->cpu[i];
-
- pps = calc_pps (r, p, t);
- err = calc_errs_pps(r, p, t);
- if (err > 0) {
- if (rxq_ == -1)
- errstr = "map-overflow-RXQ";
- else
- errstr = "err";
- }
- if (pps > 0)
- printf(fmt_rx, "rx_queue_index",
- rxq_, i, pps, err, errstr);
- }
- pps = calc_pps (&rec->total, &prev->total, t);
- err = calc_errs_pps(&rec->total, &prev->total, t);
- if (pps || err)
- printf(fm2_rx, "rx_queue_index", rxq_, "sum", pps, err);
- }
-}
-
-
-/* Pointer swap trick */
-static inline void swap(struct stats_record **a, struct stats_record **b)
-{
- struct stats_record *tmp;
-
- tmp = *a;
- *a = *b;
- *b = tmp;
-}
-
-static void stats_poll(int interval, int action, __u32 cfg_opt)
-{
- struct stats_record *record, *prev;
-
- record = alloc_stats_record();
- prev = alloc_stats_record();
- stats_collect(record);
-
- while (1) {
- swap(&prev, &record);
- stats_collect(record);
- stats_print(record, prev, action, cfg_opt);
- sleep(interval);
- }
-
- free_stats_record(record);
- free_stats_record(prev);
-}
-
-
-int main(int argc, char **argv)
-{
-	__u32 cfg_options = NO_TOUCH; /* Default: Don't touch packet memory */
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- int prog_fd, map_fd, opt, err;
- bool use_separators = true;
- struct config cfg = { 0 };
- struct bpf_program *prog;
- struct bpf_object *obj;
- struct bpf_map *map;
- char filename[256];
- int longindex = 0;
- int interval = 2;
- __u32 key = 0;
-
-
- char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 };
- int action = XDP_PASS; /* Default action */
- char *action_str = NULL;
-
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-
- obj = bpf_object__open_file(filename, NULL);
- if (libbpf_get_error(obj))
- return EXIT_FAIL;
-
- prog = bpf_object__next_program(obj, NULL);
- bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
-
- err = bpf_object__load(obj);
- if (err)
- return EXIT_FAIL;
- prog_fd = bpf_program__fd(prog);
-
- map = bpf_object__find_map_by_name(obj, "config_map");
- stats_global_map = bpf_object__find_map_by_name(obj, "stats_global_map");
- rx_queue_index_map = bpf_object__find_map_by_name(obj, "rx_queue_index_map");
- if (!map || !stats_global_map || !rx_queue_index_map) {
- printf("finding a map in obj file failed\n");
- return EXIT_FAIL;
- }
- map_fd = bpf_map__fd(map);
-
- if (!prog_fd) {
- fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n", strerror(errno));
- return EXIT_FAIL;
- }
-
- /* Parse commands line args */
- while ((opt = getopt_long(argc, argv, "FhSrmzd:s:a:",
- long_options, &longindex)) != -1) {
- switch (opt) {
- case 'd':
- if (strlen(optarg) >= IF_NAMESIZE) {
- fprintf(stderr, "ERR: --dev name too long\n");
- goto error;
- }
- ifname = (char *)&ifname_buf;
- strncpy(ifname, optarg, IF_NAMESIZE);
- ifindex = if_nametoindex(ifname);
- if (ifindex == 0) {
- fprintf(stderr,
- "ERR: --dev name unknown err(%d):%s\n",
- errno, strerror(errno));
- goto error;
- }
- break;
- case 's':
- interval = atoi(optarg);
- break;
- case 'S':
- xdp_flags |= XDP_FLAGS_SKB_MODE;
- break;
- case 'z':
- use_separators = false;
- break;
- case 'a':
- action_str = (char *)&action_str_buf;
- strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN);
- break;
- case 'r':
- cfg_options |= READ_MEM;
- break;
- case 'm':
- cfg_options |= SWAP_MAC;
- break;
- case 'F':
- xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
- break;
- case 'h':
- error:
- default:
- usage(argv);
- return EXIT_FAIL_OPTION;
- }
- }
-
- if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
- xdp_flags |= XDP_FLAGS_DRV_MODE;
-
- /* Required option */
- if (ifindex == -1) {
- fprintf(stderr, "ERR: required option --dev missing\n");
- usage(argv);
- return EXIT_FAIL_OPTION;
- }
- cfg.ifindex = ifindex;
-
- /* Parse action string */
- if (action_str) {
- action = parse_xdp_action(action_str);
- if (action < 0) {
- fprintf(stderr, "ERR: Invalid XDP --action: %s\n",
- action_str);
- list_xdp_actions();
- return EXIT_FAIL_OPTION;
- }
- }
- cfg.action = action;
-
- /* XDP_TX requires changing MAC-addrs, else HW may drop */
- if (action == XDP_TX)
- cfg_options |= SWAP_MAC;
- cfg.options = cfg_options;
-
- /* Trick to pretty printf with thousands separators use %' */
- if (use_separators)
- setlocale(LC_NUMERIC, "en_US");
-
- /* User-side setup ifindex in config_map */
- err = bpf_map_update_elem(map_fd, &key, &cfg, 0);
- if (err) {
- fprintf(stderr, "Store config failed (err:%d)\n", err);
- exit(EXIT_FAIL_BPF);
- }
-
- /* Remove XDP program when program is interrupted or killed */
- signal(SIGINT, int_exit);
- signal(SIGTERM, int_exit);
-
- if (bpf_xdp_attach(ifindex, prog_fd, xdp_flags, NULL) < 0) {
- fprintf(stderr, "link set xdp fd failed\n");
- return EXIT_FAIL_XDP;
- }
-
- err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
- if (err) {
- printf("can't get prog info - %s\n", strerror(errno));
- return err;
- }
- prog_id = info.id;
-
- stats_poll(interval, action, cfg_options);
- return EXIT_OK;
-}
diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c
deleted file mode 100644
index 9cf76b340dd7..000000000000
--- a/samples/bpf/xdp_sample_pkts_kern.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/ptrace.h>
-#include <linux/version.h>
-#include <uapi/linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-
-#define SAMPLE_SIZE 64ul
-
-struct {
- __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
- __uint(key_size, sizeof(int));
- __uint(value_size, sizeof(u32));
-} my_map SEC(".maps");
-
-SEC("xdp_sample")
-int xdp_sample_prog(struct xdp_md *ctx)
-{
- void *data_end = (void *)(long)ctx->data_end;
- void *data = (void *)(long)ctx->data;
-
- /* Metadata will be in the perf event before the packet data. */
- struct S {
- u16 cookie;
- u16 pkt_len;
- } __packed metadata;
-
- if (data < data_end) {
- /* The XDP perf_event_output handler will use the upper 32 bits
- * of the flags argument as a number of bytes to include of the
- * packet payload in the event data. If the size is too big, the
- * call to bpf_perf_event_output will fail and return -EFAULT.
- *
- * See bpf_xdp_event_output in net/core/filter.c.
- *
- * The BPF_F_CURRENT_CPU flag means that the event output fd
- * will be indexed by the CPU number in the event map.
- */
- u64 flags = BPF_F_CURRENT_CPU;
- u16 sample_size;
- int ret;
-
- metadata.cookie = 0xdead;
- metadata.pkt_len = (u16)(data_end - data);
- sample_size = min(metadata.pkt_len, SAMPLE_SIZE);
- flags |= (u64)sample_size << 32;
-
- ret = bpf_perf_event_output(ctx, &my_map, flags,
- &metadata, sizeof(metadata));
- if (ret)
- bpf_printk("perf_event_output failed: %d\n", ret);
- }
-
- return XDP_PASS;
-}
-
-char _license[] SEC("license") = "GPL";
-u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
deleted file mode 100644
index e39d7f654f30..000000000000
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ /dev/null
@@ -1,196 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <linux/perf_event.h>
-#include <linux/bpf.h>
-#include <net/if.h>
-#include <errno.h>
-#include <assert.h>
-#include <sys/sysinfo.h>
-#include <sys/ioctl.h>
-#include <signal.h>
-#include <bpf/libbpf.h>
-#include <bpf/bpf.h>
-#include <libgen.h>
-#include <linux/if_link.h>
-
-#include "perf-sys.h"
-
-static int if_idx;
-static char *if_name;
-static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
-static __u32 prog_id;
-static struct perf_buffer *pb = NULL;
-
-static int do_attach(int idx, int fd, const char *name)
-{
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- int err;
-
- err = bpf_xdp_attach(idx, fd, xdp_flags, NULL);
- if (err < 0) {
- printf("ERROR: failed to attach program to %s\n", name);
- return err;
- }
-
- err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
- if (err) {
- printf("can't get prog info - %s\n", strerror(errno));
- return err;
- }
- prog_id = info.id;
-
- return err;
-}
-
-static int do_detach(int idx, const char *name)
-{
- __u32 curr_prog_id = 0;
- int err = 0;
-
- err = bpf_xdp_query_id(idx, xdp_flags, &curr_prog_id);
- if (err) {
- printf("bpf_xdp_query_id failed\n");
- return err;
- }
- if (prog_id == curr_prog_id) {
- err = bpf_xdp_detach(idx, xdp_flags, NULL);
- if (err < 0)
- printf("ERROR: failed to detach prog from %s\n", name);
- } else if (!curr_prog_id) {
- printf("couldn't find a prog id on a %s\n", name);
- } else {
- printf("program on interface changed, not removing\n");
- }
-
- return err;
-}
-
-#define SAMPLE_SIZE 64
-
-static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
-{
- struct {
- __u16 cookie;
- __u16 pkt_len;
- __u8 pkt_data[SAMPLE_SIZE];
- } __packed *e = data;
- int i;
-
- if (e->cookie != 0xdead) {
- printf("BUG cookie %x sized %d\n", e->cookie, size);
- return;
- }
-
- printf("Pkt len: %-5d bytes. Ethernet hdr: ", e->pkt_len);
- for (i = 0; i < 14 && i < e->pkt_len; i++)
- printf("%02x ", e->pkt_data[i]);
- printf("\n");
-}
-
-static void sig_handler(int signo)
-{
- do_detach(if_idx, if_name);
- perf_buffer__free(pb);
- exit(0);
-}
-
-static void usage(const char *prog)
-{
- fprintf(stderr,
- "%s: %s [OPTS] <ifname|ifindex>\n\n"
- "OPTS:\n"
- " -F force loading prog\n"
- " -S use skb-mode\n",
- __func__, prog);
-}
-
-int main(int argc, char **argv)
-{
- const char *optstr = "FS";
- int prog_fd, map_fd, opt;
- struct bpf_program *prog;
- struct bpf_object *obj;
- struct bpf_map *map;
- char filename[256];
- int ret, err;
-
- while ((opt = getopt(argc, argv, optstr)) != -1) {
- switch (opt) {
- case 'F':
- xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
- break;
- case 'S':
- xdp_flags |= XDP_FLAGS_SKB_MODE;
- break;
- default:
- usage(basename(argv[0]));
- return 1;
- }
- }
-
- if (!(xdp_flags & XDP_FLAGS_SKB_MODE))
- xdp_flags |= XDP_FLAGS_DRV_MODE;
-
- if (optind == argc) {
- usage(basename(argv[0]));
- return 1;
- }
-
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-
- obj = bpf_object__open_file(filename, NULL);
- if (libbpf_get_error(obj))
- return 1;
-
- prog = bpf_object__next_program(obj, NULL);
- bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);
-
- err = bpf_object__load(obj);
- if (err)
- return 1;
-
- prog_fd = bpf_program__fd(prog);
-
- map = bpf_object__next_map(obj, NULL);
- if (!map) {
- printf("finding a map in obj file failed\n");
- return 1;
- }
- map_fd = bpf_map__fd(map);
-
- if_idx = if_nametoindex(argv[optind]);
- if (!if_idx)
- if_idx = strtoul(argv[optind], NULL, 0);
-
- if (!if_idx) {
- fprintf(stderr, "Invalid ifname\n");
- return 1;
- }
- if_name = argv[optind];
- err = do_attach(if_idx, prog_fd, if_name);
- if (err)
- return err;
-
- if (signal(SIGINT, sig_handler) ||
- signal(SIGHUP, sig_handler) ||
- signal(SIGTERM, sig_handler)) {
- perror("signal");
- return 1;
- }
-
- pb = perf_buffer__new(map_fd, 8, print_bpf_output, NULL, NULL, NULL);
- err = libbpf_get_error(pb);
- if (err) {
- perror("perf_buffer setup failed");
- return 1;
- }
-
- while ((ret = perf_buffer__poll(pb, 1000)) >= 0) {
- }
-
- kill(0, SIGINT);
- return ret;
-}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index d21deb46f49f..8790b3962e4b 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1039,6 +1039,7 @@ enum bpf_attach_type {
BPF_NETFILTER,
BPF_TCX_INGRESS,
BPF_TCX_EGRESS,
+ BPF_TRACE_UPROBE_MULTI,
__MAX_BPF_ATTACH_TYPE
};
@@ -1057,6 +1058,7 @@ enum bpf_link_type {
BPF_LINK_TYPE_STRUCT_OPS = 9,
BPF_LINK_TYPE_NETFILTER = 10,
BPF_LINK_TYPE_TCX = 11,
+ BPF_LINK_TYPE_UPROBE_MULTI = 12,
MAX_BPF_LINK_TYPE,
};
@@ -1186,7 +1188,16 @@ enum bpf_perf_event_type {
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
-#define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
+enum {
+ BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
+};
+
+/* link_create.uprobe_multi.flags used in LINK_CREATE command for
+ * BPF_TRACE_UPROBE_MULTI attach type to create return probe.
+ */
+enum {
+ BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
+};
/* link_create.netfilter.flags used in LINK_CREATE command for
* BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
@@ -1624,6 +1635,15 @@ union bpf_attr {
};
__u64 expected_revision;
} tcx;
+ struct {
+ __aligned_u64 path;
+ __aligned_u64 offsets;
+ __aligned_u64 ref_ctr_offsets;
+ __aligned_u64 cookies;
+ __u32 cnt;
+ __u32 flags;
+ __u32 pid;
+ } uprobe_multi;
};
} link_create;
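
For orientation, here is a minimal sketch of exercising the new link_create.uprobe_multi attributes directly through the bpf(2) syscall. It assumes UAPI headers carrying the fields added above and a loaded BPF_PROG_TYPE_KPROBE program with expected_attach_type BPF_TRACE_UPROBE_MULTI; the function name and parameters are illustrative, not part of the patch:

#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int uprobe_multi_link_create(int prog_fd, const char *path,
				    const unsigned long *offsets, __u32 cnt,
				    bool retprobe)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.attach_type = BPF_TRACE_UPROBE_MULTI;
	attr.link_create.uprobe_multi.path = (__u64)(unsigned long)path;
	attr.link_create.uprobe_multi.offsets = (__u64)(unsigned long)offsets;
	attr.link_create.uprobe_multi.cnt = cnt;
	/* pid stays 0 (no filter); flags selects entry vs return probes */
	attr.link_create.uprobe_multi.flags = retprobe ? BPF_F_UPROBE_MULTI_RETURN : 0;

	/* returns a link FD on success, -1 with errno set on failure */
	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}
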
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index b8b0a6369363..2d0c282c8588 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,4 +1,4 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
- usdt.o zip.o
+ usdt.o zip.o elf.o
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index c9b6b311a441..b0f1913763a3 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -767,6 +767,17 @@ int bpf_link_create(int prog_fd, int target_fd,
if (!OPTS_ZEROED(opts, kprobe_multi))
return libbpf_err(-EINVAL);
break;
+ case BPF_TRACE_UPROBE_MULTI:
+ attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
+ attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
+ attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
+ attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0));
+ attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0));
+ attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0));
+ attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0);
+ if (!OPTS_ZEROED(opts, uprobe_multi))
+ return libbpf_err(-EINVAL);
+ break;
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
case BPF_MODIFY_RETURN:
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 044a74ffc38a..74c2887cfd24 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -393,6 +393,15 @@ struct bpf_link_create_opts {
const __u64 *cookies;
} kprobe_multi;
struct {
+ __u32 flags;
+ __u32 cnt;
+ const char *path;
+ const unsigned long *offsets;
+ const unsigned long *ref_ctr_offsets;
+ const __u64 *cookies;
+ __u32 pid;
+ } uprobe_multi;
+ struct {
__u64 cookie;
} tracing;
struct {
@@ -409,7 +418,7 @@ struct bpf_link_create_opts {
};
size_t :0;
};
-#define bpf_link_create_opts__last_field kprobe_multi.cookies
+#define bpf_link_create_opts__last_field uprobe_multi.pid
LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
enum bpf_attach_type attach_type,
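
A hedged usage sketch for the extended opts; prog_fd, the binary path and the offsets are placeholders, and target_fd is unused for this attach type:

#include <bpf/bpf.h>

static int attach_two_uprobes(int prog_fd)
{
	unsigned long offsets[] = { 0x1234, 0x5678 }; /* hypothetical file offsets */
	LIBBPF_OPTS(bpf_link_create_opts, opts,
		.uprobe_multi.path = "/usr/bin/bash",
		.uprobe_multi.offsets = offsets,
		.uprobe_multi.cnt = 2,
	);

	/* returns a link FD on success, negative error code on failure */
	return bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
}
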
diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
new file mode 100644
index 000000000000..9d0296c1726a
--- /dev/null
+++ b/tools/lib/bpf/elf.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include <libelf.h>
+#include <gelf.h>
+#include <fcntl.h>
+#include <linux/kernel.h>
+
+#include "libbpf_internal.h"
+#include "str_error.h"
+
+#define STRERR_BUFSIZE 128
+
+int elf_open(const char *binary_path, struct elf_fd *elf_fd)
+{
+ char errmsg[STRERR_BUFSIZE];
+ int fd, ret;
+ Elf *elf;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ pr_warn("elf: failed to init libelf for %s\n", binary_path);
+ return -LIBBPF_ERRNO__LIBELF;
+ }
+ fd = open(binary_path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = -errno;
+ pr_warn("elf: failed to open %s: %s\n", binary_path,
+ libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
+ return ret;
+ }
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf) {
+ pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
+ close(fd);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+ elf_fd->fd = fd;
+ elf_fd->elf = elf;
+ return 0;
+}
+
+void elf_close(struct elf_fd *elf_fd)
+{
+ if (!elf_fd)
+ return;
+ elf_end(elf_fd->elf);
+ close(elf_fd->fd);
+}
+
+/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */
+static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
+{
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ GElf_Shdr sh;
+
+ if (!gelf_getshdr(scn, &sh))
+ continue;
+ if (sh.sh_type == sh_type)
+ return scn;
+ }
+ return NULL;
+}
+
+struct elf_sym {
+ const char *name;
+ GElf_Sym sym;
+ GElf_Shdr sh;
+};
+
+struct elf_sym_iter {
+ Elf *elf;
+ Elf_Data *syms;
+ size_t nr_syms;
+ size_t strtabidx;
+ size_t next_sym_idx;
+ struct elf_sym sym;
+ int st_type;
+};
+
+static int elf_sym_iter_new(struct elf_sym_iter *iter,
+ Elf *elf, const char *binary_path,
+ int sh_type, int st_type)
+{
+ Elf_Scn *scn = NULL;
+ GElf_Ehdr ehdr;
+ GElf_Shdr sh;
+
+ memset(iter, 0, sizeof(*iter));
+
+ if (!gelf_getehdr(elf, &ehdr)) {
+ pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
+ return -EINVAL;
+ }
+
+ scn = elf_find_next_scn_by_type(elf, sh_type, NULL);
+ if (!scn) {
+ pr_debug("elf: failed to find symbol table ELF sections in '%s'\n",
+ binary_path);
+ return -ENOENT;
+ }
+
+ if (!gelf_getshdr(scn, &sh))
+ return -EINVAL;
+
+ iter->strtabidx = sh.sh_link;
+ iter->syms = elf_getdata(scn, 0);
+ if (!iter->syms) {
+ pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n",
+ binary_path, elf_errmsg(-1));
+ return -EINVAL;
+ }
+ iter->nr_syms = iter->syms->d_size / sh.sh_entsize;
+ iter->elf = elf;
+ iter->st_type = st_type;
+ return 0;
+}
+
+static struct elf_sym *elf_sym_iter_next(struct elf_sym_iter *iter)
+{
+ struct elf_sym *ret = &iter->sym;
+ GElf_Sym *sym = &ret->sym;
+ const char *name = NULL;
+ Elf_Scn *sym_scn;
+ size_t idx;
+
+ for (idx = iter->next_sym_idx; idx < iter->nr_syms; idx++) {
+ if (!gelf_getsym(iter->syms, idx, sym))
+ continue;
+ if (GELF_ST_TYPE(sym->st_info) != iter->st_type)
+ continue;
+ name = elf_strptr(iter->elf, iter->strtabidx, sym->st_name);
+ if (!name)
+ continue;
+ sym_scn = elf_getscn(iter->elf, sym->st_shndx);
+ if (!sym_scn)
+ continue;
+ if (!gelf_getshdr(sym_scn, &ret->sh))
+ continue;
+
+ iter->next_sym_idx = idx + 1;
+ ret->name = name;
+ return ret;
+ }
+
+ return NULL;
+}
+
+
+/* Transform a symbol's virtual address (absolute for binaries, relative
+ * for shared libs) into a file offset, which is what the kernel expects
+ * for uprobe/uretprobe attachment.
+ * See Documentation/trace/uprobetracer.rst for more details. This is done
+ * by looking up the symbol's containing section header and using its virtual
+ * address (sh_addr) and corresponding file offset (sh_offset) to transform
+ * sym.st_value (a virtual address) into the desired final file offset.
+ */
+static unsigned long elf_sym_offset(struct elf_sym *sym)
+{
+ return sym->sym.st_value - sym->sh.sh_addr + sym->sh.sh_offset;
+}
+
+/* Find offset of function name in the provided ELF object. "binary_path" is
+ * the path to the ELF binary represented by "elf", and is used only for
+ * error reporting. "name" matches symbol name or name@@LIB for library
+ * functions.
+ */
+long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name)
+{
+ int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
+ bool is_shared_lib, is_name_qualified;
+ long ret = -ENOENT;
+ size_t name_len;
+ GElf_Ehdr ehdr;
+
+ if (!gelf_getehdr(elf, &ehdr)) {
+ pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ }
+ /* for shared lib case, we do not need to calculate relative offset */
+ is_shared_lib = ehdr.e_type == ET_DYN;
+
+ name_len = strlen(name);
+ /* Does name specify "@@LIB"? */
+ is_name_qualified = strstr(name, "@@") != NULL;
+
+ /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
+ * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically
+ * linked binary may not have SHT_DYNSYM, so absence of a section should not be
+ * reported as a warning/error.
+ */
+ for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
+ struct elf_sym_iter iter;
+ struct elf_sym *sym;
+ int last_bind = -1;
+ int cur_bind;
+
+ ret = elf_sym_iter_new(&iter, elf, binary_path, sh_types[i], STT_FUNC);
+ if (ret == -ENOENT)
+ continue;
+ if (ret)
+ goto out;
+
+ while ((sym = elf_sym_iter_next(&iter))) {
+ /* User can specify func, func@@LIB or func@@LIB_VERSION. */
+ if (strncmp(sym->name, name, name_len) != 0)
+ continue;
+ /* ...but we don't want a search for "foo" to also match "foo2", so any
+ * additional characters in sym->name should be of the form "@@LIB".
+ */
+ if (!is_name_qualified && sym->name[name_len] != '\0' && sym->name[name_len] != '@')
+ continue;
+
+ cur_bind = GELF_ST_BIND(sym->sym.st_info);
+
+ if (ret > 0) {
+ /* handle multiple matches */
+ if (last_bind != STB_WEAK && cur_bind != STB_WEAK) {
+ /* Only accept one non-weak bind. */
+ pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n",
+ sym->name, name, binary_path);
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ } else if (cur_bind == STB_WEAK) {
+ /* already have a non-weak bind, and
+ * this is a weak bind, so ignore.
+ */
+ continue;
+ }
+ }
+
+ ret = elf_sym_offset(sym);
+ last_bind = cur_bind;
+ }
+ if (ret > 0)
+ break;
+ }
+
+ if (ret > 0) {
+ pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path,
+ ret);
+ } else {
+ if (ret == 0) {
+ pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path,
+ is_shared_lib ? "should not be 0 in a shared library" :
+ "try using shared library path instead");
+ ret = -ENOENT;
+ } else {
+ pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path);
+ }
+ }
+out:
+ return ret;
+}
+
+/* Find offset of function name in ELF object specified by path. "name" matches
+ * symbol name or name@@LIB for library functions.
+ */
+long elf_find_func_offset_from_file(const char *binary_path, const char *name)
+{
+ struct elf_fd elf_fd;
+ long ret = -ENOENT;
+
+ ret = elf_open(binary_path, &elf_fd);
+ if (ret)
+ return ret;
+ ret = elf_find_func_offset(elf_fd.elf, binary_path, name);
+ elf_close(&elf_fd);
+ return ret;
+}
+
+struct symbol {
+ const char *name;
+ int bind;
+ int idx;
+};
+
+static int symbol_cmp(const void *a, const void *b)
+{
+ const struct symbol *sym_a = a;
+ const struct symbol *sym_b = b;
+
+ return strcmp(sym_a->name, sym_b->name);
+}
+
+/*
+ * Return offsets in @poffsets for symbols specified in @syms array argument.
+ * On success returns 0; offsets are returned in an allocated array of @cnt
+ * size, which must be freed by the caller.
+ */
+int elf_resolve_syms_offsets(const char *binary_path, int cnt,
+ const char **syms, unsigned long **poffsets)
+{
+ int sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
+ int err = 0, i, cnt_done = 0;
+ unsigned long *offsets;
+ struct symbol *symbols;
+ struct elf_fd elf_fd;
+
+ err = elf_open(binary_path, &elf_fd);
+ if (err)
+ return err;
+
+ offsets = calloc(cnt, sizeof(*offsets));
+ symbols = calloc(cnt, sizeof(*symbols));
+
+ if (!offsets || !symbols) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ symbols[i].name = syms[i];
+ symbols[i].idx = i;
+ }
+
+ qsort(symbols, cnt, sizeof(*symbols), symbol_cmp);
+
+ for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
+ struct elf_sym_iter iter;
+ struct elf_sym *sym;
+
+ err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC);
+ if (err == -ENOENT)
+ continue;
+ if (err)
+ goto out;
+
+ while ((sym = elf_sym_iter_next(&iter))) {
+ unsigned long sym_offset = elf_sym_offset(sym);
+ int bind = GELF_ST_BIND(sym->sym.st_info);
+ struct symbol *found, tmp = {
+ .name = sym->name,
+ };
+ unsigned long *offset;
+
+ found = bsearch(&tmp, symbols, cnt, sizeof(*symbols), symbol_cmp);
+ if (!found)
+ continue;
+
+ offset = &offsets[found->idx];
+ if (*offset > 0) {
+ /* same offset, no problem */
+ if (*offset == sym_offset)
+ continue;
+ /* handle multiple matches */
+ if (found->bind != STB_WEAK && bind != STB_WEAK) {
+ /* Only accept one non-weak bind. */
+ pr_warn("elf: ambiguous match found '%s@%lu' in '%s' previous offset %lu\n",
+ sym->name, sym_offset, binary_path, *offset);
+ err = -ESRCH;
+ goto out;
+ } else if (bind == STB_WEAK) {
+ /* already have a non-weak bind, and
+ * this is a weak bind, so ignore.
+ */
+ continue;
+ }
+ } else {
+ cnt_done++;
+ }
+ *offset = sym_offset;
+ found->bind = bind;
+ }
+ }
+
+ if (cnt != cnt_done) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ *poffsets = offsets;
+
+out:
+ free(symbols);
+ if (err)
+ free(offsets);
+ elf_close(&elf_fd);
+ return err;
+}
+
+/*
+ * Return offsets in @poffsets for symbols specified by @pattern argument.
+ * On success returns 0; offsets are returned in an allocated @poffsets
+ * array of @pcnt size, which must be freed by the caller.
+ */
+int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
+ unsigned long **poffsets, size_t *pcnt)
+{
+ int sh_types[2] = { SHT_SYMTAB, SHT_DYNSYM };
+ unsigned long *offsets = NULL;
+ size_t cap = 0, cnt = 0;
+ struct elf_fd elf_fd;
+ int err = 0, i;
+
+ err = elf_open(binary_path, &elf_fd);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
+ struct elf_sym_iter iter;
+ struct elf_sym *sym;
+
+ err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC);
+ if (err == -ENOENT)
+ continue;
+ if (err)
+ goto out;
+
+ while ((sym = elf_sym_iter_next(&iter))) {
+ if (!glob_match(sym->name, pattern))
+ continue;
+
+ err = libbpf_ensure_mem((void **) &offsets, &cap, sizeof(*offsets),
+ cnt + 1);
+ if (err)
+ goto out;
+
+ offsets[cnt++] = elf_sym_offset(sym);
+ }
+
+ /* If we found anything in the first symbol section,
+ * do not search others to avoid duplicates.
+ */
+ if (cnt)
+ break;
+ }
+
+ if (cnt) {
+ *poffsets = offsets;
+ *pcnt = cnt;
+ } else {
+ err = -ENOENT;
+ }
+
+out:
+ if (err)
+ free(offsets);
+ elf_close(&elf_fd);
+ return err;
+}
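
Both resolvers are libbpf-internal (declared in libbpf_internal.h, not exported). A rough sketch of the symbol-list variant, with the library path and symbol names made up:

#include <stdlib.h>

static int resolve_example(void)
{
	const char *syms[] = { "foo_init", "foo_exit" }; /* hypothetical */
	unsigned long *offsets = NULL;
	int err;

	err = elf_resolve_syms_offsets("/usr/lib/libfoo.so", 2, syms, &offsets);
	if (err)
		return err;
	/* offsets[0] and offsets[1] are now uprobe-ready file offsets */
	free(offsets);
	return 0;
}
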
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index b14a4376a86e..96ff1aa4bf6a 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -120,6 +120,7 @@ static const char * const attach_type_name[] = {
[BPF_NETFILTER] = "netfilter",
[BPF_TCX_INGRESS] = "tcx_ingress",
[BPF_TCX_EGRESS] = "tcx_egress",
+ [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi",
};
static const char * const link_type_name[] = {
@@ -135,6 +136,7 @@ static const char * const link_type_name[] = {
[BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
[BPF_LINK_TYPE_NETFILTER] = "netfilter",
[BPF_LINK_TYPE_TCX] = "tcx",
+ [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi",
};
static const char * const map_type_name[] = {
@@ -365,6 +367,8 @@ enum sec_def_flags {
SEC_SLEEPABLE = 8,
/* BPF program support non-linear XDP buffer */
SEC_XDP_FRAGS = 16,
+ /* Set up the proper attach type for usdt probes. */
+ SEC_USDT = 32,
};
struct bpf_sec_def {
@@ -550,6 +554,7 @@ struct extern_desc {
int btf_id;
int sec_btf_id;
const char *name;
+ char *essent_name;
bool is_set;
bool is_weak;
union {
@@ -3770,6 +3775,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
struct extern_desc *ext;
int i, n, off, dummy_var_btf_id;
const char *ext_name, *sec_name;
+ size_t ext_essent_len;
Elf_Scn *scn;
Elf64_Shdr *sh;
@@ -3819,6 +3825,14 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
ext->sym_idx = i;
ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
+ ext_essent_len = bpf_core_essential_name_len(ext->name);
+ ext->essent_name = NULL;
+ if (ext_essent_len != strlen(ext->name)) {
+ ext->essent_name = strndup(ext->name, ext_essent_len);
+ if (!ext->essent_name)
+ return -ENOMEM;
+ }
+
ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
if (ext->sec_btf_id <= 0) {
pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
@@ -4817,6 +4831,39 @@ static int probe_perf_link(void)
return link_fd < 0 && err == -EBADF;
}
+static int probe_uprobe_multi_link(void)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
+ .expected_attach_type = BPF_TRACE_UPROBE_MULTI,
+ );
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd, link_fd, err;
+ unsigned long offset = 0;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
+ insns, ARRAY_SIZE(insns), &load_opts);
+ if (prog_fd < 0)
+ return -errno;
+
+ /* Creating uprobe in '/' binary should fail with -EBADF. */
+ link_opts.uprobe_multi.path = "/";
+ link_opts.uprobe_multi.offsets = &offset;
+ link_opts.uprobe_multi.cnt = 1;
+
+ link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
+ err = -errno; /* close() can clobber errno */
+
+ if (link_fd >= 0)
+ close(link_fd);
+ close(prog_fd);
+
+ return link_fd < 0 && err == -EBADF;
+}
+
static int probe_kern_bpf_cookie(void)
{
struct bpf_insn insns[] = {
@@ -4913,6 +4960,9 @@ static struct kern_feature_desc {
[FEAT_SYSCALL_WRAPPER] = {
"Kernel using syscall wrapper", probe_kern_syscall_wrapper,
},
+ [FEAT_UPROBE_MULTI_LINK] = {
+ "BPF multi-uprobe link support", probe_uprobe_multi_link,
+ },
};
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
@@ -6780,6 +6830,10 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog,
if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
+ /* special check for usdt to use uprobe_multi link */
+ if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
+ prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
+
if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
int btf_obj_fd = 0, btf_type_id = 0, err;
const char *attach_name;
@@ -6848,7 +6902,6 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
if (!insns || !insns_cnt)
return -EINVAL;
- load_attr.expected_attach_type = prog->expected_attach_type;
if (kernel_supports(obj, FEAT_PROG_NAME))
prog_name = prog->name;
load_attr.attach_prog_fd = prog->attach_prog_fd;
@@ -6884,6 +6937,9 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
insns_cnt = prog->insns_cnt;
}
+ /* allow prog_prepare_load_fn to change expected_attach_type */
+ load_attr.expected_attach_type = prog->expected_attach_type;
+
if (obj->gen_loader) {
bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
license, insns, insns_cnt, &load_attr,
@@ -7624,7 +7680,8 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
local_func_proto_id = ext->ksym.type_id;
- kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
+ kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf,
+ &mod_btf);
if (kfunc_id < 0) {
if (kfunc_id == -ESRCH && ext->is_weak)
return 0;
@@ -7639,6 +7696,9 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
kern_btf, kfunc_proto_id);
if (ret <= 0) {
+ if (ext->is_weak)
+ return 0;
+
pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
ext->name, local_func_proto_id,
mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
@@ -8316,6 +8376,21 @@ int bpf_object__pin(struct bpf_object *obj, const char *path)
return 0;
}
+int bpf_object__unpin(struct bpf_object *obj, const char *path)
+{
+ int err;
+
+ err = bpf_object__unpin_programs(obj, path);
+ if (err)
+ return libbpf_err(err);
+
+ err = bpf_object__unpin_maps(obj, path);
+ if (err)
+ return libbpf_err(err);
+
+ return 0;
+}
+
static void bpf_map__destroy(struct bpf_map *map)
{
if (map->inner_map) {
@@ -8363,6 +8438,7 @@ void bpf_object__close(struct bpf_object *obj)
bpf_object__elf_finish(obj);
bpf_object_unload(obj);
btf__free(obj->btf);
+ btf__free(obj->btf_vmlinux);
btf_ext__free(obj->btf_ext);
for (i = 0; i < obj->nr_maps; i++)
@@ -8370,6 +8446,10 @@ void bpf_object__close(struct bpf_object *obj)
zfree(&obj->btf_custom_path);
zfree(&obj->kconfig);
+
+ for (i = 0; i < obj->nr_extern; i++)
+ zfree(&obj->externs[i].essent_name);
+
zfree(&obj->externs);
obj->nr_extern = 0;
@@ -8681,6 +8761,7 @@ static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_lin
static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
@@ -8696,9 +8777,14 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
+ SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
+ SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
+ SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
+ SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
- SEC_DEF("usdt+", KPROBE, 0, SEC_NONE, attach_usdt),
+ SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt),
+ SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt),
SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */
SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */
SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE),
@@ -10549,7 +10635,7 @@ struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
}
/* Adapted from perf/util/string.c */
-static bool glob_match(const char *str, const char *pat)
+bool glob_match(const char *str, const char *pat)
{
while (*str && *pat && *pat != '*') {
if (*pat == '?') { /* Matches any single character */
@@ -10902,6 +10988,37 @@ static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, stru
return libbpf_get_error(*link);
}
+static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+ int n, ret = -EINVAL;
+
+ *link = NULL;
+
+ n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%ms",
+ &probe_type, &binary_path, &func_name);
+ switch (n) {
+ case 1:
+ /* handle SEC("u[ret]probe.multi") - format is valid, but auto-attach is impossible. */
+ ret = 0;
+ break;
+ case 3:
+ opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
+ *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
+ ret = libbpf_get_error(*link);
+ break;
+ default:
+ pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
+ prog->sec_name);
+ break;
+ }
+ free(probe_type);
+ free(binary_path);
+ free(func_name);
+ return ret;
+}
+
static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
const char *binary_path, uint64_t offset)
{
@@ -10984,191 +11101,6 @@ err_clean_legacy:
return err;
}
-/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */
-static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
-{
- while ((scn = elf_nextscn(elf, scn)) != NULL) {
- GElf_Shdr sh;
-
- if (!gelf_getshdr(scn, &sh))
- continue;
- if (sh.sh_type == sh_type)
- return scn;
- }
- return NULL;
-}
-
-/* Find offset of function name in the provided ELF object. "binary_path" is
- * the path to the ELF binary represented by "elf", and only used for error
- * reporting matters. "name" matches symbol name or name@@LIB for library
- * functions.
- */
-static long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name)
-{
- int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
- bool is_shared_lib, is_name_qualified;
- long ret = -ENOENT;
- size_t name_len;
- GElf_Ehdr ehdr;
-
- if (!gelf_getehdr(elf, &ehdr)) {
- pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
- ret = -LIBBPF_ERRNO__FORMAT;
- goto out;
- }
- /* for shared lib case, we do not need to calculate relative offset */
- is_shared_lib = ehdr.e_type == ET_DYN;
-
- name_len = strlen(name);
- /* Does name specify "@@LIB"? */
- is_name_qualified = strstr(name, "@@") != NULL;
-
- /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
- * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically
- * linked binary may not have SHT_DYMSYM, so absence of a section should not be
- * reported as a warning/error.
- */
- for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
- size_t nr_syms, strtabidx, idx;
- Elf_Data *symbols = NULL;
- Elf_Scn *scn = NULL;
- int last_bind = -1;
- const char *sname;
- GElf_Shdr sh;
-
- scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL);
- if (!scn) {
- pr_debug("elf: failed to find symbol table ELF sections in '%s'\n",
- binary_path);
- continue;
- }
- if (!gelf_getshdr(scn, &sh))
- continue;
- strtabidx = sh.sh_link;
- symbols = elf_getdata(scn, 0);
- if (!symbols) {
- pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n",
- binary_path, elf_errmsg(-1));
- ret = -LIBBPF_ERRNO__FORMAT;
- goto out;
- }
- nr_syms = symbols->d_size / sh.sh_entsize;
-
- for (idx = 0; idx < nr_syms; idx++) {
- int curr_bind;
- GElf_Sym sym;
- Elf_Scn *sym_scn;
- GElf_Shdr sym_sh;
-
- if (!gelf_getsym(symbols, idx, &sym))
- continue;
-
- if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
- continue;
-
- sname = elf_strptr(elf, strtabidx, sym.st_name);
- if (!sname)
- continue;
-
- curr_bind = GELF_ST_BIND(sym.st_info);
-
- /* User can specify func, func@@LIB or func@@LIB_VERSION. */
- if (strncmp(sname, name, name_len) != 0)
- continue;
- /* ...but we don't want a search for "foo" to match 'foo2" also, so any
- * additional characters in sname should be of the form "@@LIB".
- */
- if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@')
- continue;
-
- if (ret >= 0) {
- /* handle multiple matches */
- if (last_bind != STB_WEAK && curr_bind != STB_WEAK) {
- /* Only accept one non-weak bind. */
- pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n",
- sname, name, binary_path);
- ret = -LIBBPF_ERRNO__FORMAT;
- goto out;
- } else if (curr_bind == STB_WEAK) {
- /* already have a non-weak bind, and
- * this is a weak bind, so ignore.
- */
- continue;
- }
- }
-
- /* Transform symbol's virtual address (absolute for
- * binaries and relative for shared libs) into file
- * offset, which is what kernel is expecting for
- * uprobe/uretprobe attachment.
- * See Documentation/trace/uprobetracer.rst for more
- * details.
- * This is done by looking up symbol's containing
- * section's header and using it's virtual address
- * (sh_addr) and corresponding file offset (sh_offset)
- * to transform sym.st_value (virtual address) into
- * desired final file offset.
- */
- sym_scn = elf_getscn(elf, sym.st_shndx);
- if (!sym_scn)
- continue;
- if (!gelf_getshdr(sym_scn, &sym_sh))
- continue;
-
- ret = sym.st_value - sym_sh.sh_addr + sym_sh.sh_offset;
- last_bind = curr_bind;
- }
- if (ret > 0)
- break;
- }
-
- if (ret > 0) {
- pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path,
- ret);
- } else {
- if (ret == 0) {
- pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path,
- is_shared_lib ? "should not be 0 in a shared library" :
- "try using shared library path instead");
- ret = -ENOENT;
- } else {
- pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path);
- }
- }
-out:
- return ret;
-}
-
-/* Find offset of function name in ELF object specified by path. "name" matches
- * symbol name or name@@LIB for library functions.
- */
-static long elf_find_func_offset_from_file(const char *binary_path, const char *name)
-{
- char errmsg[STRERR_BUFSIZE];
- long ret = -ENOENT;
- Elf *elf;
- int fd;
-
- fd = open(binary_path, O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- ret = -errno;
- pr_warn("failed to open %s: %s\n", binary_path,
- libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
- return ret;
- }
- elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
- if (!elf) {
- pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
- close(fd);
- return -LIBBPF_ERRNO__FORMAT;
- }
-
- ret = elf_find_func_offset(elf, binary_path, name);
- elf_end(elf);
- close(fd);
- return ret;
-}
-
/* Find offset of function name in archive specified by path. Currently
* supported are .zip files that do not compress their contents, as used on
* Android in the form of APKs, for example. "file_name" is the name of the ELF
@@ -11311,6 +11243,120 @@ static int resolve_full_path(const char *file, char *result, size_t result_sz)
return -ENOENT;
}
+struct bpf_link *
+bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
+ pid_t pid,
+ const char *path,
+ const char *func_pattern,
+ const struct bpf_uprobe_multi_opts *opts)
+{
+ const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
+ LIBBPF_OPTS(bpf_link_create_opts, lopts);
+ unsigned long *resolved_offsets = NULL;
+ int err = 0, link_fd, prog_fd;
+ struct bpf_link *link = NULL;
+ char errmsg[STRERR_BUFSIZE];
+ char full_path[PATH_MAX];
+ const __u64 *cookies;
+ const char **syms;
+ size_t cnt;
+
+ if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ syms = OPTS_GET(opts, syms, NULL);
+ offsets = OPTS_GET(opts, offsets, NULL);
+ ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
+ cookies = OPTS_GET(opts, cookies, NULL);
+ cnt = OPTS_GET(opts, cnt, 0);
+
+ /*
+ * The user can specify 2 mutually exclusive sets of inputs:
+ *
+ * 1) use only path/func_pattern/pid arguments
+ *
+ * 2) use path/pid with allowed combinations of:
+ * syms/offsets/ref_ctr_offsets/cookies/cnt
+ *
+ * - syms and offsets are mutually exclusive
+ * - ref_ctr_offsets and cookies are optional
+ *
+ * Any other usage results in error.
+ */
+
+ if (!path)
+ return libbpf_err_ptr(-EINVAL);
+ if (!func_pattern && cnt == 0)
+ return libbpf_err_ptr(-EINVAL);
+
+ if (func_pattern) {
+ if (syms || offsets || ref_ctr_offsets || cookies || cnt)
+ return libbpf_err_ptr(-EINVAL);
+ } else {
+ if (!!syms == !!offsets)
+ return libbpf_err_ptr(-EINVAL);
+ }
+
+ if (func_pattern) {
+ if (!strchr(path, '/')) {
+ err = resolve_full_path(path, full_path, sizeof(full_path));
+ if (err) {
+ pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
+ prog->name, path, err);
+ return libbpf_err_ptr(err);
+ }
+ path = full_path;
+ }
+
+ err = elf_resolve_pattern_offsets(path, func_pattern,
+ &resolved_offsets, &cnt);
+ if (err < 0)
+ return libbpf_err_ptr(err);
+ offsets = resolved_offsets;
+ } else if (syms) {
+ err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets);
+ if (err < 0)
+ return libbpf_err_ptr(err);
+ offsets = resolved_offsets;
+ }
+
+ lopts.uprobe_multi.path = path;
+ lopts.uprobe_multi.offsets = offsets;
+ lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
+ lopts.uprobe_multi.cookies = cookies;
+ lopts.uprobe_multi.cnt = cnt;
+ lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0;
+
+ if (pid == 0)
+ pid = getpid();
+ if (pid > 0)
+ lopts.uprobe_multi.pid = pid;
+
+ link = calloc(1, sizeof(*link));
+ if (!link) {
+ err = -ENOMEM;
+ goto error;
+ }
+ link->detach = &bpf_link__detach_fd;
+
+ prog_fd = bpf_program__fd(prog);
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
+ if (link_fd < 0) {
+ err = -errno;
+ pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
+ prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto error;
+ }
+ link->fd = link_fd;
+ free(resolved_offsets);
+ return link;
+
+error:
+ free(resolved_offsets);
+ free(link);
+ return libbpf_err_ptr(err);
+}
+
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
const char *binary_path, size_t func_offset,
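
Per the attach_uprobe_multi() parser above, programs become auto-attachable with a section of the form uprobe.multi/<path>:<pattern>. A BPF-side sketch, where the bash path and the read* glob are illustrative:

// SPDX-License-Identifier: GPL-2.0
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

/* auto-attaches to every function matching "read*" in the named binary */
SEC("uprobe.multi//usr/bin/bash:read*")
int read_enter(struct pt_regs *ctx)
{
	bpf_printk("read* entry");
	return 0;
}
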
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 55b97b208754..0e52621cba43 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -266,6 +266,7 @@ LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
const char *path);
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
+LIBBPF_API int bpf_object__unpin(struct bpf_object *object, const char *path);
LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
@@ -529,6 +530,57 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
const char *pattern,
const struct bpf_kprobe_multi_opts *opts);
+struct bpf_uprobe_multi_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* array of function symbols to attach to */
+ const char **syms;
+ /* array of function addresses to attach to */
+ const unsigned long *offsets;
+ /* optional, array of associated ref counter offsets */
+ const unsigned long *ref_ctr_offsets;
+ /* optional, array of associated BPF cookies */
+ const __u64 *cookies;
+ /* number of elements in syms/offsets/cookies arrays */
+ size_t cnt;
+ /* create return uprobes */
+ bool retprobe;
+ size_t :0;
+};
+
+#define bpf_uprobe_multi_opts__last_field retprobe
+
+/**
+ * @brief **bpf_program__attach_uprobe_multi()** attaches a BPF program
+ * to multiple uprobes with a single uprobe_multi link.
+ *
+ * The user can specify 2 mutually exclusive sets of inputs:
+ *
+ * 1) use only path/func_pattern/pid arguments
+ *
+ * 2) use path/pid with allowed combinations of
+ * syms/offsets/ref_ctr_offsets/cookies/cnt
+ *
+ * - syms and offsets are mutually exclusive
+ * - ref_ctr_offsets and cookies are optional
+ *
+ * @param prog BPF program to attach
+ * @param pid Process ID to attach the uprobe to, 0 for self (own process),
+ * -1 for all processes
+ * @param binary_path Path to binary
+ * @param func_pattern Glob pattern selecting the functions to attach the
+ * BPF program to
+ * @param opts Additional options (see **struct bpf_uprobe_multi_opts**)
+ * @return A pointer to the created BPF link on success; or NULL on error,
+ * with errno set appropriately
+ */
+LIBBPF_API struct bpf_link *
+bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
+ pid_t pid,
+ const char *binary_path,
+ const char *func_pattern,
+ const struct bpf_uprobe_multi_opts *opts);
+
struct bpf_ksyscall_opts {
/* size of this struct, for forward/backward compatibility */
size_t sz;
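
A usage sketch for the pattern mode of the new API; the program handle, library path and glob are placeholders:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_malloc_probes(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); /* no syms/offsets: pattern mode */
	struct bpf_link *link;

	link = bpf_program__attach_uprobe_multi(prog, -1 /* all processes */,
						"/usr/lib/libc.so.6", "malloc*",
						&opts);
	if (!link)
		fprintf(stderr, "attach failed: %d\n", -errno);
	return link;
}
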
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 9c7538dd5835..57712321490f 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -395,7 +395,9 @@ LIBBPF_1.2.0 {
LIBBPF_1.3.0 {
global:
bpf_obj_pin_opts;
+ bpf_object__unpin;
bpf_prog_detach_opts;
bpf_program__attach_netfilter;
bpf_program__attach_tcx;
+ bpf_program__attach_uprobe_multi;
} LIBBPF_1.2.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index e4d05662a96c..f0f08635adb0 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <fcntl.h>
#include <unistd.h>
+#include <libelf.h>
#include "relo_core.h"
/* make sure libbpf doesn't use kernel-only integer typedefs */
@@ -354,6 +355,8 @@ enum kern_feature_id {
FEAT_BTF_ENUM64,
/* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */
FEAT_SYSCALL_WRAPPER,
+ /* BPF multi-uprobe link support */
+ FEAT_UPROBE_MULTI_LINK,
__FEAT_CNT,
};
@@ -577,4 +580,22 @@ static inline bool is_pow_of_2(size_t x)
#define PROG_LOAD_ATTEMPTS 5
int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);
+bool glob_match(const char *str, const char *pat);
+
+long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name);
+long elf_find_func_offset_from_file(const char *binary_path, const char *name);
+
+struct elf_fd {
+ Elf *elf;
+ int fd;
+};
+
+int elf_open(const char *binary_path, struct elf_fd *elf_fd);
+void elf_close(struct elf_fd *elf_fd);
+
+int elf_resolve_syms_offsets(const char *binary_path, int cnt,
+ const char **syms, unsigned long **poffsets);
+int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
+ unsigned long **poffsets, size_t *pcnt);
+
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
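
For completeness, a one-off sketch of the single-symbol helper now shared through this header; the path and symbol name are illustrative:

#include <stdio.h>

static void show_offset(void)
{
	long off;

	off = elf_find_func_offset_from_file("/usr/lib/libc.so.6", "malloc");
	if (off > 0)
		printf("malloc is at file offset 0x%lx\n", off);
}
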
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
index a26b2f5fa0fc..63a4d5ad12d1 100644
--- a/tools/lib/bpf/relo_core.c
+++ b/tools/lib/bpf/relo_core.c
@@ -776,7 +776,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
break;
case BPF_CORE_FIELD_SIGNED:
*val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) ||
- (btf_int_encoding(mt) & BTF_INT_SIGNED);
+ (btf_is_int(mt) && (btf_int_encoding(mt) & BTF_INT_SIGNED));
if (validate)
*validate = true; /* signedness is never ambiguous */
break;
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
index 37455d00b239..93794f01bb67 100644
--- a/tools/lib/bpf/usdt.c
+++ b/tools/lib/bpf/usdt.c
@@ -250,6 +250,7 @@ struct usdt_manager {
bool has_bpf_cookie;
bool has_sema_refcnt;
+ bool has_uprobe_multi;
};
struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
@@ -284,6 +285,11 @@ struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
*/
man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0;
+ /*
+ * Detect kernel support for uprobe multi link to be used for attaching
+ * usdt probes.
+ */
+ man->has_uprobe_multi = kernel_supports(obj, FEAT_UPROBE_MULTI_LINK);
return man;
}
@@ -808,6 +814,8 @@ struct bpf_link_usdt {
long abs_ip;
struct bpf_link *link;
} *uprobes;
+
+ struct bpf_link *multi_link;
};
static int bpf_link_usdt_detach(struct bpf_link *link)
@@ -816,6 +824,9 @@ static int bpf_link_usdt_detach(struct bpf_link *link)
struct usdt_manager *man = usdt_link->usdt_man;
int i;
+ bpf_link__destroy(usdt_link->multi_link);
+
+ /* When multi_link is used, uprobe_cnt is 0 */
for (i = 0; i < usdt_link->uprobe_cnt; i++) {
/* detach underlying uprobe link */
bpf_link__destroy(usdt_link->uprobes[i].link);
@@ -946,32 +957,24 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
const char *usdt_provider, const char *usdt_name,
__u64 usdt_cookie)
{
- int i, fd, err, spec_map_fd, ip_map_fd;
+ unsigned long *offsets = NULL, *ref_ctr_offsets = NULL;
+ int i, err, spec_map_fd, ip_map_fd;
LIBBPF_OPTS(bpf_uprobe_opts, opts);
struct hashmap *specs_hash = NULL;
struct bpf_link_usdt *link = NULL;
struct usdt_target *targets = NULL;
+ __u64 *cookies = NULL;
+ struct elf_fd elf_fd;
size_t target_cnt;
- Elf *elf;
spec_map_fd = bpf_map__fd(man->specs_map);
ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
- fd = open(path, O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- err = -errno;
- pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err);
+ err = elf_open(path, &elf_fd);
+ if (err)
return libbpf_err_ptr(err);
- }
-
- elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
- if (!elf) {
- err = -EBADF;
- pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1));
- goto err_out;
- }
- err = sanity_check_usdt_elf(elf, path);
+ err = sanity_check_usdt_elf(elf_fd.elf, path);
if (err)
goto err_out;
@@ -984,7 +987,7 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
/* discover USDT in given binary, optionally limiting
* activations to a given PID, if pid > 0
*/
- err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name,
+ err = collect_usdt_targets(man, elf_fd.elf, path, pid, usdt_provider, usdt_name,
usdt_cookie, &targets, &target_cnt);
if (err <= 0) {
err = (err == 0) ? -ENOENT : err;
@@ -1007,10 +1010,21 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
link->link.detach = &bpf_link_usdt_detach;
link->link.dealloc = &bpf_link_usdt_dealloc;
- link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
- if (!link->uprobes) {
- err = -ENOMEM;
- goto err_out;
+ if (man->has_uprobe_multi) {
+ offsets = calloc(target_cnt, sizeof(*offsets));
+ cookies = calloc(target_cnt, sizeof(*cookies));
+ ref_ctr_offsets = calloc(target_cnt, sizeof(*ref_ctr_offsets));
+
+ if (!offsets || !ref_ctr_offsets || !cookies) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ } else {
+ link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
+ if (!link->uprobes) {
+ err = -ENOMEM;
+ goto err_out;
+ }
}
for (i = 0; i < target_cnt; i++) {
@@ -1051,37 +1065,65 @@ struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct
goto err_out;
}
- opts.ref_ctr_offset = target->sema_off;
- opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
- uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
- target->rel_ip, &opts);
- err = libbpf_get_error(uprobe_link);
- if (err) {
- pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n",
- i, usdt_provider, usdt_name, path, err);
+ if (man->has_uprobe_multi) {
+ offsets[i] = target->rel_ip;
+ ref_ctr_offsets[i] = target->sema_off;
+ cookies[i] = spec_id;
+ } else {
+ opts.ref_ctr_offset = target->sema_off;
+ opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
+ uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
+ target->rel_ip, &opts);
+ err = libbpf_get_error(uprobe_link);
+ if (err) {
+ pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n",
+ i, usdt_provider, usdt_name, path, err);
+ goto err_out;
+ }
+
+ link->uprobes[i].link = uprobe_link;
+ link->uprobes[i].abs_ip = target->abs_ip;
+ link->uprobe_cnt++;
+ }
+ }
+
+ if (man->has_uprobe_multi) {
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts_multi,
+ .ref_ctr_offsets = ref_ctr_offsets,
+ .offsets = offsets,
+ .cookies = cookies,
+ .cnt = target_cnt,
+ );
+
+ link->multi_link = bpf_program__attach_uprobe_multi(prog, pid, path,
+ NULL, &opts_multi);
+ if (!link->multi_link) {
+ err = -errno;
+ pr_warn("usdt: failed to attach uprobe multi for '%s:%s' in '%s': %d\n",
+ usdt_provider, usdt_name, path, err);
goto err_out;
}
- link->uprobes[i].link = uprobe_link;
- link->uprobes[i].abs_ip = target->abs_ip;
- link->uprobe_cnt++;
+ free(offsets);
+ free(ref_ctr_offsets);
+ free(cookies);
}
free(targets);
hashmap__free(specs_hash);
- elf_end(elf);
- close(fd);
-
+ elf_close(&elf_fd);
return &link->link;
err_out:
+ free(offsets);
+ free(ref_ctr_offsets);
+ free(cookies);
+
if (link)
bpf_link__destroy(&link->link);
free(targets);
hashmap__free(specs_hash);
- if (elf)
- elf_end(elf);
- close(fd);
+ elf_close(&elf_fd);
return libbpf_err_ptr(err);
}
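
The public USDT API is unchanged by this; when the kernel advertises multi-uprobe links, usdt_manager_attach_usdt() now batches every call site into one uprobe_multi link instead of one perf-event uprobe each. A caller-side sketch, with library, provider and probe names made up:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static struct bpf_link *attach_usdt_example(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach_usdt(prog, -1 /* any process */,
					"/usr/lib/libfoo.so", "libfoo",
					"op_start", NULL /* opts */);
	if (!link)
		fprintf(stderr, "usdt attach failed: %d\n", -errno);
	return link;
}
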
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 110518ba4804..f1aebabfb017 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -44,6 +44,7 @@ test_cpp
/bench
/veristat
/sign-file
+/uprobe_multi
*.ko
*.tmp
xskxceiver
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index e4e1e6492268..edef49fcd23e 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -585,6 +585,7 @@ TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
$(OUTPUT)/liburandom_read.so \
$(OUTPUT)/xdp_synproxy \
$(OUTPUT)/sign-file \
+ $(OUTPUT)/uprobe_multi \
ima_setup.sh \
verify_sig_setup.sh \
$(wildcard progs/btf_dump_test_case_*.c) \
@@ -698,6 +699,10 @@ $(OUTPUT)/veristat: $(OUTPUT)/veristat.o
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
+$(OUTPUT)/uprobe_multi: uprobe_multi.c
+ $(call msg,BINARY,,$@)
+ $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@
+
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature bpftool \
diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
index 7ff32be3d730..68180d8f8558 100644
--- a/tools/testing/selftests/bpf/bench.h
+++ b/tools/testing/selftests/bpf/bench.h
@@ -81,15 +81,6 @@ void grace_period_latency_basic_stats(struct bench_res res[], int res_cnt,
void grace_period_ticks_basic_stats(struct bench_res res[], int res_cnt,
struct basic_stats *gp_stat);
-static inline __u64 get_time_ns(void)
-{
- struct timespec t;
-
- clock_gettime(CLOCK_MONOTONIC, &t);
-
- return (u64)t.tv_sec * 1000000000 + t.tv_nsec;
-}
-
static inline void atomic_inc(long *value)
{
(void)__atomic_add_fetch(value, 1, __ATOMIC_RELAXED);
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 3b350bc31343..1c7584e8dd9e 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -16,6 +16,7 @@ CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DUMMY=y
CONFIG_DYNAMIC_FTRACE=y
CONFIG_FPROBE=y
CONFIG_FTRACE_SYSCALLS=y
@@ -59,6 +60,7 @@ CONFIG_NET_IPGRE=y
CONFIG_NET_IPGRE_DEMUX=y
CONFIG_NET_IPIP=y
CONFIG_NET_MPLS_GSO=y
+CONFIG_NET_SCH_FQ=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_SCHED=y
CONFIG_NETDEVSIM=y
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 26b2d1bffdfd..1454cebc262b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -11,6 +11,7 @@
#include <bpf/btf.h>
#include "test_bpf_cookie.skel.h"
#include "kprobe_multi.skel.h"
+#include "uprobe_multi.skel.h"
/* uprobe attach point */
static noinline void trigger_func(void)
@@ -239,6 +240,81 @@ cleanup:
bpf_link__destroy(link1);
kprobe_multi__destroy(skel);
}
+
+/* defined in prog_tests/uprobe_multi_test.c */
+void uprobe_multi_func_1(void);
+void uprobe_multi_func_2(void);
+void uprobe_multi_func_3(void);
+
+static void uprobe_multi_test_run(struct uprobe_multi *skel)
+{
+ skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
+ skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
+ skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
+
+ skel->bss->pid = getpid();
+ skel->bss->test_cookie = true;
+
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+
+ ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 1, "uprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 1, "uprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 1, "uprobe_multi_func_3_result");
+
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 1, "uretprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 1, "uretprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 1, "uretprobe_multi_func_3_result");
+}
+
+static void uprobe_multi_attach_api_subtest(void)
+{
+ struct bpf_link *link1 = NULL, *link2 = NULL;
+ struct uprobe_multi *skel = NULL;
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+ const char *syms[3] = {
+ "uprobe_multi_func_1",
+ "uprobe_multi_func_2",
+ "uprobe_multi_func_3",
+ };
+ __u64 cookies[3];
+
+ cookies[0] = 3; /* uprobe_multi_func_1 */
+ cookies[1] = 1; /* uprobe_multi_func_2 */
+ cookies[2] = 2; /* uprobe_multi_func_3 */
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ opts.cookies = &cookies[0];
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi"))
+ goto cleanup;
+
+ link1 = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1,
+ "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link1, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ cookies[0] = 2; /* uprobe_multi_func_1 */
+ cookies[1] = 3; /* uprobe_multi_func_2 */
+ cookies[2] = 1; /* uprobe_multi_func_3 */
+
+ opts.retprobe = true;
+ link2 = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, -1,
+ "/proc/self/exe", NULL, &opts);
+ if (!ASSERT_OK_PTR(link2, "bpf_program__attach_uprobe_multi_retprobe"))
+ goto cleanup;
+
+ uprobe_multi_test_run(skel);
+
+cleanup:
+ bpf_link__destroy(link2);
+ bpf_link__destroy(link1);
+ uprobe_multi__destroy(skel);
+}
+
static void uprobe_subtest(struct test_bpf_cookie *skel)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
@@ -515,6 +591,8 @@ void test_bpf_cookie(void)
kprobe_multi_attach_api_subtest();
if (test__start_subtest("uprobe"))
uprobe_subtest(skel);
+ if (test__start_subtest("multi_uprobe_attach_api"))
+ uprobe_multi_attach_api_subtest();
if (test__start_subtest("tracepoint"))
tp_subtest(skel);
if (test__start_subtest("perf_event"))
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index 2173c4bb555e..179fe300534f 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -304,14 +304,6 @@ cleanup:
kprobe_multi__destroy(skel);
}
-static inline __u64 get_time_ns(void)
-{
- struct timespec t;
-
- clock_gettime(CLOCK_MONOTONIC, &t);
- return (__u64) t.tv_sec * 1000000000 + t.tv_nsec;
-}
-
static size_t symbol_hash(long key, void *ctx __maybe_unused)
{
return str_hash((const char *) key);
diff --git a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
index 76f1da877f81..b25b870f87ba 100644
--- a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
+++ b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c
@@ -5,6 +5,7 @@
#include <network_helpers.h>
#include "local_kptr_stash.skel.h"
+#include "local_kptr_stash_fail.skel.h"
static void test_local_kptr_stash_simple(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -26,6 +27,27 @@ static void test_local_kptr_stash_simple(void)
local_kptr_stash__destroy(skel);
}
+static void test_local_kptr_stash_plain(void)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct local_kptr_stash *skel;
+ int ret;
+
+ skel = local_kptr_stash__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_plain), &opts);
+ ASSERT_OK(ret, "local_kptr_stash_add_plain run");
+ ASSERT_OK(opts.retval, "local_kptr_stash_add_plain retval");
+
+ local_kptr_stash__destroy(skel);
+}
+
static void test_local_kptr_stash_unstash(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -51,10 +73,19 @@ static void test_local_kptr_stash_unstash(void)
local_kptr_stash__destroy(skel);
}
-void test_local_kptr_stash_success(void)
+static void test_local_kptr_stash_fail(void)
+{
+ RUN_TESTS(local_kptr_stash_fail);
+}
+
+void test_local_kptr_stash(void)
{
if (test__start_subtest("local_kptr_stash_simple"))
test_local_kptr_stash_simple();
+ if (test__start_subtest("local_kptr_stash_plain"))
+ test_local_kptr_stash_plain();
if (test__start_subtest("local_kptr_stash_unstash"))
test_local_kptr_stash_unstash();
+ if (test__start_subtest("local_kptr_stash_fail"))
+ test_local_kptr_stash_fail();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
new file mode 100644
index 000000000000..61333f2a03f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LWT_HELPERS_H
+#define __LWT_HELPERS_H
+
+#include <time.h>
+#include <net/if.h>
+#include <linux/if_tun.h>
+#include <linux/icmp.h>
+
+#include "test_progs.h"
+
+#define log_err(MSG, ...) \
+ fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+ __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__)
+
+#define RUN_TEST(name) \
+ ({ \
+ if (test__start_subtest(#name)) \
+ if (ASSERT_OK(netns_create(), "netns_create")) { \
+ struct nstoken *token = open_netns(NETNS); \
+ if (ASSERT_OK_PTR(token, "setns")) { \
+ test_ ## name(); \
+ close_netns(token); \
+ } \
+ netns_delete(); \
+ } \
+ })
+
+#define NETNS "ns_lwt"
+
+static inline int netns_create(void)
+{
+ return system("ip netns add " NETNS);
+}
+
+static inline int netns_delete(void)
+{
+ return system("ip netns del " NETNS ">/dev/null 2>&1");
+}
+
+static int open_tuntap(const char *dev_name, bool need_mac)
+{
+ int err = 0;
+ struct ifreq ifr;
+ int fd = open("/dev/net/tun", O_RDWR);
+
+ if (!ASSERT_GT(fd, 0, "open(/dev/net/tun)"))
+ return -1;
+
+ ifr.ifr_flags = IFF_NO_PI | (need_mac ? IFF_TAP : IFF_TUN);
+ memcpy(ifr.ifr_name, dev_name, IFNAMSIZ);
+
+ err = ioctl(fd, TUNSETIFF, &ifr);
+ if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) {
+ close(fd);
+ return -1;
+ }
+
+ err = fcntl(fd, F_SETFL, O_NONBLOCK);
+ if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) {
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+#define ICMP_PAYLOAD_SIZE 100
+
+/* Match an ICMP packet with payload len ICMP_PAYLOAD_SIZE */
+static int __expect_icmp_ipv4(char *buf, ssize_t len)
+{
+ struct iphdr *ip = (struct iphdr *)buf;
+ struct icmphdr *icmp = (struct icmphdr *)(ip + 1);
+ ssize_t min_header_len = sizeof(*ip) + sizeof(*icmp);
+
+ if (len < min_header_len)
+ return -1;
+
+ if (ip->protocol != IPPROTO_ICMP)
+ return -1;
+
+ if (icmp->type != ICMP_ECHO)
+ return -1;
+
+ return len == ICMP_PAYLOAD_SIZE + min_header_len;
+}
+
+typedef int (*filter_t) (char *, ssize_t);
+
+/* wait_for_packet - wait for a packet that matches the filter
+ *
+ * @fd: tun fd/packet socket to read packet
+ * @filter: filter function, returning 1 if matches
+ * @timeout: timeout to wait for the packet
+ *
+ * Returns 1 if a matching packet is read, 0 if timeout expired, -1 on error.
+ */
+static int wait_for_packet(int fd, filter_t filter, struct timeval *timeout)
+{
+ char buf[4096];
+ int max_retry = 5; /* in case we read some spurious packets */
+ fd_set fds;
+
+ FD_ZERO(&fds);
+ while (max_retry--) {
+ /* Linux modifies timeout arg... So make a copy */
+ struct timeval copied_timeout = *timeout;
+ ssize_t ret = -1;
+
+ FD_SET(fd, &fds);
+
+ ret = select(1 + fd, &fds, NULL, NULL, &copied_timeout);
+ if (ret <= 0) {
+ if (errno == EINTR)
+ continue;
+ else if (errno == EAGAIN || ret == 0)
+ return 0;
+
+ log_err("select failed");
+ return -1;
+ }
+
+ ret = read(fd, buf, sizeof(buf));
+
+ if (ret <= 0) {
+ log_err("read(dev): %ld", ret);
+ return -1;
+ }
+
+ if (filter && filter(buf, ret) > 0)
+ return 1;
+ }
+
+ return 0;
+}
+
+#endif /* __LWT_HELPERS_H */
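A typical consumer of these helpers pairs open_tuntap() with wait_for_packet(). A minimal usage sketch (illustrative only; it assumes a tun device named tun0 and traffic triggered elsewhere):

static void example_capture(void)
{
	struct timeval timeo = { .tv_usec = 250000 };
	int fd = open_tuntap("tun0", false /* tun: no MAC header */);

	if (fd < 0)
		return;

	/* ... trigger ICMP traffic toward tun0 here ... */

	/* with no MAC header, the buffer starts at the IP header */
	if (wait_for_packet(fd, __expect_icmp_ipv4, &timeo) != 1)
		log_err("no matching ICMP packet");

	close(fd);
}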
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
new file mode 100644
index 000000000000..59b38569f310
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * Test suite of lwt_xmit BPF programs that redirect packets.
+ * These tests check not only that the programs work as expected under normal
+ * conditions, but also that they handle abnormal situations gracefully.
+ *
+ * WARNING
+ * -------
+ * This test suite may crash the kernel, thus should be run in a VM.
+ *
+ * Setup:
+ * ---------
+ * All tests are performed in a single netns. Two lwt encap routes are set up for
+ * each subtest:
+ *
+ * ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<ingress_sec>" dev link_err
+ * ip route add 20.0.0.0/24 encap bpf xmit <obj> sec "<egress_sec>" dev link_err
+ *
+ * Here <obj> is statically defined to test_lwt_redirect.bpf.o, and each section
+ * of this object holds a program entry to test. The BPF object is built from
+ * progs/test_lwt_redirect.c. We don't use a generated BPF skeleton since
+ * attaching lwt programs is not yet supported by libbpf.
+ *
+ * For testing, ping commands are run in the test netns:
+ *
+ * ping 10.0.0.<ifindex> -c 1 -w 1 -s 100
+ * ping 20.0.0.<ifindex> -c 1 -w 1 -s 100
+ *
+ * Scenarios:
+ * --------------------------------
+ * 1. Redirect to a running tap/tun device
+ * 2. Redirect to a down tap/tun device
+ * 3. Redirect to a vlan device with lower layer down
+ *
+ * In case 1, ping packets should be received by a packet socket on the target
+ * device when redirected to ingress, and by the tun/tap fd when redirected to
+ * egress.
+ *
+ * Cases 2 and 3 are considered successful as long as they do not crash the
+ * kernel, i.e. no regression is introduced.
+ *
+ * Cases 1 and 2 use a tap device to test redirecting to a device that requires
+ * a MAC header, and a tun device to test the case where no MAC header is added.
+ */
+#include <sys/socket.h>
+#include <net/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/if_tun.h>
+#include <linux/icmp.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdlib.h>
+
+#include "lwt_helpers.h"
+#include "test_progs.h"
+#include "network_helpers.h"
+
+#define BPF_OBJECT "test_lwt_redirect.bpf.o"
+#define INGRESS_SEC(need_mac) ((need_mac) ? "redir_ingress" : "redir_ingress_nomac")
+#define EGRESS_SEC(need_mac) ((need_mac) ? "redir_egress" : "redir_egress_nomac")
+#define LOCAL_SRC "10.0.0.1"
+#define CIDR_TO_INGRESS "10.0.0.0/24"
+#define CIDR_TO_EGRESS "20.0.0.0/24"
+
+/* Send a ping to be redirected toward the given dev, with the last byte of
+ * the destination IP being the target device index.
+ *
+ * Note: the ping command inside BPF CI is the busybox version, so it lacks
+ * certain features, such as the -m option to set the packet mark.
+ */
+static void ping_dev(const char *dev, bool is_ingress)
+{
+ int link_index = if_nametoindex(dev);
+ char ip[256];
+
+ if (!ASSERT_GT(link_index, 0, "if_nametoindex"))
+ return;
+
+ if (is_ingress)
+ snprintf(ip, sizeof(ip), "10.0.0.%d", link_index);
+ else
+ snprintf(ip, sizeof(ip), "20.0.0.%d", link_index);
+
+ /* We won't get a reply. Don't fail here */
+ SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1",
+ ip, ICMP_PAYLOAD_SIZE);
+}
+
+static int new_packet_sock(const char *ifname)
+{
+ int err = 0;
+ int ignore_outgoing = 1;
+ int ifindex = -1;
+ int s = -1;
+
+ s = socket(AF_PACKET, SOCK_RAW, 0);
+ if (!ASSERT_GE(s, 0, "socket(AF_PACKET)"))
+ return -1;
+
+ ifindex = if_nametoindex(ifname);
+ if (!ASSERT_GT(ifindex, 0, "if_nametoindex")) {
+ close(s);
+ return -1;
+ }
+
+ struct sockaddr_ll addr = {
+ .sll_family = AF_PACKET,
+ .sll_protocol = htons(ETH_P_IP),
+ .sll_ifindex = ifindex,
+ };
+
+ err = bind(s, (struct sockaddr *)&addr, sizeof(addr));
+ if (!ASSERT_OK(err, "bind(AF_PACKET)")) {
+ close(s);
+ return -1;
+ }
+
+ /* Use a packet socket to capture only ingress traffic, so we can detect
+ * a regression that actually redirects the packet to the egress path
+ * instead.
+ */
+ err = setsockopt(s, SOL_PACKET, PACKET_IGNORE_OUTGOING,
+ &ignore_outgoing, sizeof(ignore_outgoing));
+ if (!ASSERT_OK(err, "setsockopt(PACKET_IGNORE_OUTGOING)")) {
+ close(s);
+ return -1;
+ }
+
+ err = fcntl(s, F_SETFL, O_NONBLOCK);
+ if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) {
+ close(s);
+ return -1;
+ }
+
+ return s;
+}
+
+static int expect_icmp(char *buf, ssize_t len)
+{
+ struct ethhdr *eth = (struct ethhdr *)buf;
+
+ if (len < (ssize_t)sizeof(*eth))
+ return -1;
+
+ if (eth->h_proto == htons(ETH_P_IP))
+ return __expect_icmp_ipv4((char *)(eth + 1), len - sizeof(*eth));
+
+ return -1;
+}
+
+static int expect_icmp_nomac(char *buf, ssize_t len)
+{
+ return __expect_icmp_ipv4(buf, len);
+}
+
+static void send_and_capture_test_packets(const char *test_name, int tap_fd,
+ const char *target_dev, bool need_mac)
+{
+ int psock = -1;
+ struct timeval timeo = {
+ .tv_sec = 0,
+ .tv_usec = 250000,
+ };
+ int ret = -1;
+
+ filter_t filter = need_mac ? expect_icmp : expect_icmp_nomac;
+
+ ping_dev(target_dev, false);
+
+ ret = wait_for_packet(tap_fd, filter, &timeo);
+ if (!ASSERT_EQ(ret, 1, "wait_for_epacket")) {
+ log_err("%s egress test fails", test_name);
+ goto out;
+ }
+
+ psock = new_packet_sock(target_dev);
+ ping_dev(target_dev, true);
+
+ ret = wait_for_packet(psock, filter, &timeo);
+ if (!ASSERT_EQ(ret, 1, "wait_for_ipacket")) {
+ log_err("%s ingress test fails", test_name);
+ goto out;
+ }
+
+out:
+ if (psock >= 0)
+ close(psock);
+}
+
+static int setup_redirect_target(const char *target_dev, bool need_mac)
+{
+ int target_index = -1;
+ int tap_fd = -1;
+
+ tap_fd = open_tuntap(target_dev, need_mac);
+ if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
+ goto fail;
+
+ target_index = if_nametoindex(target_dev);
+ if (!ASSERT_GT(target_index, 0, "if_nametoindex"))
+ goto fail;
+
+ SYS(fail, "ip link add link_err type dummy");
+ SYS(fail, "ip link set lo up");
+ SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32");
+ SYS(fail, "ip link set link_err up");
+ SYS(fail, "ip link set %s up", target_dev);
+
+ SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s",
+ CIDR_TO_INGRESS, BPF_OBJECT, INGRESS_SEC(need_mac));
+
+ SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec %s",
+ CIDR_TO_EGRESS, BPF_OBJECT, EGRESS_SEC(need_mac));
+
+ return tap_fd;
+
+fail:
+ if (tap_fd >= 0)
+ close(tap_fd);
+ return -1;
+}
+
+static void test_lwt_redirect_normal(void)
+{
+ const char *target_dev = "tap0";
+ int tap_fd = -1;
+ bool need_mac = true;
+
+ tap_fd = setup_redirect_target(target_dev, need_mac);
+ if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
+ return;
+
+ send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac);
+ close(tap_fd);
+}
+
+static void test_lwt_redirect_normal_nomac(void)
+{
+ const char *target_dev = "tun0";
+ int tap_fd = -1;
+ bool need_mac = false;
+
+ tap_fd = setup_redirect_target(target_dev, need_mac);
+ if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
+ return;
+
+ send_and_capture_test_packets(__func__, tap_fd, target_dev, need_mac);
+ close(tap_fd);
+}
+
+/* This test aims to guard against future regressions. As long as the kernel
+ * does not panic, the test is considered a success.
+ */
+static void __test_lwt_redirect_dev_down(bool need_mac)
+{
+ const char *target_dev = "tap0";
+ int tap_fd = -1;
+
+ tap_fd = setup_redirect_target(target_dev, need_mac);
+ if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
+ return;
+
+ SYS(out, "ip link set %s down", target_dev);
+ ping_dev(target_dev, true);
+ ping_dev(target_dev, false);
+
+out:
+ close(tap_fd);
+}
+
+static void test_lwt_redirect_dev_down(void)
+{
+ __test_lwt_redirect_dev_down(true);
+}
+
+static void test_lwt_redirect_dev_down_nomac(void)
+{
+ __test_lwt_redirect_dev_down(false);
+}
+
+/* This test aims to guard against future regressions. As long as the kernel
+ * does not panic, the test is considered a success.
+ */
+static void test_lwt_redirect_dev_carrier_down(void)
+{
+ const char *lower_dev = "tap0";
+ const char *vlan_dev = "vlan100";
+ int tap_fd = -1;
+
+ tap_fd = setup_redirect_target(lower_dev, true);
+ if (!ASSERT_GE(tap_fd, 0, "setup_redirect_target"))
+ return;
+
+ SYS(out, "ip link add vlan100 link %s type vlan id 100", lower_dev);
+ SYS(out, "ip link set %s up", vlan_dev);
+ SYS(out, "ip link set %s down", lower_dev);
+ ping_dev(vlan_dev, true);
+ ping_dev(vlan_dev, false);
+
+out:
+ close(tap_fd);
+}
+
+static void *test_lwt_redirect_run(void *arg)
+{
+ netns_delete();
+ RUN_TEST(lwt_redirect_normal);
+ RUN_TEST(lwt_redirect_normal_nomac);
+ RUN_TEST(lwt_redirect_dev_down);
+ RUN_TEST(lwt_redirect_dev_down_nomac);
+ RUN_TEST(lwt_redirect_dev_carrier_down);
+ return NULL;
+}
+
+void test_lwt_redirect(void)
+{
+ pthread_t test_thread;
+ int err;
+
+ /* Run the tests in their own thread to isolate the namespace changes
+ * so they do not affect the environment of other tests.
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
+ */
+ err = pthread_create(&test_thread, NULL, &test_lwt_redirect_run, NULL);
+ if (ASSERT_OK(err, "pthread_create"))
+ ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
new file mode 100644
index 000000000000..f4bb2d5fcae0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * Test suite of lwt BPF programs that reroute packets.
+ * These tests check not only that the programs work as expected under normal
+ * conditions, but also that they handle abnormal situations gracefully. This
+ * test suite currently covers only the lwt_xmit hook; lwt_in tests have not
+ * been implemented.
+ *
+ * WARNING
+ * -------
+ * This test suite can crash the kernel, thus should be run in a VM.
+ *
+ * Setup:
+ * ---------
+ * All tests are performed in a single netns. An lwt encap route is set up for
+ * each subtest:
+ *
+ * ip route add 10.0.0.0/24 encap bpf xmit <obj> sec "<section_N>" dev link_err
+ *
+ * Here <obj> is statically defined to test_lwt_reroute.bpf.o, and it contains
+ * a single test program entry. This program sets the packet mark to the last
+ * byte of the IPv4 daddr. For example, a packet going to 1.2.3.4 will receive
+ * an skb mark of 4. A packet is only marked once, and IP x.x.x.0 is skipped
+ * to avoid a route loop. We don't use a generated BPF skeleton since
+ * attaching lwt programs is not yet supported by libbpf.
+ *
+ * The test program brings up a tun device and sets up the following
+ * routes:
+ *
+ * ip rule add pref 100 from all fwmark <tun_index> lookup 100
+ * ip route add table 100 default dev tun0
+ *
+ * For normal testing, a ping command is run in the test netns:
+ *
+ * ping 10.0.0.<tun_index> -c 1 -w 1 -s 100
+ *
+ * For abnormal testing, fq is used as the qdisc of the tun device. A UDP
+ * socket then tries to overflow the fq queue and trigger a qdisc drop error.
+ *
+ * Scenarios:
+ * --------------------------------
+ * 1. Reroute to a running tun device
+ * 2. Reroute to a device whose qdisc drops packets
+ *
+ * For case 1, ping packets should be received by the tun device.
+ *
+ * For case 2, UDP packets are forced to overflow the fq limit. As long as the
+ * kernel does not crash, the test is considered successful.
+ */
+#include "lwt_helpers.h"
+#include "network_helpers.h"
+#include <linux/net_tstamp.h>
+
+#define BPF_OBJECT "test_lwt_reroute.bpf.o"
+#define LOCAL_SRC "10.0.0.1"
+#define TEST_CIDR "10.0.0.0/24"
+#define XMIT_HOOK "xmit"
+#define XMIT_SECTION "lwt_xmit"
+#define NSEC_PER_SEC 1000000000ULL
+
+/* send a ping to be rerouted to the target device */
+static void ping_once(const char *ip)
+{
+ /* We won't get a reply. Don't fail here */
+ SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1",
+ ip, ICMP_PAYLOAD_SIZE);
+}
+
+/* Send snd_target UDP packets to overflow the fq queue and trigger a qdisc
+ * drop error. This is done via SO_TXTIME timestamps, which force fq to buffer
+ * the delayed packets.
+ */
+static int overflow_fq(int snd_target, const char *target_ip)
+{
+ struct sockaddr_in addr = {
+ .sin_family = AF_INET,
+ .sin_port = htons(1234),
+ };
+
+ char data_buf[8]; /* only #pkts matter, so use a random small buffer */
+ char control_buf[CMSG_SPACE(sizeof(uint64_t))];
+ struct iovec iov = {
+ .iov_base = data_buf,
+ .iov_len = sizeof(data_buf),
+ };
+ int err = -1;
+ int s = -1;
+ struct sock_txtime txtime_on = {
+ .clockid = CLOCK_MONOTONIC,
+ .flags = 0,
+ };
+ struct msghdr msg = {
+ .msg_name = &addr,
+ .msg_namelen = sizeof(addr),
+ .msg_control = control_buf,
+ .msg_controllen = sizeof(control_buf),
+ .msg_iovlen = 1,
+ .msg_iov = &iov,
+ };
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+
+ memset(data_buf, 0, sizeof(data_buf));
+
+ s = socket(AF_INET, SOCK_DGRAM, 0);
+ if (!ASSERT_GE(s, 0, "socket"))
+ goto out;
+
+ err = setsockopt(s, SOL_SOCKET, SO_TXTIME, &txtime_on, sizeof(txtime_on));
+ if (!ASSERT_OK(err, "setsockopt(SO_TXTIME)"))
+ goto out;
+
+ err = inet_pton(AF_INET, target_ip, &addr.sin_addr);
+ if (!ASSERT_EQ(err, 1, "inet_pton"))
+ goto out;
+
+ while (snd_target > 0) {
+ struct timespec now;
+
+ memset(control_buf, 0, sizeof(control_buf));
+ cmsg->cmsg_type = SCM_TXTIME;
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(uint64_t));
+
+ err = clock_gettime(CLOCK_MONOTONIC, &now);
+ if (!ASSERT_OK(err, "clock_gettime(CLOCK_MONOTONIC)")) {
+ err = -1;
+ goto out;
+ }
+
+ *(uint64_t *)CMSG_DATA(cmsg) = (now.tv_sec + 1) * NSEC_PER_SEC +
+ now.tv_nsec;
+
+ /* we will intentionally send more than fq limit, so ignore
+ * the error here.
+ */
+ sendmsg(s, &msg, MSG_NOSIGNAL);
+ snd_target--;
+ }
+
+ /* no kernel crash so far is considered success */
+ err = 0;
+
+out:
+ if (s >= 0)
+ close(s);
+
+ return err;
+}
+
+static int setup(const char *tun_dev)
+{
+ int target_index = -1;
+ int tap_fd = -1;
+
+ tap_fd = open_tuntap(tun_dev, false);
+ if (!ASSERT_GE(tap_fd, 0, "open_tun"))
+ return -1;
+
+ target_index = if_nametoindex(tun_dev);
+ if (!ASSERT_GT(target_index, 0, "if_nametoindex"))
+ goto fail;
+
+ SYS(fail, "ip link add link_err type dummy");
+ SYS(fail, "ip link set lo up");
+ SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32");
+ SYS(fail, "ip link set link_err up");
+ SYS(fail, "ip link set %s up", tun_dev);
+
+ SYS(fail, "ip route add %s dev link_err encap bpf xmit obj %s sec lwt_xmit",
+ TEST_CIDR, BPF_OBJECT);
+
+ SYS(fail, "ip rule add pref 100 from all fwmark %d lookup 100",
+ target_index);
+ SYS(fail, "ip route add t 100 default dev %s", tun_dev);
+
+ return tap_fd;
+
+fail:
+ if (tap_fd >= 0)
+ close(tap_fd);
+ return -1;
+}
+
+static void test_lwt_reroute_normal_xmit(void)
+{
+ const char *tun_dev = "tun0";
+ int tun_fd = -1;
+ int ifindex = -1;
+ char ip[256];
+ struct timeval timeo = {
+ .tv_sec = 0,
+ .tv_usec = 250000,
+ };
+
+ tun_fd = setup(tun_dev);
+ if (!ASSERT_GE(tun_fd, 0, "setup_reroute"))
+ return;
+
+ ifindex = if_nametoindex(tun_dev);
+ if (!ASSERT_GT(ifindex, 0, "if_nametoindex"))
+ return;
+
+ snprintf(ip, 256, "10.0.0.%d", ifindex);
+
+ /* ping packets should be received by the tun device */
+ ping_once(ip);
+
+ if (!ASSERT_EQ(wait_for_packet(tun_fd, __expect_icmp_ipv4, &timeo), 1,
+ "wait_for_packet"))
+ log_err("%s xmit", __func__);
+}
+
+/*
+ * Test the failure case when the skb is dropped at the qdisc. This is a
+ * regression prevention at the xmit hook only.
+ */
+static void test_lwt_reroute_qdisc_dropped(void)
+{
+ const char *tun_dev = "tun0";
+ int tun_fd = -1;
+ int ifindex = -1;
+ char ip[256];
+
+ tun_fd = setup(tun_dev);
+ if (!ASSERT_GE(tun_fd, 0, "setup_reroute"))
+ goto fail;
+
+ SYS(fail, "tc qdisc replace dev %s root fq limit 5 flow_limit 5", tun_dev);
+
+ ifindex = if_nametoindex(tun_dev);
+ if (!ASSERT_GT(ifindex, 0, "if_nametoindex"))
+ goto fail;
+
+ snprintf(ip, 256, "10.0.0.%d", ifindex);
+ ASSERT_EQ(overflow_fq(10, ip), 0, "overflow_fq");
+
+fail:
+ if (tun_fd >= 0)
+ close(tun_fd);
+}
+
+static void *test_lwt_reroute_run(void *arg)
+{
+ netns_delete();
+ RUN_TEST(lwt_reroute_normal_xmit);
+ RUN_TEST(lwt_reroute_qdisc_dropped);
+ return NULL;
+}
+
+void test_lwt_reroute(void)
+{
+ pthread_t test_thread;
+ int err;
+
+ /* Run the tests in their own thread to isolate the namespace changes
+ * so they do not affect the environment of other tests.
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns())
+ */
+ err = pthread_create(&test_thread, NULL, &test_lwt_reroute_run, NULL);
+ if (ASSERT_OK(err, "pthread_create"))
+ ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
+}
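A quick sanity check of the drop math in test_lwt_reroute_qdisc_dropped() above: the qdisc is installed with limit 5 and flow_limit 5, and overflow_fq(10, ip) schedules all 10 UDP packets about one second into the future via SO_TXTIME, so they accumulate in fq instead of draining. At least 10 - 5 = 5 enqueue attempts must therefore fail, which exercises the lwt xmit drop path deterministically rather than depending on timing.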
diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
index 7423983472c7..d6bd5e16e637 100644
--- a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
@@ -9,12 +9,38 @@
void test_refcounted_kptr(void)
{
+ RUN_TESTS(refcounted_kptr);
}
void test_refcounted_kptr_fail(void)
{
+ RUN_TESTS(refcounted_kptr_fail);
}
void test_refcounted_kptr_wrong_owner(void)
{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct refcounted_kptr *skel;
+ int ret;
+
+ skel = refcounted_kptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a1), &opts);
+ ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a1");
+ ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a1 retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_b), &opts);
+ ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_b");
+ ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_b retval");
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a2), &opts);
+ ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a2");
+ ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
+ refcounted_kptr__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
index 740d5f644b40..d4579f735398 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
@@ -79,6 +79,8 @@ static const char * const success_tests[] = {
"test_task_from_pid_current",
"test_task_from_pid_invalid",
"task_kfunc_acquire_trusted_walked",
+ "test_task_kfunc_flavor_relo",
+ "test_task_kfunc_flavor_relo_not_found",
};
void test_task_kfunc(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
index e873766276d1..48b55539331e 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_bpf.c
@@ -3,6 +3,7 @@
#include <test_progs.h>
#include <linux/pkt_cls.h>
+#include "cap_helpers.h"
#include "test_tc_bpf.skel.h"
#define LO_IFINDEX 1
@@ -327,7 +328,7 @@ static int test_tc_bpf_api(struct bpf_tc_hook *hook, int fd)
return 0;
}
-void test_tc_bpf(void)
+void tc_bpf_root(void)
{
DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX,
.attach_point = BPF_TC_INGRESS);
@@ -393,3 +394,36 @@ end:
}
test_tc_bpf__destroy(skel);
}
+
+void tc_bpf_non_root(void)
+{
+ struct test_tc_bpf *skel = NULL;
+ __u64 caps = 0;
+ int ret;
+
+ /* In case CAP_BPF and CAP_NET_ADMIN are not set */
+ ret = cap_enable_effective(1ULL << CAP_BPF | 1ULL << CAP_NET_ADMIN, &caps);
+ if (!ASSERT_OK(ret, "set_cap_bpf_cap_net_admin"))
+ return;
+ ret = cap_disable_effective(1ULL << CAP_SYS_ADMIN | 1ULL << CAP_PERFMON, NULL);
+ if (!ASSERT_OK(ret, "disable_cap_sys_admin"))
+ goto restore_cap;
+
+ skel = test_tc_bpf__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load"))
+ goto restore_cap;
+
+ test_tc_bpf__destroy(skel);
+
+restore_cap:
+ if (caps)
+ cap_enable_effective(caps, NULL);
+}
+
+void test_tc_bpf(void)
+{
+ if (test__start_subtest("tc_bpf_root"))
+ tc_bpf_root();
+ if (test__start_subtest("tc_bpf_non_root"))
+ tc_bpf_non_root();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
new file mode 100644
index 000000000000..cd051d3901a9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <unistd.h>
+#include <test_progs.h>
+#include "uprobe_multi.skel.h"
+#include "uprobe_multi_bench.skel.h"
+#include "uprobe_multi_usdt.skel.h"
+#include "bpf/libbpf_internal.h"
+#include "testing_helpers.h"
+
+static char test_data[] = "test_data";
+
+noinline void uprobe_multi_func_1(void)
+{
+ asm volatile ("");
+}
+
+noinline void uprobe_multi_func_2(void)
+{
+ asm volatile ("");
+}
+
+noinline void uprobe_multi_func_3(void)
+{
+ asm volatile ("");
+}
+
+struct child {
+ int go[2];
+ int pid;
+};
+
+static void release_child(struct child *child)
+{
+ int child_status;
+
+ if (!child)
+ return;
+ close(child->go[1]);
+ close(child->go[0]);
+ if (child->pid > 0)
+ waitpid(child->pid, &child_status, 0);
+}
+
+static void kick_child(struct child *child)
+{
+ char c = 1;
+
+ if (child) {
+ write(child->go[1], &c, 1);
+ release_child(child);
+ }
+ fflush(NULL);
+}
+
+static struct child *spawn_child(void)
+{
+ static struct child child;
+ int err;
+ int c;
+
+ /* pipe to notify child to execute the trigger functions */
+ if (pipe(child.go))
+ return NULL;
+
+ child.pid = fork();
+ if (child.pid < 0) {
+ release_child(&child);
+ errno = EINVAL;
+ return NULL;
+ }
+
+ /* child */
+ if (child.pid == 0) {
+ close(child.go[1]);
+
+ /* wait for parent's kick */
+ err = read(child.go[0], &c, 1);
+ if (err != 1)
+ exit(err);
+
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+
+ exit(errno);
+ }
+
+ return &child;
+}
+
+static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
+{
+ skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
+ skel->bss->uprobe_multi_func_2_addr = (__u64) uprobe_multi_func_2;
+ skel->bss->uprobe_multi_func_3_addr = (__u64) uprobe_multi_func_3;
+
+ skel->bss->user_ptr = test_data;
+
+ /*
+ * Disable the pid check in the bpf program if this is the pid filter
+ * test, because the probe should be executed only by child->pid,
+ * which was passed at probe attach time.
+ */
+ skel->bss->pid = child ? 0 : getpid();
+
+ if (child)
+ kick_child(child);
+
+ /* trigger all probes */
+ uprobe_multi_func_1();
+ uprobe_multi_func_2();
+ uprobe_multi_func_3();
+
+ /*
+ * There are 2 entry and 2 exit probes called for each
+ * uprobe_multi_func_[123] function, and each sleepable probe (6 in
+ * total) increments uprobe_multi_sleep_result.
+ */
+ ASSERT_EQ(skel->bss->uprobe_multi_func_1_result, 2, "uprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uprobe_multi_func_2_result, 2, "uprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uprobe_multi_func_3_result, 2, "uprobe_multi_func_3_result");
+
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_1_result, 2, "uretprobe_multi_func_1_result");
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_2_result, 2, "uretprobe_multi_func_2_result");
+ ASSERT_EQ(skel->bss->uretprobe_multi_func_3_result, 2, "uretprobe_multi_func_3_result");
+
+ ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");
+
+ if (child)
+ ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
+}
+
+static void test_skel_api(void)
+{
+ struct uprobe_multi *skel = NULL;
+ int err;
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
+ goto cleanup;
+
+ err = uprobe_multi__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi__attach"))
+ goto cleanup;
+
+ uprobe_multi_test_run(skel, NULL);
+
+cleanup:
+ uprobe_multi__destroy(skel);
+}
+
+static void
+__test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts,
+ struct child *child)
+{
+ pid_t pid = child ? child->pid : -1;
+ struct uprobe_multi *skel = NULL;
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
+ goto cleanup;
+
+ opts->retprobe = false;
+ skel->links.uprobe = bpf_program__attach_uprobe_multi(skel->progs.uprobe, pid,
+ binary, pattern, opts);
+ if (!ASSERT_OK_PTR(skel->links.uprobe, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ opts->retprobe = true;
+ skel->links.uretprobe = bpf_program__attach_uprobe_multi(skel->progs.uretprobe, pid,
+ binary, pattern, opts);
+ if (!ASSERT_OK_PTR(skel->links.uretprobe, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ opts->retprobe = false;
+ skel->links.uprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uprobe_sleep, pid,
+ binary, pattern, opts);
+ if (!ASSERT_OK_PTR(skel->links.uprobe_sleep, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ opts->retprobe = true;
+ skel->links.uretprobe_sleep = bpf_program__attach_uprobe_multi(skel->progs.uretprobe_sleep,
+ pid, binary, pattern, opts);
+ if (!ASSERT_OK_PTR(skel->links.uretprobe_sleep, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ opts->retprobe = false;
+ skel->links.uprobe_extra = bpf_program__attach_uprobe_multi(skel->progs.uprobe_extra, -1,
+ binary, pattern, opts);
+ if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
+ goto cleanup;
+
+ uprobe_multi_test_run(skel, child);
+
+cleanup:
+ uprobe_multi__destroy(skel);
+}
+
+static void
+test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi_opts *opts)
+{
+ struct child *child;
+
+ /* no pid filter */
+ __test_attach_api(binary, pattern, opts, NULL);
+
+ /* pid filter */
+ child = spawn_child();
+ if (!ASSERT_OK_PTR(child, "spawn_child"))
+ return;
+
+ __test_attach_api(binary, pattern, opts, child);
+}
+
+static void test_attach_api_pattern(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+
+ test_attach_api("/proc/self/exe", "uprobe_multi_func_*", &opts);
+ test_attach_api("/proc/self/exe", "uprobe_multi_func_?", &opts);
+}
+
+static void test_attach_api_syms(void)
+{
+ LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
+ const char *syms[3] = {
+ "uprobe_multi_func_1",
+ "uprobe_multi_func_2",
+ "uprobe_multi_func_3",
+ };
+
+ opts.syms = syms;
+ opts.cnt = ARRAY_SIZE(syms);
+ test_attach_api("/proc/self/exe", NULL, &opts);
+}
+
+static void __test_link_api(struct child *child)
+{
+ int prog_fd, link1_fd = -1, link2_fd = -1, link3_fd = -1, link4_fd = -1;
+ LIBBPF_OPTS(bpf_link_create_opts, opts);
+ const char *path = "/proc/self/exe";
+ struct uprobe_multi *skel = NULL;
+ unsigned long *offsets = NULL;
+ const char *syms[3] = {
+ "uprobe_multi_func_1",
+ "uprobe_multi_func_2",
+ "uprobe_multi_func_3",
+ };
+ int link_extra_fd = -1;
+ int err;
+
+ err = elf_resolve_syms_offsets(path, 3, syms, (unsigned long **) &offsets);
+ if (!ASSERT_OK(err, "elf_resolve_syms_offsets"))
+ return;
+
+ opts.uprobe_multi.path = path;
+ opts.uprobe_multi.offsets = offsets;
+ opts.uprobe_multi.cnt = ARRAY_SIZE(syms);
+ opts.uprobe_multi.pid = child ? child->pid : 0;
+
+ skel = uprobe_multi__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi__open_and_load"))
+ goto cleanup;
+
+ opts.uprobe_multi.flags = 0;
+ prog_fd = bpf_program__fd(skel->progs.uprobe);
+ link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
+ goto cleanup;
+
+ opts.uprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
+ prog_fd = bpf_program__fd(skel->progs.uretprobe);
+ link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link2_fd, 0, "link2_fd"))
+ goto cleanup;
+
+ opts.uprobe_multi.flags = 0;
+ prog_fd = bpf_program__fd(skel->progs.uprobe_sleep);
+ link3_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link3_fd, 0, "link3_fd"))
+ goto cleanup;
+
+ opts.uprobe_multi.flags = BPF_F_UPROBE_MULTI_RETURN;
+ prog_fd = bpf_program__fd(skel->progs.uretprobe_sleep);
+ link4_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link4_fd, 0, "link4_fd"))
+ goto cleanup;
+
+ opts.uprobe_multi.flags = 0;
+ opts.uprobe_multi.pid = 0;
+ prog_fd = bpf_program__fd(skel->progs.uprobe_extra);
+ link_extra_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
+ if (!ASSERT_GE(link_extra_fd, 0, "link_extra_fd"))
+ goto cleanup;
+
+ uprobe_multi_test_run(skel, child);
+
+cleanup:
+ if (link1_fd >= 0)
+ close(link1_fd);
+ if (link2_fd >= 0)
+ close(link2_fd);
+ if (link3_fd >= 0)
+ close(link3_fd);
+ if (link4_fd >= 0)
+ close(link4_fd);
+ if (link_extra_fd >= 0)
+ close(link_extra_fd);
+
+ uprobe_multi__destroy(skel);
+ free(offsets);
+}
+
+static void test_link_api(void)
+{
+ struct child *child;
+
+ /* no pid filter */
+ __test_link_api(NULL);
+
+ /* pid filter */
+ child = spawn_child();
+ if (!ASSERT_OK_PTR(child, "spawn_child"))
+ return;
+
+ __test_link_api(child);
+}
+
+static void test_bench_attach_uprobe(void)
+{
+ long attach_start_ns = 0, attach_end_ns = 0;
+ struct uprobe_multi_bench *skel = NULL;
+ long detach_start_ns, detach_end_ns;
+ double attach_delta, detach_delta;
+ int err;
+
+ skel = uprobe_multi_bench__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi_bench__open_and_load"))
+ goto cleanup;
+
+ attach_start_ns = get_time_ns();
+
+ err = uprobe_multi_bench__attach(skel);
+ if (!ASSERT_OK(err, "uprobe_multi_bench__attach"))
+ goto cleanup;
+
+ attach_end_ns = get_time_ns();
+
+ system("./uprobe_multi bench");
+
+ ASSERT_EQ(skel->bss->count, 50000, "uprobes_count");
+
+cleanup:
+ detach_start_ns = get_time_ns();
+ uprobe_multi_bench__destroy(skel);
+ detach_end_ns = get_time_ns();
+
+ attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
+ detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
+
+ printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
+ printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
+}
+
+static void test_bench_attach_usdt(void)
+{
+ long attach_start_ns = 0, attach_end_ns = 0;
+ struct uprobe_multi_usdt *skel = NULL;
+ long detach_start_ns, detach_end_ns;
+ double attach_delta, detach_delta;
+
+ skel = uprobe_multi_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "uprobe_multi__open"))
+ goto cleanup;
+
+ attach_start_ns = get_time_ns();
+
+ skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, -1, "./uprobe_multi",
+ "test", "usdt", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt0, "bpf_program__attach_usdt"))
+ goto cleanup;
+
+ attach_end_ns = get_time_ns();
+
+ system("./uprobe_multi usdt");
+
+ ASSERT_EQ(skel->bss->count, 50000, "usdt_count");
+
+cleanup:
+ detach_start_ns = get_time_ns();
+ uprobe_multi_usdt__destroy(skel);
+ detach_end_ns = get_time_ns();
+
+ attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
+ detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
+
+ printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
+ printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
+}
+
+void test_uprobe_multi_test(void)
+{
+ if (test__start_subtest("skel_api"))
+ test_skel_api();
+ if (test__start_subtest("attach_api_pattern"))
+ test_attach_api_pattern();
+ if (test__start_subtest("attach_api_syms"))
+ test_attach_api_syms();
+ if (test__start_subtest("link_api"))
+ test_link_api();
+ if (test__start_subtest("bench_uprobe"))
+ test_bench_attach_uprobe();
+ if (test__start_subtest("bench_usdt"))
+ test_bench_attach_usdt();
+}
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
index 06838083079c..b567a666d2b8 100644
--- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c
@@ -14,10 +14,16 @@ struct node_data {
struct bpf_rb_node node;
};
+struct plain_local {
+ long key;
+ long data;
+};
+
struct map_value {
struct prog_test_ref_kfunc *not_kptr;
struct prog_test_ref_kfunc __kptr *val;
struct node_data __kptr *node;
+ struct plain_local __kptr *plain;
};
/* This is necessary so that LLVM generates BTF for node_data struct
@@ -67,6 +73,28 @@ long stash_rb_nodes(void *ctx)
}
SEC("tc")
+long stash_plain(void *ctx)
+{
+ struct map_value *mapval;
+ struct plain_local *res;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 1;
+ res->key = 41;
+
+ res = bpf_kptr_xchg(&mapval->plain, res);
+ if (res)
+ bpf_obj_drop(res);
+ return 0;
+}
+
+SEC("tc")
long unstash_rb_node(void *ctx)
{
struct map_value *mapval;
diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c b/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c
new file mode 100644
index 000000000000..fcf7a7567da2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_kptr_stash_fail.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "../bpf_experimental.h"
+#include "bpf_misc.h"
+
+struct node_data {
+ long key;
+ long data;
+ struct bpf_rb_node node;
+};
+
+struct map_value {
+ struct node_data __kptr *node;
+};
+
+struct node_data2 {
+ long key[4];
+};
+
+/* This is necessary so that LLVM generates BTF for node_data struct
+ * If it's not included, a fwd reference for node_data will be generated but
+ * no struct. Example BTF of "node" field in map_value when not included:
+ *
+ * [10] PTR '(anon)' type_id=35
+ * [34] FWD 'node_data' fwd_kind=struct
+ * [35] TYPE_TAG 'kptr_ref' type_id=34
+ */
+struct node_data *just_here_because_btf_bug;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 2);
+} some_nodes SEC(".maps");
+
+SEC("tc")
+__failure __msg("invalid kptr access, R2 type=ptr_node_data2 expected=ptr_node_data")
+long stash_rb_nodes(void *ctx)
+{
+ struct map_value *mapval;
+ struct node_data2 *res;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 1;
+ res->key[0] = 40;
+
+ res = bpf_kptr_xchg(&mapval->node, res);
+ if (res)
+ bpf_obj_drop(res);
+ return 0;
+}
+
+SEC("tc")
+__failure __msg("R1 must have zero offset when passed to release func")
+long drop_rb_node_off(void *ctx)
+{
+ struct map_value *mapval;
+ struct node_data *res;
+ int idx = 0;
+
+ mapval = bpf_map_lookup_elem(&some_nodes, &idx);
+ if (!mapval)
+ return 1;
+
+ res = bpf_obj_new(typeof(*res));
+ if (!res)
+ return 1;
+ /* Try releasing with graph node offset */
+ bpf_obj_drop(&res->node);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index c55652fdc63a..893a4fdb4b6e 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -8,6 +8,9 @@
#include "bpf_misc.h"
#include "bpf_experimental.h"
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
struct node_data {
long key;
long list_data;
@@ -497,4 +500,72 @@ long rbtree_wrong_owner_remove_fail_a2(void *ctx)
return 0;
}
+SEC("?fentry.s/bpf_testmod_test_read")
+__success
+int BPF_PROG(rbtree_sleepable_rcu,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ struct bpf_rb_node *rb;
+ struct node_data *n, *m = NULL;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 0;
+
+ bpf_rcu_read_lock();
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&root, &n->r, less);
+ rb = bpf_rbtree_first(&root);
+ if (!rb)
+ goto err_out;
+
+ rb = bpf_rbtree_remove(&root, rb);
+ if (!rb)
+ goto err_out;
+
+ m = container_of(rb, struct node_data, r);
+
+err_out:
+ bpf_spin_unlock(&lock);
+ bpf_rcu_read_unlock();
+ if (m)
+ bpf_obj_drop(m);
+ return 0;
+}
+
+SEC("?fentry.s/bpf_testmod_test_read")
+__success
+int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ struct bpf_rb_node *rb;
+ struct node_data *n, *m = NULL;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 0;
+
+ /* No explicit bpf_rcu_read_lock */
+ bpf_spin_lock(&lock);
+ bpf_rbtree_add(&root, &n->r, less);
+ rb = bpf_rbtree_first(&root);
+ if (!rb)
+ goto err_out;
+
+ rb = bpf_rbtree_remove(&root, rb);
+ if (!rb)
+ goto err_out;
+
+ m = container_of(rb, struct node_data, r);
+
+err_out:
+ bpf_spin_unlock(&lock);
+ /* No explicit bpf_rcu_read_unlock */
+ if (m)
+ bpf_obj_drop(m);
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
index 0b09e5c915b1..1ef07f6ee580 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
@@ -13,6 +13,9 @@ struct node_acquire {
struct bpf_refcount refcount;
};
+extern void bpf_rcu_read_lock(void) __ksym;
+extern void bpf_rcu_read_unlock(void) __ksym;
+
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_acquire, node);
@@ -71,4 +74,29 @@ long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
return 0;
}
+SEC("?fentry.s/bpf_testmod_test_read")
+__failure __msg("function calls are not allowed while holding a lock")
+int BPF_PROG(rbtree_fail_sleepable_lock_across_rcu,
+ struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
+{
+ struct node_acquire *n;
+
+ n = bpf_obj_new(typeof(*n));
+ if (!n)
+ return 0;
+
+ /* spin_{lock,unlock} are in different RCU CS */
+ bpf_rcu_read_lock();
+ bpf_spin_lock(&glock);
+ bpf_rbtree_add(&groot, &n->node, less);
+ bpf_rcu_read_unlock();
+
+ bpf_rcu_read_lock();
+ bpf_spin_unlock(&glock);
+ bpf_rcu_read_unlock();
+
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
index b09371bba204..70df695312dc 100644
--- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c
+++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c
@@ -18,6 +18,13 @@ int err, pid;
*/
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
+
+struct task_struct *bpf_task_acquire___one(struct task_struct *task) __ksym __weak;
+/* The two-param bpf_task_acquire doesn't exist */
+struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;
+/* Incorrect type for first param */
+struct task_struct *bpf_task_acquire___three(void *ctx) __ksym __weak;
+
void invalid_kfunc(void) __ksym __weak;
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;
@@ -56,6 +63,50 @@ static int test_acquire_release(struct task_struct *task)
}
SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_kfunc_flavor_relo, struct task_struct *task, u64 clone_flags)
+{
+ struct task_struct *acquired = NULL;
+ int fake_ctx = 42;
+
+ if (bpf_ksym_exists(bpf_task_acquire___one)) {
+ acquired = bpf_task_acquire___one(task);
+ } else if (bpf_ksym_exists(bpf_task_acquire___two)) {
+ /* Here, bpf_object__resolve_ksym_func_btf_id's find_ksym_btf_id
+ * call will find vmlinux's bpf_task_acquire, but subsequent
+ * bpf_core_types_are_compat will fail
+ */
+ acquired = bpf_task_acquire___two(task, &fake_ctx);
+ err = 3;
+ return 0;
+ } else if (bpf_ksym_exists(bpf_task_acquire___three)) {
+ /* bpf_core_types_are_compat will fail similarly to above case */
+ acquired = bpf_task_acquire___three(&fake_ctx);
+ err = 4;
+ return 0;
+ }
+
+ if (acquired)
+ bpf_task_release(acquired);
+ else
+ err = 5;
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_task_kfunc_flavor_relo_not_found, struct task_struct *task, u64 clone_flags)
+{
+ /* Neither symbol should successfully resolve.
+ * Success or failure of one ___flavor should not affect others
+ */
+ if (bpf_ksym_exists(bpf_task_acquire___two))
+ err = 1;
+ else if (bpf_ksym_exists(bpf_task_acquire___three))
+ err = 2;
+
+ return 0;
+}
+
+SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
{
if (!is_test_kfunc_task())
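For readers unfamiliar with the mechanism exercised above: libbpf treats a triple underscore in a ksym name as the start of a flavor suffix that is stripped before matching against kernel BTF, so several candidate signatures can be declared for one kfunc and the compatible one picked at load time with bpf_ksym_exists(). A condensed sketch of the pattern (reusing the declarations above; not a complete program):

/* both flavors resolve against the kernel symbol "bpf_task_acquire" */
struct task_struct *bpf_task_acquire___one(struct task_struct *p) __ksym __weak;
struct task_struct *bpf_task_acquire___two(struct task_struct *p, void *ctx) __ksym __weak;

/* guarded call sites: branches whose flavor did not resolve are
 * dead-code eliminated before verification
 */
if (bpf_ksym_exists(bpf_task_acquire___one))
	acquired = bpf_task_acquire___one(task);
else if (bpf_ksym_exists(bpf_task_acquire___two))
	acquired = bpf_task_acquire___two(task, &fake_ctx);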
diff --git a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
index 321abf862801..67c14ba1e87b 100644
--- a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
+++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c
@@ -5,7 +5,8 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
const volatile int skip = 0;
#else
const volatile int skip = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_redirect.c b/tools/testing/selftests/bpf/progs/test_lwt_redirect.c
new file mode 100644
index 000000000000..8c895122f293
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lwt_redirect.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/ip.h>
+#include "bpf_tracing_net.h"
+
+/* We don't care whether the packet can be received by the network stack.
+ * We only care that the packet is sent to the correct device in the correct
+ * direction, and that it does not panic the kernel.
+ */
+static int prepend_dummy_mac(struct __sk_buff *skb)
+{
+ char mac[] = {0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0xf,
+ 0xe, 0xd, 0xc, 0xb, 0xa, 0x08, 0x00};
+
+ if (bpf_skb_change_head(skb, ETH_HLEN, 0))
+ return -1;
+
+ if (bpf_skb_store_bytes(skb, 0, mac, sizeof(mac), 0))
+ return -1;
+
+ return 0;
+}
+
+/* Use the last byte of IP address to redirect the packet */
+static int get_redirect_target(struct __sk_buff *skb)
+{
+ struct iphdr *iph = NULL;
+ void *start = (void *)(long)skb->data;
+ void *end = (void *)(long)skb->data_end;
+
+ if (start + sizeof(*iph) > end)
+ return -1;
+
+ iph = (struct iphdr *)start;
+ return bpf_ntohl(iph->daddr) & 0xff;
+}
+
+SEC("redir_ingress")
+int test_lwt_redirect_in(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ if (prepend_dummy_mac(skb))
+ return BPF_DROP;
+
+ return bpf_redirect(target, BPF_F_INGRESS);
+}
+
+SEC("redir_egress")
+int test_lwt_redirect_out(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ if (prepend_dummy_mac(skb))
+ return BPF_DROP;
+
+ return bpf_redirect(target, 0);
+}
+
+SEC("redir_egress_nomac")
+int test_lwt_redirect_out_nomac(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ return bpf_redirect(target, 0);
+}
+
+SEC("redir_ingress_nomac")
+int test_lwt_redirect_in_nomac(struct __sk_buff *skb)
+{
+ int target = get_redirect_target(skb);
+
+ if (target < 0)
+ return BPF_OK;
+
+ return bpf_redirect(target, BPF_F_INGRESS);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_lwt_reroute.c b/tools/testing/selftests/bpf/progs/test_lwt_reroute.c
new file mode 100644
index 000000000000..1dc64351929c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_lwt_reroute.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+/* This function extracts the last byte of the daddr and uses it as the
+ * skb mark; the test's fwmark rule then maps that mark to an output dev.
+ */
+SEC("lwt_xmit")
+int test_lwt_reroute(struct __sk_buff *skb)
+{
+ struct iphdr *iph = NULL;
+ void *start = (void *)(long)skb->data;
+ void *end = (void *)(long)skb->data_end;
+
+ /* set mark at most once */
+ if (skb->mark != 0)
+ return BPF_OK;
+
+ if (start + sizeof(*iph) > end)
+ return BPF_DROP;
+
+ iph = (struct iphdr *)start;
+ skb->mark = bpf_ntohl(iph->daddr) & 0xff;
+
+ /* do not reroute x.x.x.0 packets */
+ if (skb->mark == 0)
+ return BPF_OK;
+
+ return BPF_LWT_REROUTE;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_bpf.c b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
index d28ca8d1f3d0..ef7da419632a 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_bpf.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_bpf.c
@@ -2,6 +2,8 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
/* Dummy prog to test TC-BPF API */
@@ -10,3 +12,14 @@ int cls(struct __sk_buff *skb)
{
return 0;
}
+
+/* Prog to verify tc-bpf without cap_sys_admin and cap_perfmon */
+SEC("tcx/ingress")
+int pkt_ptr(struct __sk_buff *skb)
+{
+ struct iphdr *iph = (void *)(long)skb->data + sizeof(struct ethhdr);
+
+ if ((long)(iph + 1) > (long)skb->data_end)
+ return 1;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi.c b/tools/testing/selftests/bpf/progs/uprobe_multi.c
new file mode 100644
index 000000000000..419d9aa28fce
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <stdbool.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 uprobe_multi_func_1_addr = 0;
+__u64 uprobe_multi_func_2_addr = 0;
+__u64 uprobe_multi_func_3_addr = 0;
+
+__u64 uprobe_multi_func_1_result = 0;
+__u64 uprobe_multi_func_2_result = 0;
+__u64 uprobe_multi_func_3_result = 0;
+
+__u64 uretprobe_multi_func_1_result = 0;
+__u64 uretprobe_multi_func_2_result = 0;
+__u64 uretprobe_multi_func_3_result = 0;
+
+__u64 uprobe_multi_sleep_result = 0;
+
+int pid = 0;
+int child_pid = 0;
+
+bool test_cookie = false;
+void *user_ptr = 0;
+
+static __always_inline bool verify_sleepable_user_copy(void)
+{
+ char data[9];
+
+ bpf_copy_from_user(data, sizeof(data), user_ptr);
+ return bpf_strncmp(data, sizeof(data), "test_data") == 0;
+}
+
+static void uprobe_multi_check(void *ctx, bool is_return, bool is_sleep)
+{
+ child_pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (pid && child_pid != pid)
+ return;
+
+ __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
+ __u64 addr = bpf_get_func_ip(ctx);
+
+#define SET(__var, __addr, __cookie) ({ \
+ if (addr == __addr && \
+ (!test_cookie || (cookie == __cookie))) \
+ __var += 1; \
+})
+
+ if (is_return) {
+ SET(uretprobe_multi_func_1_result, uprobe_multi_func_1_addr, 2);
+ SET(uretprobe_multi_func_2_result, uprobe_multi_func_2_addr, 3);
+ SET(uretprobe_multi_func_3_result, uprobe_multi_func_3_addr, 1);
+ } else {
+ SET(uprobe_multi_func_1_result, uprobe_multi_func_1_addr, 3);
+ SET(uprobe_multi_func_2_result, uprobe_multi_func_2_addr, 1);
+ SET(uprobe_multi_func_3_result, uprobe_multi_func_3_addr, 2);
+ }
+
+#undef SET
+
+ if (is_sleep && verify_sleepable_user_copy())
+ uprobe_multi_sleep_result += 1;
+}
+
+SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
+int uprobe(struct pt_regs *ctx)
+{
+ uprobe_multi_check(ctx, false, false);
+ return 0;
+}
+
+SEC("uretprobe.multi//proc/self/exe:uprobe_multi_func_*")
+int uretprobe(struct pt_regs *ctx)
+{
+ uprobe_multi_check(ctx, true, false);
+ return 0;
+}
+
+SEC("uprobe.multi.s//proc/self/exe:uprobe_multi_func_*")
+int uprobe_sleep(struct pt_regs *ctx)
+{
+ uprobe_multi_check(ctx, false, true);
+ return 0;
+}
+
+SEC("uretprobe.multi.s//proc/self/exe:uprobe_multi_func_*")
+int uretprobe_sleep(struct pt_regs *ctx)
+{
+ uprobe_multi_check(ctx, true, true);
+ return 0;
+}
+
+SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
+int uprobe_extra(struct pt_regs *ctx)
+{
+ return 0;
+}
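For reference, the auto-attach section format used throughout this file is uprobe.multi[.s]/<binary>:<function pattern>; the double slash is simply the separator followed by an absolute path. A sketch of the equivalent manual attachment from user space, mirroring what the prog_tests above do:

LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
struct bpf_link *link;

/* equivalent of SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*") */
link = bpf_program__attach_uprobe_multi(skel->progs.uprobe, -1 /* all processes */,
					"/proc/self/exe",
					"uprobe_multi_func_*", &opts);
if (!link)
	fprintf(stderr, "attach failed: %d\n", -errno);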
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_bench.c b/tools/testing/selftests/bpf/progs/uprobe_multi_bench.c
new file mode 100644
index 000000000000..5367f6105e30
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_bench.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int count;
+
+SEC("uprobe.multi/./uprobe_multi:uprobe_multi_func_*")
+int uprobe_bench(struct pt_regs *ctx)
+{
+ count++;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c b/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c
new file mode 100644
index 000000000000..9e1c33d0bd2f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uprobe_multi_usdt.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+int count;
+
+SEC("usdt")
+int usdt0(struct pt_regs *ctx)
+{
+ count++;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_bswap.c b/tools/testing/selftests/bpf/progs/verifier_bswap.c
index 724bb38988b5..8893094725f0 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bswap.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bswap.c
@@ -4,7 +4,8 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
SEC("socket")
__description("BSWAP, 16")
diff --git a/tools/testing/selftests/bpf/progs/verifier_gotol.c b/tools/testing/selftests/bpf/progs/verifier_gotol.c
index ce48f7757db2..2dae5322a18e 100644
--- a/tools/testing/selftests/bpf/progs/verifier_gotol.c
+++ b/tools/testing/selftests/bpf/progs/verifier_gotol.c
@@ -4,7 +4,8 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
SEC("socket")
__description("gotol, small_imm")
diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
index 3c3d1bddd67f..0c638f45aaf1 100644
--- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c
@@ -4,7 +4,8 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
SEC("socket")
__description("LDSX, S8")
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index be6f69a6b659..3c8ac2c57b1b 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -4,7 +4,8 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
SEC("socket")
__description("MOV32SX, S8")
diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
index f61a9a1058c8..0990f8825675 100644
--- a/tools/testing/selftests/bpf/progs/verifier_sdiv.c
+++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c
@@ -4,7 +4,8 @@
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
-#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+ (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
SEC("socket")
__description("SDIV32, non-zero imm divisor, check 1")
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index 5312323881b6..5b7a55136741 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -7,6 +7,7 @@
#include <stdbool.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include <time.h>
int parse_num_list(const char *s, bool **set, int *set_len);
__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info);
@@ -33,4 +34,14 @@ int load_bpf_testmod(bool verbose);
int unload_bpf_testmod(bool verbose);
int kern_sync_rcu(void);
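+/* CLOCK_MONOTONIC timestamp in nanoseconds, for timing in tests and benches. */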
+static inline __u64 get_time_ns(void)
+{
+ struct timespec t;
+
+ clock_gettime(CLOCK_MONOTONIC, &t);
+
+ return (__u64)t.tv_sec * 1000000000 + t.tv_nsec;
+}
+
#endif /* __TESTING_HELPERS_H */
diff --git a/tools/testing/selftests/bpf/uprobe_multi.c b/tools/testing/selftests/bpf/uprobe_multi.c
new file mode 100644
index 000000000000..a61ceab60b68
--- /dev/null
+++ b/tools/testing/selftests/bpf/uprobe_multi.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <string.h>
+#include <sdt.h>
+
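+/*
+ * Each F* level below expands its body ten times, appending one more digit
+ * to the generated name, so one F10000() use stamps out 10,000 copies.
+ */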
+#define __PASTE(a, b) a##b
+#define PASTE(a, b) __PASTE(a, b)
+
+#define NAME(name, idx) PASTE(name, idx)
+
+#define DEF(name, idx) int NAME(name, idx)(void) { return 0; }
+#define CALL(name, idx) NAME(name, idx)();
+
+#define F(body, name, idx) body(name, idx)
+
+#define F10(body, name, idx) \
+ F(body, PASTE(name, idx), 0) F(body, PASTE(name, idx), 1) F(body, PASTE(name, idx), 2) \
+ F(body, PASTE(name, idx), 3) F(body, PASTE(name, idx), 4) F(body, PASTE(name, idx), 5) \
+ F(body, PASTE(name, idx), 6) F(body, PASTE(name, idx), 7) F(body, PASTE(name, idx), 8) \
+ F(body, PASTE(name, idx), 9)
+
+#define F100(body, name, idx) \
+ F10(body, PASTE(name, idx), 0) F10(body, PASTE(name, idx), 1) F10(body, PASTE(name, idx), 2) \
+ F10(body, PASTE(name, idx), 3) F10(body, PASTE(name, idx), 4) F10(body, PASTE(name, idx), 5) \
+ F10(body, PASTE(name, idx), 6) F10(body, PASTE(name, idx), 7) F10(body, PASTE(name, idx), 8) \
+ F10(body, PASTE(name, idx), 9)
+
+#define F1000(body, name, idx) \
+ F100(body, PASTE(name, idx), 0) F100(body, PASTE(name, idx), 1) F100(body, PASTE(name, idx), 2) \
+ F100(body, PASTE(name, idx), 3) F100(body, PASTE(name, idx), 4) F100(body, PASTE(name, idx), 5) \
+ F100(body, PASTE(name, idx), 6) F100(body, PASTE(name, idx), 7) F100(body, PASTE(name, idx), 8) \
+ F100(body, PASTE(name, idx), 9)
+
+#define F10000(body, name, idx) \
+ F1000(body, PASTE(name, idx), 0) F1000(body, PASTE(name, idx), 1) F1000(body, PASTE(name, idx), 2) \
+ F1000(body, PASTE(name, idx), 3) F1000(body, PASTE(name, idx), 4) F1000(body, PASTE(name, idx), 5) \
+ F1000(body, PASTE(name, idx), 6) F1000(body, PASTE(name, idx), 7) F1000(body, PASTE(name, idx), 8) \
+ F1000(body, PASTE(name, idx), 9)
+
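+/* 50,000 empty functions: uprobe_multi_func_00000 .. uprobe_multi_func_49999. */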
+F10000(DEF, uprobe_multi_func_, 0)
+F10000(DEF, uprobe_multi_func_, 1)
+F10000(DEF, uprobe_multi_func_, 2)
+F10000(DEF, uprobe_multi_func_, 3)
+F10000(DEF, uprobe_multi_func_, 4)
+
+static int bench(void)
+{
+ F10000(CALL, uprobe_multi_func_, 0)
+ F10000(CALL, uprobe_multi_func_, 1)
+ F10000(CALL, uprobe_multi_func_, 2)
+ F10000(CALL, uprobe_multi_func_, 3)
+ F10000(CALL, uprobe_multi_func_, 4)
+ return 0;
+}
+
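+/* Same fan-out for USDT: usdt() below inlines the test:usdt probe site
+ * 50,000 times.
+ */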
+#define PROBE STAP_PROBE(test, usdt);
+
+#define PROBE10 PROBE PROBE PROBE PROBE PROBE \
+ PROBE PROBE PROBE PROBE PROBE
+#define PROBE100 PROBE10 PROBE10 PROBE10 PROBE10 PROBE10 \
+ PROBE10 PROBE10 PROBE10 PROBE10 PROBE10
+#define PROBE1000 PROBE100 PROBE100 PROBE100 PROBE100 PROBE100 \
+ PROBE100 PROBE100 PROBE100 PROBE100 PROBE100
+#define PROBE10000 PROBE1000 PROBE1000 PROBE1000 PROBE1000 PROBE1000 \
+ PROBE1000 PROBE1000 PROBE1000 PROBE1000 PROBE1000
+
+static int usdt(void)
+{
+ PROBE10000
+ PROBE10000
+ PROBE10000
+ PROBE10000
+ PROBE10000
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ if (argc != 2)
+ goto error;
+
+ if (!strcmp("bench", argv[1]))
+ return bench();
+ if (!strcmp("usdt", argv[1]))
+ return usdt();
+
+error:
+ fprintf(stderr, "usage: %s <bench|usdt>\n", argv[0]);
+ return -1;
+}