author    Jakub Kicinski <kuba@kernel.org>  2022-06-17 18:30:01 -0700
committer Jakub Kicinski <kuba@kernel.org>  2022-06-17 18:30:01 -0700
commit    582573f1b23df1cf7458e665e2aa3acd0551fd2c (patch)
tree      69f7f069dad9006f8c87c29e144ad2e445752f67
parent    b4a028c4d031c27704ad73b1195ca69a1206941e (diff)
parent    c0f3bb4054ef036e5f67e27f2e3cad9e6512cf00 (diff)
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2022-06-17

We've added 12 non-merge commits during the last 4 day(s) which contain
a total of 14 files changed, 305 insertions(+), 107 deletions(-).

The main changes are:

1) Fix x86 JIT tailcall count offset on BPF-2-BPF call, from Jakub Sitnicki.

2) Fix a kprobe_multi link bug which misplaces BPF cookies, from Jiri Olsa.

3) Fix an infinite loop when processing a module's BTF, from Kumar Kartikeya Dwivedi.

4) Fix getting a rethook only in RCU available context, from Masami Hiramatsu.

5) Fix request socket refcount leak in sk lookup helpers, from Jon Maxwell.

6) Fix xsk xmit behavior which wrongly adds skb to already full cq, from Ciara Loftus.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  rethook: Reject getting a rethook if RCU is not watching
  fprobe, samples: Add use_trace option and show hit/missed counter
  bpf, docs: Update some of the JIT/maintenance entries
  selftest/bpf: Fix kprobe_multi bench test
  bpf: Force cookies array to follow symbols sorting
  ftrace: Keep address offset in ftrace_lookup_symbols
  selftests/bpf: Shuffle cookies symbols in kprobe multi test
  selftests/bpf: Test tail call counting with bpf2bpf and data on stack
  bpf, x86: Fix tail call count offset calculation on bpf2bpf call
  bpf: Limit maximum modifier chain length in btf_check_type_tags
  bpf: Fix request_sock leak in sk lookup helpers
  xsk: Fix generic transmit when completion queue reservation fails
====================

Link: https://lore.kernel.org/r/20220617202119.2421-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--  MAINTAINERS                                                 |  41
-rw-r--r--  arch/x86/net/bpf_jit_comp.c                                 |   3
-rw-r--r--  kernel/bpf/btf.c                                            |   5
-rw-r--r--  kernel/trace/bpf_trace.c                                    |  60
-rw-r--r--  kernel/trace/ftrace.c                                       |  13
-rw-r--r--  kernel/trace/rethook.c                                      |   9
-rw-r--r--  net/core/filter.c                                           |  34
-rw-r--r--  net/xdp/xsk.c                                               |  16
-rw-r--r--  samples/fprobe/fprobe_example.c                             |  29
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_cookie.c         |  78
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c  |   3
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tailcalls.c          |  55
-rw-r--r--  tools/testing/selftests/bpf/progs/kprobe_multi.c            |  24
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c       |  42
14 files changed, 305 insertions(+), 107 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index f52543aedd61..15a2341936ea 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3662,7 +3662,7 @@ BPF JIT for ARM
M: Shubham Bansal <illusionist.neo@gmail.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: arch/arm/net/
BPF JIT for ARM64
@@ -3686,14 +3686,15 @@ BPF JIT for NFP NICs
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
-S: Supported
+S: Odd Fixes
F: drivers/net/ethernet/netronome/nfp/bpf/
BPF JIT for POWERPC (32-BIT AND 64-BIT)
M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
+M: Michael Ellerman <mpe@ellerman.id.au>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
-S: Maintained
+S: Supported
F: arch/powerpc/net/
BPF JIT for RISC-V (32-bit)
@@ -3719,7 +3720,7 @@ M: Heiko Carstens <hca@linux.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
-S: Maintained
+S: Supported
F: arch/s390/net/
X: arch/s390/net/pnet.c
@@ -3727,14 +3728,14 @@ BPF JIT for SPARC (32-BIT AND 64-BIT)
M: David S. Miller <davem@davemloft.net>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: arch/sparc/net/
BPF JIT for X86 32-BIT
M: Wang YanQing <udknight@gmail.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
-S: Maintained
+S: Odd Fixes
F: arch/x86/net/bpf_jit_comp32.c
BPF JIT for X86 64-BIT
@@ -3757,6 +3758,19 @@ F: include/linux/bpf_lsm.h
F: kernel/bpf/bpf_lsm.c
F: security/bpf/
+BPF L7 FRAMEWORK
+M: John Fastabend <john.fastabend@gmail.com>
+M: Jakub Sitnicki <jakub@cloudflare.com>
+L: netdev@vger.kernel.org
+L: bpf@vger.kernel.org
+S: Maintained
+F: include/linux/skmsg.h
+F: net/core/skmsg.c
+F: net/core/sock_map.c
+F: net/ipv4/tcp_bpf.c
+F: net/ipv4/udp_bpf.c
+F: net/unix/unix_bpf.c
+
BPFTOOL
M: Quentin Monnet <quentin@isovalent.com>
L: bpf@vger.kernel.org
@@ -11096,20 +11110,6 @@ S: Maintained
F: include/net/l3mdev.h
F: net/l3mdev
-L7 BPF FRAMEWORK
-M: John Fastabend <john.fastabend@gmail.com>
-M: Daniel Borkmann <daniel@iogearbox.net>
-M: Jakub Sitnicki <jakub@cloudflare.com>
-L: netdev@vger.kernel.org
-L: bpf@vger.kernel.org
-S: Maintained
-F: include/linux/skmsg.h
-F: net/core/skmsg.c
-F: net/core/sock_map.c
-F: net/ipv4/tcp_bpf.c
-F: net/ipv4/udp_bpf.c
-F: net/unix/unix_bpf.c
-
LANDLOCK SECURITY MODULE
M: Mickaël Salaün <mic@digikod.net>
L: linux-security-module@vger.kernel.org
@@ -13952,7 +13952,6 @@ F: net/ipv6/tcp*.c
NETWORKING [TLS]
M: Boris Pismenny <borisp@nvidia.com>
M: John Fastabend <john.fastabend@gmail.com>
-M: Daniel Borkmann <daniel@iogearbox.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index f298b18a9a3d..c98b8c0ed3b8 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1420,8 +1420,9 @@ st: if (is_imm8(insn->off))
case BPF_JMP | BPF_CALL:
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
+ /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
EMIT3_off32(0x48, 0x8B, 0x85,
- -(bpf_prog->aux->stack_depth + 8));
+ -round_up(bpf_prog->aux->stack_depth, 8) - 8);
if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
return -EINVAL;
} else {
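
The hunk above fixes the x86-64 JIT's load of the tail call counter on a bpf2bpf call: the counter is spilled at [rbp - round_up(stack_depth, 8) - 8], but the old code derived the offset from the un-rounded stack depth, so any program whose stack usage is not a multiple of 8 read the wrong slot. A minimal userspace C sketch (illustrative values only, not kernel code) showing the discrepancy:

/* Userspace sketch, not kernel code: show why the un-rounded stack depth
 * points at the wrong slot for the tail call counter when the BPF stack
 * size is not a multiple of 8.
 */
#include <stdio.h>

#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned int stack_depth = 12;	/* hypothetical BPF stack usage */

	/* Buggy offset: -(stack_depth + 8) = -20, lands inside stack data. */
	int old_off = -(int)(stack_depth + 8);
	/* Fixed offset: -round_up(stack_depth, 8) - 8 = -24, the real slot. */
	int new_off = -(int)ROUND_UP(stack_depth, 8) - 8;

	printf("old offset: %d, fixed offset: %d\n", old_off, new_off);
	return 0;
}

With a 12-byte stack the JIT aligns the frame to 16 bytes, so the counter sits at rbp-24 while the old formula read rbp-20, i.e. a slot inside the program's own stack data.
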
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 63d0ac7dfe2f..eb12d4f705cc 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -4815,6 +4815,7 @@ static int btf_check_type_tags(struct btf_verifier_env *env,
n = btf_nr_types(btf);
for (i = start_id; i < n; i++) {
const struct btf_type *t;
+ int chain_limit = 32;
u32 cur_id = i;
t = btf_type_by_id(btf, i);
@@ -4827,6 +4828,10 @@ static int btf_check_type_tags(struct btf_verifier_env *env,
in_tags = btf_type_is_type_tag(t);
while (btf_type_is_modifier(t)) {
+ if (!chain_limit--) {
+ btf_verifier_log(env, "Max chain length or cycle detected");
+ return -ELOOP;
+ }
if (btf_type_is_type_tag(t)) {
if (!in_tags) {
btf_verifier_log(env, "Type tags don't precede modifiers");
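
The btf.c change above bounds the modifier-chain walk so that a malformed or crafted (e.g. module) BTF containing a cycle of modifiers can no longer spin btf_check_type_tags() forever; after 32 steps the verifier bails out with -ELOOP. A tiny userspace sketch of the same bounded pointer chase (names illustrative, not kernel code):

/* Userspace sketch of the guard: walk a chain of "modifier" nodes but
 * give up after a fixed number of steps, so a cycle cannot hang the walker.
 */
#include <stdio.h>

struct node {
	struct node *next;
};

static int walk_chain(struct node *n)
{
	int chain_limit = 32;

	while (n) {
		if (!chain_limit--)
			return -1;	/* max chain length or cycle detected */
		n = n->next;
	}
	return 0;
}

int main(void)
{
	struct node a, b;

	a.next = &b;
	b.next = &a;	/* deliberate cycle */
	printf("walk: %d\n", walk_chain(&a));	/* prints "walk: -1" */
	return 0;
}
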
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 7a13e6ac6327..88589d74a892 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2423,7 +2423,7 @@ kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip,
kprobe_multi_link_prog_run(link, entry_ip, regs);
}
-static int symbols_cmp(const void *a, const void *b)
+static int symbols_cmp_r(const void *a, const void *b, const void *priv)
{
const char **str_a = (const char **) a;
const char **str_b = (const char **) b;
@@ -2431,6 +2431,28 @@ static int symbols_cmp(const void *a, const void *b)
return strcmp(*str_a, *str_b);
}
+struct multi_symbols_sort {
+ const char **funcs;
+ u64 *cookies;
+};
+
+static void symbols_swap_r(void *a, void *b, int size, const void *priv)
+{
+ const struct multi_symbols_sort *data = priv;
+ const char **name_a = a, **name_b = b;
+
+ swap(*name_a, *name_b);
+
+ /* If defined, swap also related cookies. */
+ if (data->cookies) {
+ u64 *cookie_a, *cookie_b;
+
+ cookie_a = data->cookies + (name_a - data->funcs);
+ cookie_b = data->cookies + (name_b - data->funcs);
+ swap(*cookie_a, *cookie_b);
+ }
+}
+
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
struct bpf_kprobe_multi_link *link = NULL;
@@ -2468,38 +2490,46 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
if (!addrs)
return -ENOMEM;
+ ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
+ if (ucookies) {
+ cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
+ if (!cookies) {
+ err = -ENOMEM;
+ goto error;
+ }
+ if (copy_from_user(cookies, ucookies, size)) {
+ err = -EFAULT;
+ goto error;
+ }
+ }
+
if (uaddrs) {
if (copy_from_user(addrs, uaddrs, size)) {
err = -EFAULT;
goto error;
}
} else {
+ struct multi_symbols_sort data = {
+ .cookies = cookies,
+ };
struct user_syms us;
err = copy_user_syms(&us, usyms, cnt);
if (err)
goto error;
- sort(us.syms, cnt, sizeof(*us.syms), symbols_cmp, NULL);
+ if (cookies)
+ data.funcs = us.syms;
+
+ sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
+ symbols_swap_r, &data);
+
err = ftrace_lookup_symbols(us.syms, cnt, addrs);
free_user_syms(&us);
if (err)
goto error;
}
- ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
- if (ucookies) {
- cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
- if (!cookies) {
- err = -ENOMEM;
- goto error;
- }
- if (copy_from_user(cookies, ucookies, size)) {
- err = -EFAULT;
- goto error;
- }
- }
-
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link) {
err = -ENOMEM;
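
The bpf_trace.c change above fixes misplaced BPF cookies on kprobe_multi links: user-supplied symbols are sorted before resolution, but the cookie array used to keep its original order, so cookies ended up paired with the wrong functions. The fix copies the cookies first and sorts the symbols with sort_r(), whose swap callback (symbols_swap_r) swaps the corresponding cookie whenever two symbol pointers are swapped. A small userspace C sketch of keeping two parallel arrays in lockstep while sorting (a plain insertion sort stands in for the kernel's sort_r()):

/* Userspace sketch: keep a cookie array in sync while sorting the symbol
 * array; data and the sort algorithm are illustrative only.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *syms[] = { "bpf_fentry_test3", "bpf_fentry_test1", "bpf_fentry_test2" };
	unsigned long long cookies[] = { 3, 1, 2 };	/* cookies[i] belongs to syms[i] */
	int n = 3, i, j;

	for (i = 1; i < n; i++) {
		for (j = i; j > 0 && strcmp(syms[j - 1], syms[j]) > 0; j--) {
			const char *tmp_sym = syms[j];
			unsigned long long tmp_cookie = cookies[j];

			/* Swap the symbol and its cookie together. */
			syms[j] = syms[j - 1];
			syms[j - 1] = tmp_sym;
			cookies[j] = cookies[j - 1];
			cookies[j - 1] = tmp_cookie;
		}
	}

	for (i = 0; i < n; i++)
		printf("%s -> cookie %llu\n", syms[i], cookies[i]);
	return 0;
}
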
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e750fe141a60..601ccf1b2f09 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -8029,15 +8029,23 @@ static int kallsyms_callback(void *data, const char *name,
struct module *mod, unsigned long addr)
{
struct kallsyms_data *args = data;
+ const char **sym;
+ int idx;
- if (!bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp))
+ sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
+ if (!sym)
+ return 0;
+
+ idx = sym - args->syms;
+ if (args->addrs[idx])
return 0;
addr = ftrace_location(addr);
if (!addr)
return 0;
- args->addrs[args->found++] = addr;
+ args->addrs[idx] = addr;
+ args->found++;
return args->found == args->cnt ? 1 : 0;
}
@@ -8062,6 +8070,7 @@ int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *a
struct kallsyms_data args;
int err;
+ memset(addrs, 0, sizeof(*addrs) * cnt);
args.addrs = addrs;
args.syms = sorted_syms;
args.cnt = cnt;
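
The ftrace.c change above makes kallsyms_callback() store each resolved address at the index of the matched symbol in the caller's sorted array (skipping symbols already resolved), instead of appending addresses in kallsyms traversal order; this keeps addrs[i] aligned with sorted_syms[i], which the cookie fix above relies on. A userspace C sketch of using the bsearch() hit position as the destination index (illustrative data, not kernel code):

/* Userspace sketch: place each resolved value into the slot that matches
 * its key in the caller's sorted array.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp(const void *a, const void *b)
{
	return strcmp(*(const char * const *)a, *(const char * const *)b);
}

int main(void)
{
	const char *keys[] = { "alpha", "beta", "gamma" };	/* must be sorted */
	long vals[3] = { 0 };
	const char *name = "beta";	/* symbol reported by the traversal */
	const char **hit;

	hit = bsearch(&name, keys, 3, sizeof(keys[0]), cmp);
	if (hit && !vals[hit - keys]) {
		long idx = hit - keys;	/* index of the matched key */

		vals[idx] = 42;		/* resolved value goes into the matching slot */
		printf("keys[%ld]=%s val=%ld\n", idx, keys[idx], vals[idx]);
	}
	return 0;
}
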
diff --git a/kernel/trace/rethook.c b/kernel/trace/rethook.c
index b56833700d23..c69d82273ce7 100644
--- a/kernel/trace/rethook.c
+++ b/kernel/trace/rethook.c
@@ -154,6 +154,15 @@ struct rethook_node *rethook_try_get(struct rethook *rh)
if (unlikely(!handler))
return NULL;
+ /*
+ * This expects the caller will set up a rethook on a function entry.
+ * When the function returns, the rethook will eventually be reclaimed
+ * or released in the rethook_recycle() with call_rcu().
+ * This means the caller must be run in the RCU-available context.
+ */
+ if (unlikely(!rcu_is_watching()))
+ return NULL;
+
fn = freelist_try_get(&rh->pool);
if (!fn)
return NULL;
diff --git a/net/core/filter.c b/net/core/filter.c
index 5af58eb48587..5d16d66727fc 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6516,10 +6516,21 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
ifindex, proto, netns_id, flags);
if (sk) {
- sk = sk_to_full_sk(sk);
- if (!sk_fullsock(sk)) {
+ struct sock *sk2 = sk_to_full_sk(sk);
+
+ /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
+ * sock refcnt is decremented to prevent a request_sock leak.
+ */
+ if (!sk_fullsock(sk2))
+ sk2 = NULL;
+ if (sk2 != sk) {
sock_gen_put(sk);
- return NULL;
+ /* Ensure there is no need to bump sk2 refcnt */
+ if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
+ WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
+ return NULL;
+ }
+ sk = sk2;
}
}
@@ -6553,10 +6564,21 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
flags);
if (sk) {
- sk = sk_to_full_sk(sk);
- if (!sk_fullsock(sk)) {
+ struct sock *sk2 = sk_to_full_sk(sk);
+
+ /* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
+ * sock refcnt is decremented to prevent a request_sock leak.
+ */
+ if (!sk_fullsock(sk2))
+ sk2 = NULL;
+ if (sk2 != sk) {
sock_gen_put(sk);
- return NULL;
+ /* Ensure there is no need to bump sk2 refcnt */
+ if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
+ WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
+ return NULL;
+ }
+ sk = sk2;
}
}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 19ac872a6624..09002387987e 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -538,12 +538,6 @@ static int xsk_generic_xmit(struct sock *sk)
goto out;
}
- skb = xsk_build_skb(xs, &desc);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
- goto out;
- }
-
/* This is the backpressure mechanism for the Tx path.
* Reserve space in the completion queue and only proceed
* if there is space in it. This avoids having to implement
@@ -552,11 +546,19 @@ static int xsk_generic_xmit(struct sock *sk)
spin_lock_irqsave(&xs->pool->cq_lock, flags);
if (xskq_prod_reserve(xs->pool->cq)) {
spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
- kfree_skb(skb);
goto out;
}
spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+ skb = xsk_build_skb(xs, &desc);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ spin_lock_irqsave(&xs->pool->cq_lock, flags);
+ xskq_prod_cancel(xs->pool->cq);
+ spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+ goto out;
+ }
+
err = __dev_direct_xmit(skb, xs->queue_id);
if (err == NETDEV_TX_BUSY) {
/* Tell user-space to retry the send */
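
The xsk.c change above moves the completion-queue reservation ahead of skb construction: if the queue is full, the send simply backs off without building (and then freeing) an skb, and if xsk_build_skb() fails after a successful reservation, the reserved slot is returned with xskq_prod_cancel(). A userspace C sketch of the reserve / build / cancel-on-error ordering (all names are stand-ins, not the kernel API):

/* Userspace sketch of the pattern: reserve a slot first, then do the
 * fallible work, and cancel the reservation on failure so no slot leaks.
 */
#include <stdbool.h>
#include <stdio.h>

static int cq_space = 1;	/* pretend completion queue with one free slot */

static bool cq_reserve(void)
{
	if (!cq_space)
		return false;
	cq_space--;
	return true;
}

static void cq_cancel(void)
{
	cq_space++;
}

static void *build_skb_or_null(void)
{
	return NULL;	/* simulate an allocation failure */
}

int main(void)
{
	void *skb;

	if (!cq_reserve())
		return 1;	/* backpressure: queue full, caller retries later */

	skb = build_skb_or_null();
	if (!skb) {
		cq_cancel();	/* undo the reservation so no slot is leaked */
		printf("build failed, reservation cancelled, free slots: %d\n", cq_space);
		return 1;
	}

	/* ... transmit skb; the completion later consumes the reserved slot ... */
	return 0;
}
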
diff --git a/samples/fprobe/fprobe_example.c b/samples/fprobe/fprobe_example.c
index 24d3cf109140..01ee6c8c8382 100644
--- a/samples/fprobe/fprobe_example.c
+++ b/samples/fprobe/fprobe_example.c
@@ -21,6 +21,7 @@
#define BACKTRACE_DEPTH 16
#define MAX_SYMBOL_LEN 4096
struct fprobe sample_probe;
+static unsigned long nhit;
static char symbol[MAX_SYMBOL_LEN] = "kernel_clone";
module_param_string(symbol, symbol, sizeof(symbol), 0644);
@@ -28,6 +29,8 @@ static char nosymbol[MAX_SYMBOL_LEN] = "";
module_param_string(nosymbol, nosymbol, sizeof(nosymbol), 0644);
static bool stackdump = true;
module_param(stackdump, bool, 0644);
+static bool use_trace = false;
+module_param(use_trace, bool, 0644);
static void show_backtrace(void)
{
@@ -40,7 +43,15 @@ static void show_backtrace(void)
static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
{
- pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
+ if (use_trace)
+ /*
+ * This is just an example, no kernel code should call
+ * trace_printk() except when actively debugging.
+ */
+ trace_printk("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
+ else
+ pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
+ nhit++;
if (stackdump)
show_backtrace();
}
@@ -49,8 +60,17 @@ static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_r
{
unsigned long rip = instruction_pointer(regs);
- pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
- (void *)ip, (void *)ip, (void *)rip, (void *)rip);
+ if (use_trace)
+ /*
+ * This is just an example, no kernel code should call
+ * trace_printk() except when actively debugging.
+ */
+ trace_printk("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
+ (void *)ip, (void *)ip, (void *)rip, (void *)rip);
+ else
+ pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
+ (void *)ip, (void *)ip, (void *)rip, (void *)rip);
+ nhit++;
if (stackdump)
show_backtrace();
}
@@ -112,7 +132,8 @@ static void __exit fprobe_exit(void)
{
unregister_fprobe(&sample_probe);
- pr_info("fprobe at %s unregistered\n", symbol);
+ pr_info("fprobe at %s unregistered. %ld times hit, %ld times missed\n",
+ symbol, nhit, sample_probe.nmissed);
}
module_init(fprobe_init)
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 83ef55e3caa4..2974b44f80fa 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -121,24 +121,24 @@ static void kprobe_multi_link_api_subtest(void)
})
GET_ADDR("bpf_fentry_test1", addrs[0]);
- GET_ADDR("bpf_fentry_test2", addrs[1]);
- GET_ADDR("bpf_fentry_test3", addrs[2]);
- GET_ADDR("bpf_fentry_test4", addrs[3]);
- GET_ADDR("bpf_fentry_test5", addrs[4]);
- GET_ADDR("bpf_fentry_test6", addrs[5]);
- GET_ADDR("bpf_fentry_test7", addrs[6]);
+ GET_ADDR("bpf_fentry_test3", addrs[1]);
+ GET_ADDR("bpf_fentry_test4", addrs[2]);
+ GET_ADDR("bpf_fentry_test5", addrs[3]);
+ GET_ADDR("bpf_fentry_test6", addrs[4]);
+ GET_ADDR("bpf_fentry_test7", addrs[5]);
+ GET_ADDR("bpf_fentry_test2", addrs[6]);
GET_ADDR("bpf_fentry_test8", addrs[7]);
#undef GET_ADDR
- cookies[0] = 1;
- cookies[1] = 2;
- cookies[2] = 3;
- cookies[3] = 4;
- cookies[4] = 5;
- cookies[5] = 6;
- cookies[6] = 7;
- cookies[7] = 8;
+ cookies[0] = 1; /* bpf_fentry_test1 */
+ cookies[1] = 2; /* bpf_fentry_test3 */
+ cookies[2] = 3; /* bpf_fentry_test4 */
+ cookies[3] = 4; /* bpf_fentry_test5 */
+ cookies[4] = 5; /* bpf_fentry_test6 */
+ cookies[5] = 6; /* bpf_fentry_test7 */
+ cookies[6] = 7; /* bpf_fentry_test2 */
+ cookies[7] = 8; /* bpf_fentry_test8 */
opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
@@ -149,14 +149,14 @@ static void kprobe_multi_link_api_subtest(void)
if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
goto cleanup;
- cookies[0] = 8;
- cookies[1] = 7;
- cookies[2] = 6;
- cookies[3] = 5;
- cookies[4] = 4;
- cookies[5] = 3;
- cookies[6] = 2;
- cookies[7] = 1;
+ cookies[0] = 8; /* bpf_fentry_test1 */
+ cookies[1] = 7; /* bpf_fentry_test3 */
+ cookies[2] = 6; /* bpf_fentry_test4 */
+ cookies[3] = 5; /* bpf_fentry_test5 */
+ cookies[4] = 4; /* bpf_fentry_test6 */
+ cookies[5] = 3; /* bpf_fentry_test7 */
+ cookies[6] = 2; /* bpf_fentry_test2 */
+ cookies[7] = 1; /* bpf_fentry_test8 */
opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
@@ -181,12 +181,12 @@ static void kprobe_multi_attach_api_subtest(void)
struct kprobe_multi *skel = NULL;
const char *syms[8] = {
"bpf_fentry_test1",
- "bpf_fentry_test2",
"bpf_fentry_test3",
"bpf_fentry_test4",
"bpf_fentry_test5",
"bpf_fentry_test6",
"bpf_fentry_test7",
+ "bpf_fentry_test2",
"bpf_fentry_test8",
};
__u64 cookies[8];
@@ -198,14 +198,14 @@ static void kprobe_multi_attach_api_subtest(void)
skel->bss->pid = getpid();
skel->bss->test_cookie = true;
- cookies[0] = 1;
- cookies[1] = 2;
- cookies[2] = 3;
- cookies[3] = 4;
- cookies[4] = 5;
- cookies[5] = 6;
- cookies[6] = 7;
- cookies[7] = 8;
+ cookies[0] = 1; /* bpf_fentry_test1 */
+ cookies[1] = 2; /* bpf_fentry_test3 */
+ cookies[2] = 3; /* bpf_fentry_test4 */
+ cookies[3] = 4; /* bpf_fentry_test5 */
+ cookies[4] = 5; /* bpf_fentry_test6 */
+ cookies[5] = 6; /* bpf_fentry_test7 */
+ cookies[6] = 7; /* bpf_fentry_test2 */
+ cookies[7] = 8; /* bpf_fentry_test8 */
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
@@ -216,14 +216,14 @@ static void kprobe_multi_attach_api_subtest(void)
if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
- cookies[0] = 8;
- cookies[1] = 7;
- cookies[2] = 6;
- cookies[3] = 5;
- cookies[4] = 4;
- cookies[5] = 3;
- cookies[6] = 2;
- cookies[7] = 1;
+ cookies[0] = 8; /* bpf_fentry_test1 */
+ cookies[1] = 7; /* bpf_fentry_test3 */
+ cookies[2] = 6; /* bpf_fentry_test4 */
+ cookies[3] = 5; /* bpf_fentry_test5 */
+ cookies[4] = 4; /* bpf_fentry_test6 */
+ cookies[5] = 3; /* bpf_fentry_test7 */
+ cookies[6] = 2; /* bpf_fentry_test2 */
+ cookies[7] = 1; /* bpf_fentry_test8 */
opts.retprobe = true;
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index 586dc52d6fb9..5b93d5d0bd93 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -364,6 +364,9 @@ static int get_syms(char ***symsp, size_t *cntp)
continue;
if (!strncmp(name, "rcu_", 4))
continue;
+ if (!strncmp(name, "__ftrace_invalid_address__",
+ sizeof("__ftrace_invalid_address__") - 1))
+ continue;
err = hashmap__add(map, name, NULL);
if (err) {
free(name);
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index c4da87ec3ba4..19c70880cfb3 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -831,6 +831,59 @@ out:
bpf_object__close(obj);
}
+#include "tailcall_bpf2bpf6.skel.h"
+
+/* Tail call counting works even when there is data on stack which is
+ * not aligned to 8 bytes.
+ */
+static void test_tailcall_bpf2bpf_6(void)
+{
+ struct tailcall_bpf2bpf6 *obj;
+ int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ obj = tailcall_bpf2bpf6__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "open and load"))
+ return;
+
+ main_fd = bpf_program__fd(obj->progs.entry);
+ if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
+ goto out;
+
+ map_fd = bpf_map__fd(obj->maps.jmp_table);
+ if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
+ goto out;
+
+ prog_fd = bpf_program__fd(obj->progs.classifier_0);
+ if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "jmp_table map update"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "entry prog test run");
+ ASSERT_EQ(topts.retval, 0, "tailcall retval");
+
+ data_fd = bpf_map__fd(obj->maps.bss);
+ if (!ASSERT_GE(map_fd, 0, "bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "bss map lookup");
+ ASSERT_EQ(val, 1, "done flag is set");
+
+out:
+ tailcall_bpf2bpf6__destroy(obj);
+}
+
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -855,4 +908,6 @@ void test_tailcalls(void)
test_tailcall_bpf2bpf_4(false);
if (test__start_subtest("tailcall_bpf2bpf_5"))
test_tailcall_bpf2bpf_4(true);
+ if (test__start_subtest("tailcall_bpf2bpf_6"))
+ test_tailcall_bpf2bpf_6();
}
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c
index 93510f4f0f3a..08f95a8155d1 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_multi.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c
@@ -54,21 +54,21 @@ static void kprobe_multi_check(void *ctx, bool is_return)
if (is_return) {
SET(kretprobe_test1_result, &bpf_fentry_test1, 8);
- SET(kretprobe_test2_result, &bpf_fentry_test2, 7);
- SET(kretprobe_test3_result, &bpf_fentry_test3, 6);
- SET(kretprobe_test4_result, &bpf_fentry_test4, 5);
- SET(kretprobe_test5_result, &bpf_fentry_test5, 4);
- SET(kretprobe_test6_result, &bpf_fentry_test6, 3);
- SET(kretprobe_test7_result, &bpf_fentry_test7, 2);
+ SET(kretprobe_test2_result, &bpf_fentry_test2, 2);
+ SET(kretprobe_test3_result, &bpf_fentry_test3, 7);
+ SET(kretprobe_test4_result, &bpf_fentry_test4, 6);
+ SET(kretprobe_test5_result, &bpf_fentry_test5, 5);
+ SET(kretprobe_test6_result, &bpf_fentry_test6, 4);
+ SET(kretprobe_test7_result, &bpf_fentry_test7, 3);
SET(kretprobe_test8_result, &bpf_fentry_test8, 1);
} else {
SET(kprobe_test1_result, &bpf_fentry_test1, 1);
- SET(kprobe_test2_result, &bpf_fentry_test2, 2);
- SET(kprobe_test3_result, &bpf_fentry_test3, 3);
- SET(kprobe_test4_result, &bpf_fentry_test4, 4);
- SET(kprobe_test5_result, &bpf_fentry_test5, 5);
- SET(kprobe_test6_result, &bpf_fentry_test6, 6);
- SET(kprobe_test7_result, &bpf_fentry_test7, 7);
+ SET(kprobe_test2_result, &bpf_fentry_test2, 7);
+ SET(kprobe_test3_result, &bpf_fentry_test3, 2);
+ SET(kprobe_test4_result, &bpf_fentry_test4, 3);
+ SET(kprobe_test5_result, &bpf_fentry_test5, 4);
+ SET(kprobe_test6_result, &bpf_fentry_test6, 5);
+ SET(kprobe_test7_result, &bpf_fentry_test7, 6);
SET(kprobe_test8_result, &bpf_fentry_test8, 8);
}
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c
new file mode 100644
index 000000000000..41ce83da78e8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define __unused __attribute__((unused))
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int done = 0;
+
+SEC("tc")
+int classifier_0(struct __sk_buff *skb __unused)
+{
+ done = 1;
+ return 0;
+}
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ /* Don't propagate the constant to the caller */
+ volatile int ret = 1;
+
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return ret;
+}
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ /* Have data on stack which size is not a multiple of 8 */
+ volatile char arr[1] = {};
+
+ return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";