path: root/tools/bpf
author	Paolo Abeni <pabeni@redhat.com>	2022-09-06 23:21:14 +0200
committer	Paolo Abeni <pabeni@redhat.com>	2022-09-06 23:21:18 +0200
commit	2786bcff28bd88955fc61adf9cb7370fbc182bad (patch)
tree	bfe785dc3705d30514b20616acaaea71d8869923 /tools/bpf
parent	03fdb11da92fde0bdc0b6e9c1c642b7414d49e8d (diff)
parent	274052a2b0ab9f380ce22b19ff80a99b99ecb198 (diff)
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2022-09-05

The following pull-request contains BPF updates for your *net-next* tree.

We've added 106 non-merge commits during the last 18 day(s) which contain
a total of 159 files changed, 5225 insertions(+), 1358 deletions(-).

There are two small merge conflicts, resolve them as follows:

1) tools/testing/selftests/bpf/DENYLIST.s390x

  Commit 27e23836ce22 ("selftests/bpf: Add lru_bug to s390x deny list") in
  bpf tree was needed to get BPF CI green on s390x, but it conflicted with
  newly added tests on bpf-next. Resolve by adding both hunks, result:

  [...]
  lru_bug                      # prog 'printk': failed to auto-attach: -524
  setget_sockopt               # attach unexpected error: -524 (trampoline)
  cb_refs                      # expected error message unexpected error: -524 (trampoline)
  cgroup_hierarchical_stats    # JIT does not support calling kernel function (kfunc)
  htab_update                  # failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
  [...]

2) net/core/filter.c

  Commit 1227c1771dd2 ("net: Fix data-races around sysctl_[rw]mem_(max|default).")
  from net tree conflicts with commit 29003875bd5b ("bpf: Change
  bpf_setsockopt(SOL_SOCKET) to reuse sk_setsockopt()") from bpf-next tree.
  Take the code as it is from bpf-next tree, result:

  [...]
  if (getopt) {
          if (optname == SO_BINDTODEVICE)
                  return -EINVAL;
          return sk_getsockopt(sk, SOL_SOCKET, optname,
                               KERNEL_SOCKPTR(optval),
                               KERNEL_SOCKPTR(optlen));
  }
  return sk_setsockopt(sk, SOL_SOCKET, optname,
                       KERNEL_SOCKPTR(optval), *optlen);
  [...]

The main changes are:

1) Add any-context BPF specific memory allocator which is useful in particular
   for BPF tracing with bonus of performance equal to full prealloc, from
   Alexei Starovoitov.

2) Big batch to remove duplicated code from bpf_{get,set}sockopt() helpers as
   an effort to reuse the existing core socket code as much as possible, from
   Martin KaFai Lau.

3) Extend BPF flow dissector for BPF programs to just augment the in-kernel
   dissector with custom logic. In other words, allow for partial replacement,
   from Shmulik Ladkani.

4) Add a new cgroup iterator to BPF with different traversal options, from
   Hao Luo.

5) Support for BPF to collect hierarchical cgroup statistics efficiently
   through BPF integration with the rstat framework, from Yosry Ahmed.

6) Support bpf_{g,s}et_retval() under more BPF cgroup hooks, from Stanislav
   Fomichev.

7) BPF hash table and local storages fixes under fully preemptible kernel,
   from Hou Tao.

8) Add various improvements to BPF selftests and libbpf for compilation with
   gcc BPF backend, from James Hilliard.

9) Fix verifier helper permissions and reference state management for
   synchronous callbacks, from Kumar Kartikeya Dwivedi.

10) Add support for BPF selftest's xskxceiver to also be used against real
    devices that support MAC loopback, from Maciej Fijalkowski.

11) Various fixes to the bpf-helpers(7) man page generation script, from
    Quentin Monnet.

12) Document BPF verifier's tnum_in(tnum_range(), ...) gotchas, from
    Shung-Hsi Yu.

13) Various minor misc improvements all over the place.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (106 commits)
  bpf: Optimize rcu_barrier usage between hash map and bpf_mem_alloc.
  bpf: Remove usage of kmem_cache from bpf_mem_cache.
  bpf: Remove prealloc-only restriction for sleepable bpf programs.
  bpf: Prepare bpf_mem_alloc to be used by sleepable bpf programs.
  bpf: Remove tracing program restriction on map types
  bpf: Convert percpu hash map to per-cpu bpf_mem_alloc.
  bpf: Add percpu allocation support to bpf_mem_alloc.
  bpf: Batch call_rcu callbacks instead of SLAB_TYPESAFE_BY_RCU.
  bpf: Adjust low/high watermarks in bpf_mem_cache
  bpf: Optimize call_rcu in non-preallocated hash map.
  bpf: Optimize element count in non-preallocated hash map.
  bpf: Relax the requirement to use preallocated hash maps in tracing progs.
  samples/bpf: Reduce syscall overhead in map_perf_test.
  selftests/bpf: Improve test coverage of test_maps
  bpf: Convert hash map to bpf_mem_alloc.
  bpf: Introduce any context BPF specific memory allocator.
  selftest/bpf: Add test for bpf_getsockopt()
  bpf: Change bpf_getsockopt(SOL_IPV6) to reuse do_ipv6_getsockopt()
  bpf: Change bpf_getsockopt(SOL_IP) to reuse do_ip_getsockopt()
  bpf: Change bpf_getsockopt(SOL_TCP) to reuse do_tcp_getsockopt()
  ...
====================

Link: https://lore.kernel.org/r/20220905161136.9150-1-daniel@iogearbox.net
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
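Change 4 above (the cgroup iterator) is what the bpftool diff below adds
display support for. As a hedged sketch of how userspace could create such a
link with libbpf — assuming `prog` is an already-loaded SEC("iter/cgroup")
program and `cgrp_fd` an open cgroup v2 directory fd; the helper name is
illustrative:

  #include <bpf/libbpf.h>
  #include <linux/bpf.h>

  /* Sketch only: attach an iter/cgroup program with a chosen traversal
   * order via the bpf_iter_link_info added by this series. */
  static struct bpf_link *attach_cgroup_iter(struct bpf_program *prog,
                                             int cgrp_fd)
  {
          DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
          union bpf_iter_link_info linfo = {};

          linfo.cgroup.cgroup_fd = cgrp_fd;
          linfo.cgroup.order = BPF_CGROUP_ITER_DESCENDANTS_PRE; /* pre-order walk */
          opts.link_info = &linfo;
          opts.link_info_len = sizeof(linfo);

          return bpf_program__attach_iter(prog, &opts);
  }

bpftool then reads the resulting link's bpf_link_info and, with the link.c
changes below, prints the cgroup id and traversal order.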
Diffstat (limited to 'tools/bpf')
-rw-r--r--	tools/bpf/bpftool/btf_dumper.c	2
-rw-r--r--	tools/bpf/bpftool/link.c	35
2 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 125798b0bc5d..19924b6ce796 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -452,7 +452,7 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
*(char *)data);
break;
case BTF_INT_BOOL:
- jsonw_bool(jw, *(int *)data);
+ jsonw_bool(jw, *(bool *)data);
break;
default:
/* shouldn't happen */
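The one-line btf_dumper.c fix above matters because sizeof(bool) is 1 while
sizeof(int) is typically 4: casting the data pointer to int reads three bytes
past the bool, so non-zero neighboring bytes can flip the printed value. A
standalone illustration (not kernel code; byte values invented):

  #include <stdbool.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          /* A 1-byte bool (false) followed by non-zero neighbor bytes,
           * as could occur inside a larger struct or map value. */
          unsigned char data[4] = { 0x00, 0xff, 0x00, 0x00 };

          bool as_bool;
          int as_int;
          memcpy(&as_bool, data, sizeof(as_bool)); /* 1 byte: false */
          memcpy(&as_int, data, sizeof(as_int));   /* 4 bytes: non-zero */

          /* Prints "bool read: 0, int read: 65280" on little-endian;
           * the old *(int *)data cast would have rendered "true". */
          printf("bool read: %d, int read: %d\n", as_bool, as_int);
          return 0;
  }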
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index 7a20931c3250..ef0dc2f8d5a2 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -83,6 +83,29 @@ static bool is_iter_map_target(const char *target_name)
strcmp(target_name, "bpf_sk_storage_map") == 0;
}
+static bool is_iter_cgroup_target(const char *target_name)
+{
+ return strcmp(target_name, "cgroup") == 0;
+}
+
+static const char *cgroup_order_string(__u32 order)
+{
+ switch (order) {
+ case BPF_CGROUP_ITER_ORDER_UNSPEC:
+ return "order_unspec";
+ case BPF_CGROUP_ITER_SELF_ONLY:
+ return "self_only";
+ case BPF_CGROUP_ITER_DESCENDANTS_PRE:
+ return "descendants_pre";
+ case BPF_CGROUP_ITER_DESCENDANTS_POST:
+ return "descendants_post";
+ case BPF_CGROUP_ITER_ANCESTORS_UP:
+ return "ancestors_up";
+ default: /* won't happen */
+ return "unknown";
+ }
+}
+
static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
{
const char *target_name = u64_to_ptr(info->iter.target_name);
@@ -91,6 +114,12 @@ static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
if (is_iter_map_target(target_name))
jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
+
+ if (is_iter_cgroup_target(target_name)) {
+ jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
+ jsonw_string_field(wtr, "order",
+ cgroup_order_string(info->iter.cgroup.order));
+ }
}
static int get_prog_info(int prog_id, struct bpf_prog_info *info)
@@ -208,6 +237,12 @@ static void show_iter_plain(struct bpf_link_info *info)
if (is_iter_map_target(target_name))
printf("map_id %u ", info->iter.map.map_id);
+
+ if (is_iter_cgroup_target(target_name)) {
+ printf("cgroup_id %llu ", info->iter.cgroup.cgroup_id);
+ printf("order %s ",
+ cgroup_order_string(info->iter.cgroup.order));
+ }
}
static int show_link_close_plain(int fd, struct bpf_link_info *info)
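For reference, with the hunks above applied, an iter link attached to a cgroup
would show the two new fields in both output modes. Illustrative only: the ids
are invented and surrounding fields abbreviated, but the `cgroup_id` and
`order` fields match the format strings added above.

  # bpftool link show            (plain output, excerpt)
  5: iter  prog 42  target_name cgroup  cgroup_id 128  order descendants_pre

  # bpftool -j link show         (JSON output, excerpt)
  ... "target_name":"cgroup","cgroup_id":128,"order":"descendants_pre" ...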