commit    caaba96131b3a132590316c49887af85e07930b6
tree      a67317a69670fbeb96ff6e106ec4de98c1ba15fe /tools/perf
parent    e52984be9a522fb55c8f3e3df860d464d6658585
parent    0bfb95f59a6613e30c0672b8ef2c9502302bf6bb
author    Jakub Kicinski <kuba@kernel.org>  2022-01-24 15:42:28 -0800
committer Jakub Kicinski <kuba@kernel.org>  2022-01-24 15:42:29 -0800
Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
pull-request: bpf-next 2022-01-24
We've added 80 non-merge commits during the last 14 day(s) which contain
a total of 128 files changed, 4990 insertions(+), 895 deletions(-).
The main changes are:
1) Add XDP multi-buffer (frags) support and implement it for the mvneta driver,
   from Lorenzo Bianconi, Eelco Chaudron and Toke Høiland-Jørgensen; a hedged
   sketch of a frags-aware program follows this list.
2) Add unstable conntrack lookup helpers for BPF by using the BPF kfunc
infra, from Kumar Kartikeya Dwivedi.
3) Extend BPF cgroup programs to export a custom return value to userspace via
   two new helpers, bpf_get_retval() and bpf_set_retval(), from YiFei Zhu; see
   the hedged sketch after the commit trailers below.
4) Add support for AF_UNIX iterator batching, from Kuniyuki Iwashima.
5) Complete missing UAPI BPF helper descriptions and change the bpf_doc.py script
   to enforce consistent & complete helper documentation, from Usama Arif.
6) Deprecate libbpf's legacy BPF map definitions and streamline XDP APIs to
follow tc-based APIs, from Andrii Nakryiko.
7) Support BPF_PROG_QUERY for BPF programs attached to sockmap, from Di Zhu.
8) Deprecate libbpf's bpf_map__def() API and replace its users with the proper getters
   and setters, from Christy Lee; the tools/perf diff at the bottom of this page is
   exactly this migration (a getter sketch precedes it).
9) Extend libbpf's btf__add_btf() with an additional hashmap for strings to
reduce overhead, from Kui-Feng Lee.
10) Fix bpftool and libbpf error handling related to libbpf's hashmap__new()
utility function, from Mauricio Vásquez.
11) Add support for BTF program names in bpftool's program dump, from Raman Shukhau.
12) Fix resolve_btfids build to pick up host flags, from Connor O'Brien.
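As a hedged illustration of item 1 (a sketch under assumptions, not code from this
series): a minimal frags-aware XDP program. It relies on the SEC("xdp.frags") section
name added in this series, which makes libbpf set BPF_F_XDP_HAS_FRAGS at load time,
and on the new bpf_xdp_get_buff_len() and bpf_xdp_load_bytes() helpers. The program
name and the 3000-byte drop threshold are invented for the example.

/* Hedged sketch for item 1: a frags-aware XDP program. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_frags_example(struct xdp_md *ctx)
{
	__u8 eth[14];

	/* Total packet length across all frags, not just the linear area. */
	__u64 len = bpf_xdp_get_buff_len(ctx);

	/* A copy that stays safe even when it crosses a frag boundary. */
	if (bpf_xdp_load_bytes(ctx, 0, eth, sizeof(eth)) < 0)
		return XDP_DROP;

	return len > 3000 ? XDP_DROP : XDP_PASS;	/* invented policy */
}

char LICENSE[] SEC("license") = "GPL";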
* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (80 commits)
selftests, bpf: Do not yet switch to new libbpf XDP APIs
selftests, xsk: Fix rx_full stats test
bpf: Fix flexible_array.cocci warnings
xdp: disable XDP_REDIRECT for xdp frags
bpf: selftests: add CPUMAP/DEVMAP selftests for xdp frags
bpf: selftests: introduce bpf_xdp_{load,store}_bytes selftest
net: xdp: introduce bpf_xdp_pointer utility routine
bpf: generalise tail call map compatibility check
libbpf: Add SEC name for xdp frags programs
bpf: selftests: update xdp_adjust_tail selftest to include xdp frags
bpf: test_run: add xdp_shared_info pointer in bpf_test_finish signature
bpf: introduce frags support to bpf_prog_test_run_xdp()
bpf: move user_size out of bpf_test_init
bpf: add frags support to xdp copy helpers
bpf: add frags support to the bpf_xdp_adjust_tail() API
bpf: introduce bpf_xdp_get_buff_len helper
net: mvneta: enable jumbo frames if the loaded XDP program support frags
bpf: introduce BPF_F_XDP_HAS_FRAGS flag in prog_flags loading the ebpf program
net: mvneta: add frags support to XDP_TX
xdp: add frags support to xdp_return_{buff/frame}
...
====================
Link: https://lore.kernel.org/r/20220124221235.18993-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
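To make item 3 concrete, a minimal sketch assuming a cgroup/bind4 attachment (the
hook, port, and errno are illustrative; this is not code from the series): returning
0 rejects the syscall, and bpf_set_retval() chooses the errno userspace sees instead
of the default.

/* Hedged sketch for item 3: custom errno via bpf_set_retval(). */
#include <errno.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("cgroup/bind4")
int reject_port_4242(struct bpf_sock_addr *ctx)
{
	if (ctx->user_port == bpf_htons(4242)) {	/* invented port */
		bpf_set_retval(-EADDRINUSE);	/* errno seen by userspace */
		return 0;			/* reject the bind() */
	}
	return 1;				/* allow */
}

char LICENSE[] SEC("license") = "GPL";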
Diffstat (limited to 'tools/perf')
 tools/perf/util/bpf-loader.c | 64
 tools/perf/util/bpf_map.c    | 28
 2 files changed, 41 insertions(+), 51 deletions(-)
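The diff below is item 8 in practice: perf's BPF code drops the deprecated
bpf_map__def() in favor of per-field getters. A minimal userspace sketch of that
getter API follows (describe_map() is an invented helper name, not part of libbpf):

/* Hedged sketch of the libbpf getters the diff below migrates to. */
#include <stdio.h>
#include <bpf/libbpf.h>

static void describe_map(const struct bpf_map *map)
{
	/* Each getter replaces one field of the old struct bpf_map_def. */
	printf("%s: type=%d key_size=%u value_size=%u max_entries=%u\n",
	       bpf_map__name(map),
	       bpf_map__type(map),
	       bpf_map__key_size(map),
	       bpf_map__value_size(map),
	       bpf_map__max_entries(map));
}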
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 7ecfaac7536a..ef2832b4d5cc 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -1005,24 +1005,22 @@ __bpf_map__config_value(struct bpf_map *map,
 {
 	struct bpf_map_op *op;
 	const char *map_name = bpf_map__name(map);
-	const struct bpf_map_def *def = bpf_map__def(map);
 
-	if (IS_ERR(def)) {
-		pr_debug("Unable to get map definition from '%s'\n",
-			 map_name);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", map_name);
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 
-	if (def->type != BPF_MAP_TYPE_ARRAY) {
+	if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
 		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
 			 map_name);
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
 	}
-	if (def->key_size < sizeof(unsigned int)) {
+	if (bpf_map__key_size(map) < sizeof(unsigned int)) {
 		pr_debug("Map %s has incorrect key size\n", map_name);
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
 	}
 
-	switch (def->value_size) {
+	switch (bpf_map__value_size(map)) {
 	case 1:
 	case 2:
 	case 4:
@@ -1064,7 +1062,6 @@ __bpf_map__config_event(struct bpf_map *map,
 			struct parse_events_term *term,
 			struct evlist *evlist)
 {
-	const struct bpf_map_def *def;
 	struct bpf_map_op *op;
 	const char *map_name = bpf_map__name(map);
 	struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
@@ -1075,18 +1072,16 @@ __bpf_map__config_event(struct bpf_map *map,
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
 	}
 
-	def = bpf_map__def(map);
-	if (IS_ERR(def)) {
-		pr_debug("Unable to get map definition from '%s'\n",
-			 map_name);
-		return PTR_ERR(def);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", map_name);
+		return PTR_ERR(map);
 	}
 
 	/*
 	 * No need to check key_size and value_size:
 	 * kernel has already checked them.
 	 */
-	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+	if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
 		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
 			 map_name);
 		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
@@ -1135,7 +1130,6 @@ config_map_indices_range_check(struct parse_events_term *term,
 			       const char *map_name)
 {
 	struct parse_events_array *array = &term->array;
-	const struct bpf_map_def *def;
 	unsigned int i;
 
 	if (!array->nr_ranges)
@@ -1146,10 +1140,8 @@ config_map_indices_range_check(struct parse_events_term *term,
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 
-	def = bpf_map__def(map);
-	if (IS_ERR(def)) {
-		pr_debug("ERROR: Unable to get map definition from '%s'\n",
-			 map_name);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", map_name);
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 
@@ -1158,7 +1150,7 @@ config_map_indices_range_check(struct parse_events_term *term,
 		size_t length = array->ranges[i].length;
 		unsigned int idx = start + length - 1;
 
-		if (idx >= def->max_entries) {
+		if (idx >= bpf_map__max_entries(map)) {
 			pr_debug("ERROR: index %d too large\n", idx);
 			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
 		}
@@ -1251,21 +1243,21 @@ out:
 }
 
 typedef int (*map_config_func_t)(const char *name, int map_fd,
-				 const struct bpf_map_def *pdef,
+				 const struct bpf_map *map,
 				 struct bpf_map_op *op,
 				 void *pkey, void *arg);
 
 static int
 foreach_key_array_all(map_config_func_t func,
 		      void *arg, const char *name,
-		      int map_fd, const struct bpf_map_def *pdef,
+		      int map_fd, const struct bpf_map *map,
 		      struct bpf_map_op *op)
 {
 	unsigned int i;
 	int err;
 
-	for (i = 0; i < pdef->max_entries; i++) {
-		err = func(name, map_fd, pdef, op, &i, arg);
+	for (i = 0; i < bpf_map__max_entries(map); i++) {
+		err = func(name, map_fd, map, op, &i, arg);
 		if (err) {
 			pr_debug("ERROR: failed to insert value to %s[%u]\n",
 				 name, i);
@@ -1278,7 +1270,7 @@ foreach_key_array_all(map_config_func_t func,
 static int
 foreach_key_array_ranges(map_config_func_t func, void *arg,
 			 const char *name, int map_fd,
-			 const struct bpf_map_def *pdef,
+			 const struct bpf_map *map,
 			 struct bpf_map_op *op)
 {
 	unsigned int i, j;
@@ -1291,7 +1283,7 @@ foreach_key_array_ranges(map_config_func_t func, void *arg,
 		for (j = 0; j < length; j++) {
 			unsigned int idx = start + j;
 
-			err = func(name, map_fd, pdef, op, &idx, arg);
+			err = func(name, map_fd, map, op, &idx, arg);
 			if (err) {
 				pr_debug("ERROR: failed to insert value to %s[%u]\n",
 					 name, idx);
@@ -1307,9 +1299,8 @@ bpf_map_config_foreach_key(struct bpf_map *map,
 			   map_config_func_t func,
 			   void *arg)
 {
-	int err, map_fd;
+	int err, map_fd, type;
 	struct bpf_map_op *op;
-	const struct bpf_map_def *def;
 	const char *name = bpf_map__name(map);
 	struct bpf_map_priv *priv = bpf_map__priv(map);
 
@@ -1322,9 +1313,8 @@ bpf_map_config_foreach_key(struct bpf_map *map,
 		return 0;
 	}
 
-	def = bpf_map__def(map);
-	if (IS_ERR(def)) {
-		pr_debug("ERROR: failed to get definition from map %s\n", name);
+	if (!map) {
+		pr_debug("Map '%s' is invalid\n", name);
 		return -BPF_LOADER_ERRNO__INTERNAL;
 	}
 	map_fd = bpf_map__fd(map);
@@ -1333,19 +1323,19 @@ bpf_map_config_foreach_key(struct bpf_map *map,
 		return map_fd;
 	}
 
+	type = bpf_map__type(map);
 	list_for_each_entry(op, &priv->ops_list, list) {
-		switch (def->type) {
+		switch (type) {
 		case BPF_MAP_TYPE_ARRAY:
 		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
 			switch (op->key_type) {
 			case BPF_MAP_KEY_ALL:
 				err = foreach_key_array_all(func, arg, name,
-							    map_fd, def, op);
+							    map_fd, map, op);
 				break;
 			case BPF_MAP_KEY_RANGES:
 				err = foreach_key_array_ranges(func, arg, name,
-							       map_fd, def,
-							       op);
+							       map_fd, map, op);
 				break;
 			default:
 				pr_debug("ERROR: keytype for map '%s' invalid\n",
@@ -1454,7 +1444,7 @@ apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
 
 static int
 apply_obj_config_map_for_key(const char *name, int map_fd,
-			     const struct bpf_map_def *pdef,
+			     const struct bpf_map *map,
 			     struct bpf_map_op *op,
 			     void *pkey, void *arg __maybe_unused)
 {
@@ -1463,7 +1453,7 @@ apply_obj_config_map_for_key(const char *name, int map_fd,
 	switch (op->op_type) {
 	case BPF_MAP_OP_SET_VALUE:
 		err = apply_config_value_for_key(map_fd, pkey,
-						 pdef->value_size,
+						 bpf_map__value_size(map),
 						 op->v.value);
 		break;
 	case BPF_MAP_OP_SET_EVSEL:
diff --git a/tools/perf/util/bpf_map.c b/tools/perf/util/bpf_map.c
index eb853ca67cf4..c863ae0c5cb5 100644
--- a/tools/perf/util/bpf_map.c
+++ b/tools/perf/util/bpf_map.c
@@ -9,25 +9,25 @@
 #include <stdlib.h>
 #include <unistd.h>
 
-static bool bpf_map_def__is_per_cpu(const struct bpf_map_def *def)
+static bool bpf_map__is_per_cpu(enum bpf_map_type type)
 {
-	return def->type == BPF_MAP_TYPE_PERCPU_HASH ||
-	       def->type == BPF_MAP_TYPE_PERCPU_ARRAY ||
-	       def->type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
-	       def->type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
+	return type == BPF_MAP_TYPE_PERCPU_HASH ||
+	       type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+	       type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+	       type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
 }
 
-static void *bpf_map_def__alloc_value(const struct bpf_map_def *def)
+static void *bpf_map__alloc_value(const struct bpf_map *map)
 {
-	if (bpf_map_def__is_per_cpu(def))
-		return malloc(round_up(def->value_size, 8) * sysconf(_SC_NPROCESSORS_CONF));
+	if (bpf_map__is_per_cpu(bpf_map__type(map)))
+		return malloc(round_up(bpf_map__value_size(map), 8) *
+			      sysconf(_SC_NPROCESSORS_CONF));
 
-	return malloc(def->value_size);
+	return malloc(bpf_map__value_size(map));
 }
 
 int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
 {
-	const struct bpf_map_def *def = bpf_map__def(map);
 	void *prev_key = NULL, *key, *value;
 	int fd = bpf_map__fd(map), err;
 	int printed = 0;
@@ -35,15 +35,15 @@ int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
 	if (fd < 0)
 		return fd;
 
-	if (IS_ERR(def))
-		return PTR_ERR(def);
+	if (!map)
+		return PTR_ERR(map);
 
 	err = -ENOMEM;
-	key = malloc(def->key_size);
+	key = malloc(bpf_map__key_size(map));
 	if (key == NULL)
 		goto out;
 
-	value = bpf_map_def__alloc_value(def);
+	value = bpf_map__alloc_value(map);
 	if (value == NULL)
 		goto out_free_key;
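The bpf_map.c hunk above sizes per-CPU values as round_up(value_size, 8) * nr_cpus:
a single lookup on a per-CPU map fills one 8-byte-aligned slot per CPU. A hedged
standalone sketch of the same layout follows (read_percpu_value() is an invented
name; like the perf code above, it approximates the kernel's possible-CPU count
with sysconf):

/* Hedged sketch: reading one entry of a per-CPU map into a buffer
 * laid out exactly as bpf_map__alloc_value() sizes it above.
 */
#include <stdlib.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static void *read_percpu_value(const struct bpf_map *map, __u32 key)
{
	size_t stride = (bpf_map__value_size(map) + 7) & ~(size_t)7; /* round_up(.., 8) */
	long ncpus = sysconf(_SC_NPROCESSORS_CONF);
	void *buf = malloc(stride * ncpus);

	if (!buf)
		return NULL;
	if (bpf_map_lookup_elem(bpf_map__fd(map), &key, buf)) {
		free(buf);	/* lookup failed; errno is set */
		return NULL;
	}
	return buf;		/* CPU i's value is at buf + i * stride */
}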