author    Alexei Starovoitov <ast@kernel.org>    2021-12-09 15:40:34 -0800
committer Andrii Nakryiko <andrii@kernel.org>    2021-12-10 15:35:14 -0800
commit    bd6b3b355af59af1f8c417c269a46be26a1dc87b (patch)
tree      0e92aea9d74a32c822d7232e3293fb51a1cc4e87
parent    ac55b3f00c323cf09d59a191e14bcf39b691078c (diff)
parent    b59e4ce8bcaab6445f4a0d37a96ca8953caaf5cf (diff)
Merge branch 'Enhance and rework logging controls in libbpf'
Andrii Nakryiko says:

====================

Add new open options and per-program setters to control BTF and program
loading log verboseness and allow providing custom log buffers to capture
logs of interest.

Note how custom log_buf and log_level are orthogonal, which matches the
previous (alas, less customizable) behavior of libbpf, even though it sort
of worked by accident: if someone specified log_level = 1 in
bpf_object__load_xattr(), the first attempt to load any BPF program
resulted in a wasted bpf() syscall that failed with -EINVAL because
!!log_buf != !!log_level. Then, on retry, libbpf would allocate a log
buffer and try again, after which program loading would succeed and libbpf
would print the verbose program loading log through its print callback.

This behavior is now documented and made more efficient by not wasting an
unnecessary syscall. Additionally, log_level can be controlled globally at
the per-bpf_object level through bpf_object_open_opts, as well as on a
per-program basis with the bpf_program__set_log_buf() and
bpf_program__set_log_level() APIs.

Now that we have a more future-proof way to set log_level, deprecate
bpf_object__load_xattr().

v2->v3:
- added log_buf selftests for bpf_prog_load() and bpf_btf_load();
- fix !log_buf in bpf_prog_load (John);
- fix log_level == 0 in bpf_btf_load (thanks, selftest!);

v1->v2:
- fix log_level == 0 handling of bpf_prog_load, add as patch #1 (Alexei);
- add comments explaining log_buf_size overflow prevention (Alexei).

====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
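As a usage sketch (not part of this merge), the new object-level and per-program
knobs compose roughly as follows; the load_with_logs() helper, the "my_prog"
program name, and the buffer sizes are hypothetical:

	#include <stdio.h>
	#include <bpf/libbpf.h>

	static char obj_log[64 * 1024];  /* shared BTF/program load log buffer */
	static char prog_log[64 * 1024]; /* dedicated verifier log for one prog */

	static int load_with_logs(const char *path)
	{
		/* object-level buffer captures BPF_BTF_LOAD and BPF_PROG_LOAD logs */
		LIBBPF_OPTS(bpf_object_open_opts, opts,
			.kernel_log_buf = obj_log,
			.kernel_log_size = sizeof(obj_log),
			.kernel_log_level = 1, /* verbose from the first attempt */
		);
		struct bpf_object *obj;
		struct bpf_program *prog;
		int err;

		obj = bpf_object__open_file(path, &opts);
		if (libbpf_get_error(obj))
			return -1;

		/* preserve this program's verifier log in its own buffer */
		prog = bpf_object__find_program_by_name(obj, "my_prog");
		if (prog) {
			bpf_program__set_log_buf(prog, prog_log, sizeof(prog_log));
			bpf_program__set_log_level(prog, 2);
		}

		err = bpf_object__load(obj); /* instead of bpf_object__load_xattr() */
		if (err)
			fprintf(stderr, "verifier log:\n%s\n", prog_log);
		bpf_object__close(obj);
		return err;
	}

With kernel_log_level = 0 instead, the buffers would be filled in only when a
load fails, per the retry semantics described above.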
-rw-r--r--  tools/bpf/bpftool/gen.c                                   11
-rw-r--r--  tools/bpf/bpftool/prog.c                                  24
-rw-r--r--  tools/bpf/bpftool/struct_ops.c                            15
-rw-r--r--  tools/lib/bpf/bpf.c                                       88
-rw-r--r--  tools/lib/bpf/bpf.h                                       22
-rw-r--r--  tools/lib/bpf/btf.c                                       78
-rw-r--r--  tools/lib/bpf/libbpf.c                                   194
-rw-r--r--  tools/lib/bpf/libbpf.h                                    49
-rw-r--r--  tools/lib/bpf/libbpf.map                                   3
-rw-r--r--  tools/lib/bpf/libbpf_internal.h                            1
-rw-r--r--  tools/lib/bpf/libbpf_probes.c                              2
-rw-r--r--  tools/testing/selftests/bpf/map_tests/sk_storage_map.c     2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c        6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf.c              50
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/log_buf.c         276
-rw-r--r--  tools/testing/selftests/bpf/progs/test_log_buf.c          24
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c                2
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.c             10
18 files changed, 683 insertions(+), 174 deletions(-)
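Before the diff itself, a minimal sketch of the new low-level bpf_btf_load()
API that the bpf.c/bpf.h changes below introduce (assumptions: raw_btf_data/
raw_btf_size hold a valid raw BTF blob; load_btf_blob() and btf_log are
hypothetical names). With log_level = 0 and a non-NULL log_buf, the first
attempt passes no log buffer to the kernel; only on failure does the helper
retry with log_level = 1 and fill the buffer:

	#include <bpf/bpf.h>

	static char btf_log[1024 * 1024];

	static int load_btf_blob(const void *raw_btf_data, size_t raw_btf_size)
	{
		LIBBPF_OPTS(bpf_btf_load_opts, opts,
			.log_buf = btf_log,
			.log_size = sizeof(btf_log),
			.log_level = 0, /* buffer filled only if the load fails */
		);

		return bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
	}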
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 997a2865e04a..b4695df2ea3d 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -486,7 +486,6 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
- struct bpf_object_load_attr load_attr = {};
DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
struct bpf_map *map;
char ident[256];
@@ -496,12 +495,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
if (err)
return err;
- load_attr.obj = obj;
- if (verifier_logs)
- /* log_level1 + log_level2 + stats, but not stable UAPI */
- load_attr.log_level = 1 + 2 + 4;
-
- err = bpf_object__load_xattr(&load_attr);
+ err = bpf_object__load(obj);
if (err) {
p_err("failed to load object file");
goto out;
@@ -719,6 +713,9 @@ static int do_skeleton(int argc, char **argv)
if (obj_name[0] == '\0')
get_obj_name(obj_name, file);
opts.object_name = obj_name;
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ opts.kernel_log_level = 1 + 2 + 4;
obj = bpf_object__open_mem(obj_data, file_sz, &opts);
err = libbpf_get_error(obj);
if (err) {
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 45ccc254e69f..f874896c4154 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1464,7 +1464,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
.relaxed_maps = relaxed_maps,
);
- struct bpf_object_load_attr load_attr = { 0 };
enum bpf_attach_type expected_attach_type;
struct map_replace *map_replace = NULL;
struct bpf_program *prog = NULL, *pos;
@@ -1598,6 +1597,10 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
set_max_rlimit();
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ open_opts.kernel_log_level = 1 + 2 + 4;
+
obj = bpf_object__open_file(file, &open_opts);
if (libbpf_get_error(obj)) {
p_err("failed to open object file");
@@ -1677,12 +1680,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_close_obj;
}
- load_attr.obj = obj;
- if (verifier_logs)
- /* log_level1 + log_level2 + stats, but not stable UAPI */
- load_attr.log_level = 1 + 2 + 4;
-
- err = bpf_object__load_xattr(&load_attr);
+ err = bpf_object__load(obj);
if (err) {
p_err("failed to load object file");
goto err_close_obj;
@@ -1809,7 +1807,6 @@ static int do_loader(int argc, char **argv)
{
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
- struct bpf_object_load_attr load_attr = {};
struct bpf_object *obj;
const char *file;
int err = 0;
@@ -1818,6 +1815,10 @@ static int do_loader(int argc, char **argv)
return -1;
file = GET_ARG();
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ open_opts.kernel_log_level = 1 + 2 + 4;
+
obj = bpf_object__open_file(file, &open_opts);
if (libbpf_get_error(obj)) {
p_err("failed to open object file");
@@ -1828,12 +1829,7 @@ static int do_loader(int argc, char **argv)
if (err)
goto err_close_obj;
- load_attr.obj = obj;
- if (verifier_logs)
- /* log_level1 + log_level2 + stats, but not stable UAPI */
- load_attr.log_level = 1 + 2 + 4;
-
- err = bpf_object__load_xattr(&load_attr);
+ err = bpf_object__load(obj);
if (err) {
p_err("failed to load object file");
goto err_close_obj;
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
index cbdca37a53f0..2f693b082bdb 100644
--- a/tools/bpf/bpftool/struct_ops.c
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -479,7 +479,7 @@ static int do_unregister(int argc, char **argv)
static int do_register(int argc, char **argv)
{
- struct bpf_object_load_attr load_attr = {};
+ LIBBPF_OPTS(bpf_object_open_opts, open_opts);
const struct bpf_map_def *def;
struct bpf_map_info info = {};
__u32 info_len = sizeof(info);
@@ -494,18 +494,17 @@ static int do_register(int argc, char **argv)
file = GET_ARG();
- obj = bpf_object__open(file);
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ open_opts.kernel_log_level = 1 + 2 + 4;
+
+ obj = bpf_object__open_file(file, &open_opts);
if (libbpf_get_error(obj))
return -1;
set_max_rlimit();
- load_attr.obj = obj;
- if (verifier_logs)
- /* log_level1 + log_level2 + stats, but not stable UAPI */
- load_attr.log_level = 1 + 2 + 4;
-
- if (bpf_object__load_xattr(&load_attr)) {
+ if (bpf_object__load(obj)) {
bpf_object__close(obj);
return -1;
}
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 4e7836e1a7b5..6b2407e12060 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -303,10 +303,6 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
if (log_level && !log_buf)
return libbpf_err(-EINVAL);
- attr.log_level = log_level;
- attr.log_buf = ptr_to_u64(log_buf);
- attr.log_size = log_size;
-
func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
func_info = OPTS_GET(opts, func_info, NULL);
attr.func_info_rec_size = func_info_rec_size;
@@ -321,6 +317,12 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
+ if (log_level) {
+ attr.log_buf = ptr_to_u64(log_buf);
+ attr.log_size = log_size;
+ attr.log_level = log_level;
+ }
+
fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
if (fd >= 0)
return fd;
@@ -366,16 +368,17 @@ int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
goto done;
}
- if (log_level || !log_buf)
- goto done;
+ if (log_level == 0 && log_buf) {
+ /* log_level == 0 with a non-NULL log_buf requires retrying on error
+ * with log_level == 1 and log_buf/log_buf_size set, to get details of
+ * the failure
+ */
+ attr.log_buf = ptr_to_u64(log_buf);
+ attr.log_size = log_size;
+ attr.log_level = 1;
- /* Try again with log */
- log_buf[0] = 0;
- attr.log_buf = ptr_to_u64(log_buf);
- attr.log_size = log_size;
- attr.log_level = 1;
-
- fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
+ fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
+ }
done:
/* free() doesn't affect errno, so we don't need to restore it */
free(finfo);
@@ -1044,24 +1047,65 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
return libbpf_err_errno(fd);
}
-int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
- bool do_log)
+int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
{
- union bpf_attr attr = {};
+ const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
+ union bpf_attr attr;
+ char *log_buf;
+ size_t log_size;
+ __u32 log_level;
int fd;
- attr.btf = ptr_to_u64(btf);
+ memset(&attr, 0, attr_sz);
+
+ if (!OPTS_VALID(opts, bpf_btf_load_opts))
+ return libbpf_err(-EINVAL);
+
+ log_buf = OPTS_GET(opts, log_buf, NULL);
+ log_size = OPTS_GET(opts, log_size, 0);
+ log_level = OPTS_GET(opts, log_level, 0);
+
+ if (log_size > UINT_MAX)
+ return libbpf_err(-EINVAL);
+ if (log_size && !log_buf)
+ return libbpf_err(-EINVAL);
+
+ attr.btf = ptr_to_u64(btf_data);
attr.btf_size = btf_size;
+ /* log_level == 0 and log_buf != NULL means "try loading without
+ * log_buf, but retry with log_buf and log_level=1 on error", which is
+ * consistent across low-level and high-level BTF and program loading
+ * APIs within libbpf and provides a sensible behavior in practice
+ */
+ if (log_level) {
+ attr.btf_log_buf = ptr_to_u64(log_buf);
+ attr.btf_log_size = (__u32)log_size;
+ attr.btf_log_level = log_level;
+ }
-retry:
- if (do_log && log_buf && log_buf_size) {
- attr.btf_log_level = 1;
- attr.btf_log_size = log_buf_size;
+ fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
+ if (fd < 0 && log_buf && log_level == 0) {
attr.btf_log_buf = ptr_to_u64(log_buf);
+ attr.btf_log_size = (__u32)log_size;
+ attr.btf_log_level = 1;
+ fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
}
+ return libbpf_err_errno(fd);
+}
+
+int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log)
+{
+ LIBBPF_OPTS(bpf_btf_load_opts, opts);
+ int fd;
- fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, sizeof(attr));
+retry:
+ if (do_log && log_buf && log_buf_size) {
+ opts.log_buf = log_buf;
+ opts.log_size = log_buf_size;
+ opts.log_level = 1;
+ }
+ fd = bpf_btf_load(btf, btf_size, &opts);
if (fd < 0 && !do_log && log_buf && log_buf_size) {
do_log = true;
goto retry;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index f79e5fbcf1c1..94e553a0ff9d 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -195,8 +195,9 @@ struct bpf_load_program_attr {
/* Flags to direct loading requirements */
#define MAPS_RELAX_COMPAT 0x01
-/* Recommend log buffer size */
+/* Recommended log buffer size */
#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
+
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz);
@@ -213,6 +214,23 @@ LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
char *log_buf, size_t log_buf_sz,
int log_level);
+struct bpf_btf_load_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+
+ /* kernel log options */
+ char *log_buf;
+ __u32 log_level;
+ __u32 log_size;
+};
+#define bpf_btf_load_opts__last_field log_size
+
+LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
+ const struct bpf_btf_load_opts *opts);
+
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_btf_load() instead")
+LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf,
+ __u32 log_buf_size, bool do_log);
+
LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags);
@@ -340,8 +358,6 @@ LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
__u32 query_flags, __u32 *attach_flags,
__u32 *prog_ids, __u32 *prog_cnt);
LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
-LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf,
- __u32 log_buf_size, bool do_log);
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
__u64 *probe_offset, __u64 *probe_addr);
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 0d7b16eab569..e171424192ae 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1124,54 +1124,86 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf)
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
-int btf__load_into_kernel(struct btf *btf)
+int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
{
- __u32 log_buf_size = 0, raw_size;
- char *log_buf = NULL;
+ LIBBPF_OPTS(bpf_btf_load_opts, opts);
+ __u32 buf_sz = 0, raw_size;
+ char *buf = NULL, *tmp;
void *raw_data;
int err = 0;
if (btf->fd >= 0)
return libbpf_err(-EEXIST);
+ if (log_sz && !log_buf)
+ return libbpf_err(-EINVAL);
-retry_load:
- if (log_buf_size) {
- log_buf = malloc(log_buf_size);
- if (!log_buf)
- return libbpf_err(-ENOMEM);
-
- *log_buf = 0;
- }
-
+ /* cache native raw data representation */
raw_data = btf_get_raw_data(btf, &raw_size, false);
if (!raw_data) {
err = -ENOMEM;
goto done;
}
- /* cache native raw data representation */
btf->raw_size = raw_size;
btf->raw_data = raw_data;
- btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false);
+retry_load:
+ /* if log_level is 0, we won't provide log_buf/log_size to the kernel
+ * initially. Only if BTF loading fails do we bump log_level to 1 and
+ * retry, using either the auto-allocated or the custom log_buf. This
+ * way a non-NULL custom log_buf provides a buffer just in case, while
+ * still hoping for a successful load with no need for log_buf.
+ */
+ if (log_level) {
+ /* if caller didn't provide custom log_buf, we'll keep
+ * allocating our own progressively bigger buffers for BTF
+ * verification log
+ */
+ if (!log_buf) {
+ buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
+ tmp = realloc(buf, buf_sz);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto done;
+ }
+ buf = tmp;
+ buf[0] = '\0';
+ }
+
+ opts.log_buf = log_buf ? log_buf : buf;
+ opts.log_size = log_buf ? log_sz : buf_sz;
+ opts.log_level = log_level;
+ }
+
+ btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
if (btf->fd < 0) {
- if (!log_buf || errno == ENOSPC) {
- log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
- log_buf_size << 1);
- free(log_buf);
+ /* time to turn on verbose mode and try again */
+ if (log_level == 0) {
+ log_level = 1;
goto retry_load;
}
+ /* only retry if caller didn't provide custom log_buf, but
+ * make sure we can never overflow buf_sz
+ */
+ if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
+ goto retry_load;
err = -errno;
- pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
- if (*log_buf)
- pr_warn("%s\n", log_buf);
- goto done;
+ pr_warn("BTF loading error: %d\n", err);
+ /* don't print out contents of custom log_buf */
+ if (!log_buf && buf[0])
+ pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
}
done:
- free(log_buf);
+ free(buf);
return libbpf_err(err);
}
+
+int btf__load_into_kernel(struct btf *btf)
+{
+ return btf_load_into_kernel(btf, NULL, 0, 0);
+}
+
int btf__load(struct btf *) __attribute__((alias("btf__load_into_kernel")));
int btf__fd(const struct btf *btf)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 6db0b5e8540e..18d95c6a89fe 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -331,7 +331,11 @@ struct bpf_program {
struct reloc_desc *reloc_desc;
int nr_reloc;
- int log_level;
+
+ /* BPF verifier log settings */
+ char *log_buf;
+ size_t log_size;
+ __u32 log_level;
struct {
int nr;
@@ -573,6 +577,11 @@ struct bpf_object {
size_t btf_module_cnt;
size_t btf_module_cap;
+ /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
+ char *log_buf;
+ size_t log_size;
+ __u32 log_level;
+
void *priv;
bpf_object_clear_priv_t clear_priv;
@@ -708,6 +717,9 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
prog->instances.fds = NULL;
prog->instances.nr = -1;
+ /* inherit object's log_level */
+ prog->log_level = obj->log_level;
+
prog->sec_name = strdup(sec_name);
if (!prog->sec_name)
goto errout;
@@ -3017,7 +3029,9 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
*/
btf__set_fd(kern_btf, 0);
} else {
- err = btf__load_into_kernel(kern_btf);
+ /* currently BPF_BTF_LOAD only supports log_level 1 */
+ err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
+ obj->log_level ? 1 : 0);
}
if (sanitize) {
if (!err) {
@@ -6584,8 +6598,10 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
const char *prog_name = NULL;
char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
- char *log_buf = NULL;
+ char *log_buf = NULL, *tmp;
int btf_fd, ret, err;
+ bool own_log_buf = true;
+ __u32 log_level = prog->log_level;
if (prog->type == BPF_PROG_TYPE_UNSPEC) {
/*
@@ -6620,7 +6636,7 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
load_attr.line_info_rec_size = prog->line_info_rec_size;
load_attr.line_info_cnt = prog->line_info_cnt;
}
- load_attr.log_level = prog->log_level;
+ load_attr.log_level = log_level;
load_attr.prog_flags = prog->prog_flags;
load_attr.fd_array = obj->fd_array;
@@ -6641,22 +6657,45 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
*prog_fd = -1;
return 0;
}
-retry_load:
- if (log_buf_size) {
- log_buf = malloc(log_buf_size);
- if (!log_buf)
- return -ENOMEM;
- *log_buf = 0;
+retry_load:
+ /* if log_level is zero, we don't request logs initially even if a
+ * custom log_buf is specified; if the program load fails, we'll bump
+ * log_level to 1 and use either the custom log_buf or allocate our
+ * own, then retry the load to get details on what failed
+ */
+ if (log_level) {
+ if (prog->log_buf) {
+ log_buf = prog->log_buf;
+ log_buf_size = prog->log_size;
+ own_log_buf = false;
+ } else if (obj->log_buf) {
+ log_buf = obj->log_buf;
+ log_buf_size = obj->log_size;
+ own_log_buf = false;
+ } else {
+ log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
+ tmp = realloc(log_buf, log_buf_size);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ log_buf = tmp;
+ log_buf[0] = '\0';
+ own_log_buf = true;
+ }
}
load_attr.log_buf = log_buf;
load_attr.log_size = log_buf_size;
- ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
+ load_attr.log_level = log_level;
+ ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
if (ret >= 0) {
- if (log_buf && load_attr.log_level)
- pr_debug("verifier log:\n%s", log_buf);
+ if (log_level && own_log_buf) {
+ pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
+ prog->name, log_buf);
+ }
if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
struct bpf_map *map;
@@ -6669,8 +6708,8 @@ retry_load:
if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warn("prog '%s': failed to bind .rodata map: %s\n",
- prog->name, cp);
+ pr_warn("prog '%s': failed to bind map '%s': %s\n",
+ prog->name, map->real_name, cp);
/* Don't fail hard if can't bind rodata. */
}
}
@@ -6681,45 +6720,37 @@ retry_load:
goto out;
}
- if (!log_buf || errno == ENOSPC) {
- log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
- log_buf_size << 1);
-
- free(log_buf);
+ if (log_level == 0) {
+ log_level = 1;
goto retry_load;
}
- ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
+ /* On ENOSPC, increase log buffer size and retry, unless custom
+ * log_buf is specified.
+ * Be careful to not overflow u32, though. Kernel's log buf size limit
+ * isn't part of UAPI so it can always be bumped to full 4GB. So don't
+ * multiply by 2 unless we are sure we'll fit within 32 bits.
+ * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
+ */
+ if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
+ goto retry_load;
+
+ ret = -errno;
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warn("load bpf program failed: %s\n", cp);
+ pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
pr_perm_msg(ret);
- if (log_buf && log_buf[0] != '\0') {
- ret = -LIBBPF_ERRNO__VERIFY;
- pr_warn("-- BEGIN DUMP LOG ---\n");
- pr_warn("\n%s\n", log_buf);
- pr_warn("-- END LOG --\n");
- } else if (insns_cnt >= BPF_MAXINSNS) {
- pr_warn("Program too large (%d insns), at most %d insns\n",
- insns_cnt, BPF_MAXINSNS);
- ret = -LIBBPF_ERRNO__PROG2BIG;
- } else if (prog->type != BPF_PROG_TYPE_KPROBE) {
- /* Wrong program type? */
- int fd;
-
- load_attr.expected_attach_type = 0;
- load_attr.log_buf = NULL;
- load_attr.log_size = 0;
- fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, prog_name, license,
- insns, insns_cnt, &load_attr);
- if (fd >= 0) {
- close(fd);
- ret = -LIBBPF_ERRNO__PROGTYPE;
- goto out;
- }
+ if (own_log_buf && log_buf && log_buf[0] != '\0') {
+ pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
+ prog->name, log_buf);
+ }
+ if (insns_cnt >= BPF_MAXINSNS) {
+ pr_warn("prog '%s': program too large (%d insns), at most %d insns\n",
+ prog->name, insns_cnt, BPF_MAXINSNS);
}
out:
- free(log_buf);
+ if (own_log_buf)
+ free(log_buf);
return ret;
}
@@ -6924,14 +6955,16 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
return 0;
}
-static struct bpf_object *
-__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
- const struct bpf_object_open_opts *opts)
+static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ const struct bpf_object_open_opts *opts)
{
const char *obj_name, *kconfig, *btf_tmp_path;
struct bpf_object *obj;
char tmp_name[64];
int err;
+ char *log_buf;
+ size_t log_size;
+ __u32 log_level;
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("failed to init libelf for %s\n",
@@ -6954,10 +6987,22 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
pr_debug("loading object '%s' from buffer\n", obj_name);
}
+ log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
+ log_size = OPTS_GET(opts, kernel_log_size, 0);
+ log_level = OPTS_GET(opts, kernel_log_level, 0);
+ if (log_size > UINT_MAX)
+ return ERR_PTR(-EINVAL);
+ if (log_size && !log_buf)
+ return ERR_PTR(-EINVAL);
+
obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
if (IS_ERR(obj))
return obj;
+ obj->log_buf = log_buf;
+ obj->log_size = log_size;
+ obj->log_level = log_level;
+
btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
if (btf_tmp_path) {
if (strlen(btf_tmp_path) >= PATH_MAX) {
@@ -7011,7 +7056,7 @@ __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
return NULL;
pr_debug("loading %s\n", attr->file);
- return __bpf_object__open(attr->file, NULL, 0, &opts);
+ return bpf_object_open(attr->file, NULL, 0, &opts);
}
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
@@ -7037,7 +7082,7 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
pr_debug("loading %s\n", path);
- return libbpf_ptr(__bpf_object__open(path, NULL, 0, opts));
+ return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
}
struct bpf_object *
@@ -7047,7 +7092,7 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
if (!obj_buf || obj_buf_sz == 0)
return libbpf_err_ptr(-EINVAL);
- return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, opts));
+ return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
}
struct bpf_object *
@@ -7064,7 +7109,7 @@ bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
if (!obj_buf || obj_buf_sz == 0)
return errno = EINVAL, NULL;
- return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts));
+ return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, &opts));
}
static int bpf_object_unload(struct bpf_object *obj)
@@ -7417,14 +7462,10 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
return 0;
}
-int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
+static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
{
- struct bpf_object *obj;
int err, i;
- if (!attr)
- return libbpf_err(-EINVAL);
- obj = attr->obj;
if (!obj)
return libbpf_err(-EINVAL);
@@ -7434,7 +7475,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
}
if (obj->gen_loader)
- bpf_gen__init(obj->gen_loader, attr->log_level);
+ bpf_gen__init(obj->gen_loader, extra_log_level);
err = bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
@@ -7443,8 +7484,8 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
err = err ? : bpf_object__sanitize_maps(obj);
err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
err = err ? : bpf_object__create_maps(obj);
- err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : attr->target_btf_path);
- err = err ? : bpf_object__load_progs(obj, attr->log_level);
+ err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
+ err = err ? : bpf_object__load_progs(obj, extra_log_level);
err = err ? : bpf_object_init_prog_arrays(obj);
if (obj->gen_loader) {
@@ -7489,13 +7530,14 @@ out:
return libbpf_err(err);
}
-int bpf_object__load(struct bpf_object *obj)
+int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
{
- struct bpf_object_load_attr attr = {
- .obj = obj,
- };
+ return bpf_object_load(attr->obj, attr->log_level, attr->target_btf_path);
+}
- return bpf_object__load_xattr(&attr);
+int bpf_object__load(struct bpf_object *obj)
+{
+ return bpf_object_load(obj, 0, NULL);
}
static int make_parent_dir(const char *path)
@@ -8491,6 +8533,26 @@ int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
return 0;
}
+const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
+{
+ *log_size = prog->log_size;
+ return prog->log_buf;
+}
+
+int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
+{
+ if (log_size && !log_buf)
+ return -EINVAL;
+ if (log_size > UINT_MAX)
+ return -EINVAL;
+ if (prog->obj->loaded)
+ return -EBUSY;
+
+ prog->log_buf = log_buf;
+ prog->log_size = log_size;
+ return 0;
+}
+
#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
.sec = sec_pfx, \
.prog_type = BPF_PROG_TYPE_##ptype, \
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 4802c1e736c3..a8b894dae633 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -108,8 +108,47 @@ struct bpf_object_open_opts {
* struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux.
*/
const char *btf_custom_path;
+ /* Pointer to a buffer for storing kernel logs for applicable BPF
+ * commands. A valid kernel_log_size has to be specified as well, and
+ * both are passed through to the bpf() syscall. Keep in mind that the
+ * kernel might fail the operation with -ENOSPC if the provided buffer
+ * is too small to contain the entire log output.
+ * See the comment below for kernel_log_level for the interaction
+ * between log_buf and log_level settings.
+ *
+ * If specified, this log buffer will be passed for:
+ * - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden
+ * with bpf_program__set_log_buf() at the per-program level, to get
+ * the BPF verifier log output.
+ * - the BPF object's BTF load into the kernel (BPF_BTF_LOAD), to get
+ * the BTF sanity checking log.
+ *
+ * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite the
+ * previous contents, so if you need more fine-grained control, set a
+ * per-program buffer with bpf_program__set_log_buf() to preserve each
+ * individual program's verification log. Keep using kernel_log_buf
+ * for the BTF verification log, if necessary.
+ */
+ char *kernel_log_buf;
+ size_t kernel_log_size;
+ /*
+ * Log level can be set independently from the log buffer. log_level == 0
+ * means that libbpf will attempt loading BTF or a program without any
+ * logging requested, but will retry with either its own or the custom
+ * log buffer, if provided, and log_level == 1 on any error.
+ * Vice versa, setting log_level > 0 will request BTF or prog loading
+ * with a verbose log from the first attempt (and as such also for a
+ * successfully loaded BTF or program), and the actual log buffer will
+ * be either libbpf's own auto-allocated log buffer, if kernel_log_buf
+ * is NULL, or the user-provided custom kernel_log_buf.
+ * If the user didn't provide a custom log buffer, libbpf will emit the
+ * captured logs through its print callback.
+ */
+ __u32 kernel_log_level;
+
+ size_t :0;
};
-#define bpf_object_open_opts__last_field btf_custom_path
+#define bpf_object_open_opts__last_field kernel_log_level
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
@@ -175,6 +214,7 @@ struct bpf_object_load_attr {
/* Load/unload object into/from kernel */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
+LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__load() instead")
LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead")
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
@@ -552,8 +592,15 @@ bpf_program__set_expected_attach_type(struct bpf_program *prog,
LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags);
+
+/* Per-program log level and log buffer getters/setters.
+ * See bpf_object_open_opts comments regarding log_level and log_buf
+ * interactions.
+ */
LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level);
+LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
+LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 715df3a27389..4d483af7dba6 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -422,6 +422,9 @@ LIBBPF_0.6.0 {
LIBBPF_0.7.0 {
global:
+ bpf_btf_load;
+ bpf_program__log_buf;
bpf_program__log_level;
+ bpf_program__set_log_buf;
bpf_program__set_log_level;
};
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 6f143e9e810c..355c41019aed 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -277,6 +277,7 @@ int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);
+int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level);
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 41f2be47c2ea..4bdec69523a7 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -164,7 +164,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
- btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);
+ btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);
free(raw_btf);
return btf_fd;
diff --git a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
index 8eea4ffeb092..099eb4dfd4f7 100644
--- a/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
+++ b/tools/testing/selftests/bpf/map_tests/sk_storage_map.c
@@ -136,7 +136,7 @@ static int load_btf(void)
memcpy(raw_btf + sizeof(btf_hdr) + sizeof(btf_raw_types),
btf_str_sec, sizeof(btf_str_sec));
- return bpf_load_btf(raw_btf, sizeof(raw_btf), 0, 0, 0);
+ return bpf_btf_load(raw_btf, sizeof(raw_btf), NULL);
}
static int create_sk_storage_map(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 94e03df69d71..8daca0ac909f 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -217,14 +217,16 @@ static bool found;
static int libbpf_debug_print(enum libbpf_print_level level,
const char *format, va_list args)
{
- char *log_buf;
+ const char *log_buf;
if (level != LIBBPF_WARN ||
- strcmp(format, "libbpf: \n%s\n")) {
+ !strstr(format, "-- BEGIN PROG LOAD LOG --")) {
vprintf(format, args);
return 0;
}
+ /* skip prog_name */
+ va_arg(args, char *);
log_buf = va_arg(args, char *);
if (!log_buf)
goto out;
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index cab810bab593..01b776a7beeb 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -4071,6 +4071,28 @@ done:
return raw_btf;
}
+static int load_raw_btf(const void *raw_data, size_t raw_size)
+{
+ LIBBPF_OPTS(bpf_btf_load_opts, opts);
+ int btf_fd;
+
+ if (always_log) {
+ opts.log_buf = btf_log_buf;
+ opts.log_size = BTF_LOG_BUF_SIZE;
+ opts.log_level = 1;
+ }
+
+ btf_fd = bpf_btf_load(raw_data, raw_size, &opts);
+ if (btf_fd < 0 && !always_log) {
+ opts.log_buf = btf_log_buf;
+ opts.log_size = BTF_LOG_BUF_SIZE;
+ opts.log_level = 1;
+ btf_fd = bpf_btf_load(raw_data, raw_size, &opts);
+ }
+
+ return btf_fd;
+}
+
static void do_test_raw(unsigned int test_num)
{
struct btf_raw_test *test = &raw_tests[test_num - 1];
@@ -4100,16 +4122,14 @@ static void do_test_raw(unsigned int test_num)
hdr->str_len = (int)hdr->str_len + test->str_len_delta;
*btf_log_buf = '\0';
- btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
+ btf_fd = load_raw_btf(raw_btf, raw_btf_size);
free(raw_btf);
err = ((btf_fd < 0) != test->btf_load_err);
if (CHECK(err, "btf_fd:%d test->btf_load_err:%u",
btf_fd, test->btf_load_err) ||
CHECK(test->err_str && !strstr(btf_log_buf, test->err_str),
- "expected err_str:%s", test->err_str)) {
+ "expected err_str:%s\n", test->err_str)) {
err = -1;
goto done;
}
@@ -4227,9 +4247,7 @@ static int test_big_btf_info(unsigned int test_num)
goto done;
}
- btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
+ btf_fd = load_raw_btf(raw_btf, raw_btf_size);
if (CHECK(btf_fd < 0, "errno:%d", errno)) {
err = -1;
goto done;
@@ -4315,9 +4333,7 @@ static int test_btf_id(unsigned int test_num)
info[i].btf_size = raw_btf_size;
}
- btf_fd[0] = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
+ btf_fd[0] = load_raw_btf(raw_btf, raw_btf_size);
if (CHECK(btf_fd[0] < 0, "errno:%d", errno)) {
err = -1;
goto done;
@@ -4447,9 +4463,7 @@ static void do_test_get_info(unsigned int test_num)
goto done;
}
- btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
+ btf_fd = load_raw_btf(raw_btf, raw_btf_size);
if (CHECK(btf_fd <= 0, "errno:%d", errno)) {
err = -1;
goto done;
@@ -5169,12 +5183,10 @@ static void do_test_pprint(int test_num)
return;
*btf_log_buf = '\0';
- btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
+ btf_fd = load_raw_btf(raw_btf, raw_btf_size);
free(raw_btf);
- if (CHECK(btf_fd < 0, "errno:%d", errno)) {
+ if (CHECK(btf_fd < 0, "errno:%d\n", errno)) {
err = -1;
goto done;
}
@@ -6538,9 +6550,7 @@ static void do_test_info_raw(unsigned int test_num)
return;
*btf_log_buf = '\0';
- btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
- btf_log_buf, BTF_LOG_BUF_SIZE,
- always_log);
+ btf_fd = load_raw_btf(raw_btf, raw_btf_size);
free(raw_btf);
if (CHECK(btf_fd < 0, "invalid btf_fd errno:%d", errno)) {
diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c
new file mode 100644
index 000000000000..e469b023962b
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "test_log_buf.skel.h"
+
+static size_t libbpf_log_pos;
+static char libbpf_log_buf[1024 * 1024];
+static bool libbpf_log_error;
+
+static int libbpf_print_cb(enum libbpf_print_level level, const char *fmt, va_list args)
+{
+ int emitted_cnt;
+ size_t left_cnt;
+
+ left_cnt = sizeof(libbpf_log_buf) - libbpf_log_pos;
+ emitted_cnt = vsnprintf(libbpf_log_buf + libbpf_log_pos, left_cnt, fmt, args);
+
+ if (emitted_cnt < 0 || emitted_cnt + 1 > left_cnt) {
+ libbpf_log_error = true;
+ return 0;
+ }
+
+ libbpf_log_pos += emitted_cnt;
+ return 0;
+}
+
+static void obj_load_log_buf(void)
+{
+ libbpf_print_fn_t old_print_cb = libbpf_set_print(libbpf_print_cb);
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ const size_t log_buf_sz = 1024 * 1024;
+ struct test_log_buf *skel;
+ char *obj_log_buf, *good_log_buf, *bad_log_buf;
+ int err;
+
+ obj_log_buf = malloc(3 * log_buf_sz);
+ if (!ASSERT_OK_PTR(obj_log_buf, "obj_log_buf"))
+ return;
+
+ good_log_buf = obj_log_buf + log_buf_sz;
+ bad_log_buf = obj_log_buf + 2 * log_buf_sz;
+ obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';
+
+ opts.kernel_log_buf = obj_log_buf;
+ opts.kernel_log_size = log_buf_sz;
+ opts.kernel_log_level = 4; /* for BTF this will turn into 1 */
+
+ /* In the first round every prog has its own log_buf, so libbpf logs
+ * don't have program failure logs
+ */
+ skel = test_log_buf__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ /* set very verbose level for good_prog so we always get detailed logs */
+ bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
+ bpf_program__set_log_level(skel->progs.good_prog, 2);
+
+ bpf_program__set_log_buf(skel->progs.bad_prog, bad_log_buf, log_buf_sz);
+ /* log_level 0 with custom log_buf means that verbose logs are not
+ * requested if program load is successful, but libbpf should retry
+ * with log_level 1 on error and put program's verbose load log into
+ * custom log_buf
+ */
+ bpf_program__set_log_level(skel->progs.bad_prog, 0);
+
+ err = test_log_buf__load(skel);
+ if (!ASSERT_ERR(err, "unexpected_load_success"))
+ goto cleanup;
+
+ ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");
+
+ /* there should be no prog loading log because we specified per-prog log buf */
+ ASSERT_NULL(strstr(libbpf_log_buf, "-- BEGIN PROG LOAD LOG --"), "unexp_libbpf_log");
+ ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
+ "libbpf_log_not_empty");
+ ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
+ ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"),
+ "good_log_verbose");
+ ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
+ "bad_log_not_empty");
+
+ if (env.verbosity > VERBOSE_NONE) {
+ printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf);
+ printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf);
+ printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
+ printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf);
+ }
+
+ /* reset everything */
+ test_log_buf__destroy(skel);
+ obj_log_buf[0] = good_log_buf[0] = bad_log_buf[0] = '\0';
+ libbpf_log_buf[0] = '\0';
+ libbpf_log_pos = 0;
+ libbpf_log_error = false;
+
+ /* In the second round we let bad_prog's failure be logged through print callback */
+ opts.kernel_log_buf = NULL; /* let everything through into print callback */
+ opts.kernel_log_size = 0;
+ opts.kernel_log_level = 1;
+
+ skel = test_log_buf__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ goto cleanup;
+
+ /* set normal verbose level for good_prog to check log_level is taken into account */
+ bpf_program__set_log_buf(skel->progs.good_prog, good_log_buf, log_buf_sz);
+ bpf_program__set_log_level(skel->progs.good_prog, 1);
+
+ err = test_log_buf__load(skel);
+ if (!ASSERT_ERR(err, "unexpected_load_success"))
+ goto cleanup;
+
+ ASSERT_FALSE(libbpf_log_error, "libbpf_log_error");
+
+ /* this time prog loading error should be logged through print callback */
+ ASSERT_OK_PTR(strstr(libbpf_log_buf, "libbpf: prog 'bad_prog': -- BEGIN PROG LOAD LOG --"),
+ "libbpf_log_correct");
+ ASSERT_STREQ(obj_log_buf, "", "obj_log__empty");
+ ASSERT_STREQ(good_log_buf, "processed 4 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
+ "good_log_ok");
+ ASSERT_STREQ(bad_log_buf, "", "bad_log_empty");
+
+ if (env.verbosity > VERBOSE_NONE) {
+ printf("LIBBPF LOG: \n=================\n%s=================\n", libbpf_log_buf);
+ printf("OBJ LOG: \n=================\n%s=================\n", obj_log_buf);
+ printf("GOOD_PROG LOG:\n=================\n%s=================\n", good_log_buf);
+ printf("BAD_PROG LOG:\n=================\n%s=================\n", bad_log_buf);
+ }
+
+cleanup:
+ free(obj_log_buf);
+ test_log_buf__destroy(skel);
+ libbpf_set_print(old_print_cb);
+}
+
+static void bpf_prog_load_log_buf(void)
+{
+ const struct bpf_insn good_prog_insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ const size_t good_prog_insn_cnt = sizeof(good_prog_insns) / sizeof(struct bpf_insn);
+ const struct bpf_insn bad_prog_insns[] = {
+ BPF_EXIT_INSN(),
+ };
+ size_t bad_prog_insn_cnt = sizeof(bad_prog_insns) / sizeof(struct bpf_insn);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ const size_t log_buf_sz = 1024 * 1024;
+ char *log_buf;
+ int fd = -1;
+
+ log_buf = malloc(log_buf_sz);
+ if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
+ return;
+ opts.log_buf = log_buf;
+ opts.log_size = log_buf_sz;
+
+ /* with log_level == 0, log_buf should stay empty for good prog */
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
+ good_prog_insns, good_prog_insn_cnt, &opts);
+ ASSERT_STREQ(log_buf, "", "good_log_0");
+ ASSERT_GE(fd, 0, "good_fd1");
+ if (fd >= 0)
+ close(fd);
+ fd = -1;
+
+ /* log_level == 2 should always fill log_buf, even for good prog */
+ log_buf[0] = '\0';
+ opts.log_level = 2;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
+ good_prog_insns, good_prog_insn_cnt, &opts);
+ ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), "good_log_2");
+ ASSERT_GE(fd, 0, "good_fd2");
+ if (fd >= 0)
+ close(fd);
+ fd = -1;
+
+ /* log_level == 0 should fill log_buf for bad prog */
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "bad_prog", "GPL",
+ bad_prog_insns, bad_prog_insn_cnt, &opts);
+ ASSERT_OK_PTR(strstr(log_buf, "R0 !read_ok"), "bad_log_0");
+ ASSERT_LT(fd, 0, "bad_fd");
+ if (fd >= 0)
+ close(fd);
+ fd = -1;
+
+ free(log_buf);
+}
+
+static void bpf_btf_load_log_buf(void)
+{
+ LIBBPF_OPTS(bpf_btf_load_opts, opts);
+ const size_t log_buf_sz = 1024 * 1024;
+ const void *raw_btf_data;
+ __u32 raw_btf_size;
+ struct btf *btf;
+ char *log_buf;
+ int fd = -1;
+
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "empty_btf"))
+ return;
+
+ ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type");
+
+ raw_btf_data = btf__raw_data(btf, &raw_btf_size);
+ if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
+ goto cleanup;
+
+ log_buf = malloc(log_buf_sz);
+ if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
+ goto cleanup;
+ opts.log_buf = log_buf;
+ opts.log_size = log_buf_sz;
+
+ /* with log_level == 0, log_buf should stay empty for good BTF */
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
+ ASSERT_STREQ(log_buf, "", "good_log_0");
+ ASSERT_GE(fd, 0, "good_fd1");
+ if (fd >= 0)
+ close(fd);
+ fd = -1;
+
+ /* log_level == 2 should always fill log_buf, even for good BTF */
+ log_buf[0] = '\0';
+ opts.log_level = 2;
+ fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
+ printf("LOG_BUF: %s\n", log_buf);
+ ASSERT_OK_PTR(strstr(log_buf, "magic: 0xeb9f"), "good_log_2");
+ ASSERT_GE(fd, 0, "good_fd2");
+ if (fd >= 0)
+ close(fd);
+ fd = -1;
+
+ /* make BTF bad, add pointer pointing to non-existing type */
+ ASSERT_GT(btf__add_ptr(btf, 100), 0, "bad_ptr_type");
+
+ raw_btf_data = btf__raw_data(btf, &raw_btf_size);
+ if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_bad"))
+ goto cleanup;
+
+ /* log_level == 0 should fill log_buf for bad BTF */
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_btf_load(raw_btf_data, raw_btf_size, &opts);
+ printf("LOG_BUF: %s\n", log_buf);
+ ASSERT_OK_PTR(strstr(log_buf, "[2] PTR (anon) type_id=100 Invalid type_id"), "bad_log_0");
+ ASSERT_LT(fd, 0, "bad_fd");
+ if (fd >= 0)
+ close(fd);
+ fd = -1;
+
+cleanup:
+ free(log_buf);
+ btf__free(btf);
+}
+
+void test_log_buf(void)
+{
+ if (test__start_subtest("obj_load_log_buf"))
+ obj_load_log_buf();
+ if (test__start_subtest("bpf_prog_load_log_buf"))
+ bpf_prog_load_log_buf();
+ if (test__start_subtest("bpf_btf_load_log_buf"))
+ bpf_btf_load_log_buf();
+}
diff --git a/tools/testing/selftests/bpf/progs/test_log_buf.c b/tools/testing/selftests/bpf/progs/test_log_buf.c
new file mode 100644
index 000000000000..199f459bd5ae
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_log_buf.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int a[4];
+const volatile int off = 4000;
+
+SEC("raw_tp/sys_enter")
+int good_prog(const void *ctx)
+{
+ a[0] = (int)(long)ctx;
+ return a[1];
+}
+
+SEC("raw_tp/sys_enter")
+int bad_prog(const void *ctx)
+{
+ /* out of bounds access */
+ return a[off];
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 222cb063ddf4..07b88a8f504f 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -641,7 +641,7 @@ static int load_btf(void)
memcpy(ptr, btf_str_sec, hdr.str_len);
ptr += hdr.str_len;
- btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
+ btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, NULL);
free(raw_btf);
if (btf_fd < 0)
return -1;
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 0f1c37ac6f2c..795b6798ccee 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -88,13 +88,15 @@ int extra_prog_load_log_flags = 0;
int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd)
{
- struct bpf_object_load_attr attr = {};
+ LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .kernel_log_level = extra_prog_load_log_flags,
+ );
struct bpf_object *obj;
struct bpf_program *prog;
__u32 flags;
int err;
- obj = bpf_object__open(file);
+ obj = bpf_object__open_file(file, &opts);
if (!obj)
return -errno;
@@ -110,9 +112,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type,
flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32;
bpf_program__set_flags(prog, flags);
- attr.obj = obj;
- attr.log_level = extra_prog_load_log_flags;
- err = bpf_object__load_xattr(&attr);
+ err = bpf_object__load(obj);
if (err)
goto err_out;