Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/atomics.c             | 246
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c      |  33
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c |   4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/core_read_macros.c    |  64
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ksyms_module.c        |  31
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_lsm.c            |   1
6 files changed, 343 insertions(+), 36 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c
new file mode 100644
index 000000000000..21efe7bbf10d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/atomics.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include "atomics.skel.h"
+
+static void test_add(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.add);
+ if (CHECK(IS_ERR(link), "attach(add)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.add);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run add",
+ "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
+ ASSERT_EQ(skel->bss->add64_result, 1, "add64_result");
+
+ ASSERT_EQ(skel->data->add32_value, 3, "add32_value");
+ ASSERT_EQ(skel->bss->add32_result, 1, "add32_result");
+
+ ASSERT_EQ(skel->bss->add_stack_value_copy, 3, "add_stack_value");
+ ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");
+
+ ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
+
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_sub(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.sub);
+ if (CHECK(IS_ERR(link), "attach(sub)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.sub);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run sub",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");
+ ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result");
+
+ ASSERT_EQ(skel->data->sub32_value, -1, "sub32_value");
+ ASSERT_EQ(skel->bss->sub32_result, 1, "sub32_result");
+
+ ASSERT_EQ(skel->bss->sub_stack_value_copy, -1, "sub_stack_value");
+ ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");
+
+ ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
+
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_and(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.and);
+ if (CHECK(IS_ERR(link), "attach(and)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.and);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run and",
+ "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");
+ ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result");
+
+ ASSERT_EQ(skel->data->and32_value, 0x010, "and32_value");
+ ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");
+
+ ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_or(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.or);
+ if (CHECK(IS_ERR(link), "attach(or)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.or);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run or",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");
+ ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result");
+
+ ASSERT_EQ(skel->data->or32_value, 0x111, "or32_value");
+ ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");
+
+ ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_xor(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.xor);
+ if (CHECK(IS_ERR(link), "attach(xor)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.xor);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run xor",
+ "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");
+ ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result");
+
+ ASSERT_EQ(skel->data->xor32_value, 0x101, "xor32_value");
+ ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");
+
+ ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_noreturn_value");
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_cmpxchg(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.cmpxchg);
+ if (CHECK(IS_ERR(link), "attach(cmpxchg)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.cmpxchg);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run cmpxchg",
+ "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");
+ ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg64_result_fail");
+ ASSERT_EQ(skel->bss->cmpxchg64_result_succeed, 1, "cmpxchg64_result_succeed");
+
+ ASSERT_EQ(skel->data->cmpxchg32_value, 2, "cmpxchg32_value");
+ ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg32_result_fail");
+ ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg32_result_succeed");
+
+cleanup:
+ bpf_link__destroy(link);
+}
+
+static void test_xchg(struct atomics *skel)
+{
+ int err, prog_fd;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+
+ link = bpf_program__attach(skel->progs.xchg);
+ if (CHECK(IS_ERR(link), "attach(xchg)", "err: %ld\n", PTR_ERR(link)))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.xchg);
+ err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "test_run xchg",
+ "err %d errno %d retval %d duration %d\n", err, errno, retval, duration))
+ goto cleanup;
+
+ ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");
+ ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");
+
+ ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");
+ ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
+
+cleanup:
+ bpf_link__destroy(link);
+}
+
+void test_atomics(void)
+{
+ struct atomics *skel;
+ __u32 duration = 0;
+
+ skel = atomics__open_and_load();
+ if (CHECK(!skel, "skel_load", "atomics skeleton failed\n"))
+ return;
+
+ if (skel->data->skip_tests) {
+ printf("%s:SKIP:no ENABLE_ATOMICS_TESTS (missing Clang BPF atomics support)\n",
+ __func__);
+ test__skip();
+ goto cleanup;
+ }
+
+ if (test__start_subtest("add"))
+ test_add(skel);
+ if (test__start_subtest("sub"))
+ test_sub(skel);
+ if (test__start_subtest("and"))
+ test_and(skel);
+ if (test__start_subtest("or"))
+ test_or(skel);
+ if (test__start_subtest("xor"))
+ test_xor(skel);
+ if (test__start_subtest("cmpxchg"))
+ test_cmpxchg(skel);
+ if (test__start_subtest("xchg"))
+ test_xchg(skel);
+
+cleanup:
+ atomics__destroy(skel);
+}
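
Not part of this diff: the checks above are driven by atomics.skel.h, generated from a BPF object (by selftest convention, progs/atomics.c) that this patch does not show. Below is a minimal sketch of what the "add" subtest's BPF side could look like, inferred from the values asserted above (initial value 1, atomic add of 2, fetched old value 1); the attach point, the ENABLE_ATOMICS_TESTS guard, the skip_tests placement and the use of __sync_fetch_and_add() are assumptions made for illustration, not code taken from this patch.

// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#ifdef ENABLE_ATOMICS_TESTS
/* force into .data even though it is zero, so the skeleton exposes it there */
bool skip_tests __attribute((__section__(".data"))) = false;
#else
bool skip_tests = true;
#endif

__u64 add64_value = 1;		/* .data, checked as skel->data->add64_value */
__u64 add64_result = 0;		/* .bss, checked as skel->bss->add64_result */
__u64 add_noreturn_value = 1;

SEC("fentry/bpf_fentry_test1")
int BPF_PROG(add)
{
#ifdef ENABLE_ATOMICS_TESTS
	/* fetch-and-add: leaves 3 in memory and returns the old value, 1 */
	add64_result = __sync_fetch_and_add(&add64_value, 2);
	/* result discarded, so the compiler may emit a non-fetching atomic add */
	__sync_fetch_and_add(&add_noreturn_value, 2);
#endif
	return 0;
}

char _license[] SEC("license") = "GPL";

The same pattern, with different operands and builtins, would cover the sub/and/or/xor/cmpxchg/xchg subtests exercised above.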
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
index 76ebe4c250f1..eb90a6b8850d 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
@@ -20,39 +20,6 @@ static __u32 bpf_map_id(struct bpf_map *map)
return info.id;
}
-/*
- * Trigger synchronize_rcu() in kernel.
- *
- * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger synchronize_rcu()
- * if looking up an existing non-NULL element or updating the map with a valid
- * inner map FD. Use this fact to trigger synchronize_rcu(): create map-in-map,
- * create a trivial ARRAY map, update map-in-map with ARRAY inner map. Then
- * cleanup. At the end, at least one synchronize_rcu() would be called.
- */
-static int kern_sync_rcu(void)
-{
- int inner_map_fd, outer_map_fd, err, zero = 0;
-
- inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
- if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
- return -1;
-
- outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
- sizeof(int), inner_map_fd, 1, 0);
- if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
- close(inner_map_fd);
- return -1;
- }
-
- err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
- if (err)
- err = -errno;
- CHECK(err, "outer_map_update", "failed %d\n", err);
- close(inner_map_fd);
- close(outer_map_fd);
- return err;
-}
-
static void test_lookup_update(void)
{
int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
index b549fcfacc0b..0a1fc9816cef 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
@@ -45,13 +45,13 @@ static int prog_load_cnt(int verdict, int val)
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+ BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_MOV64_IMM(BPF_REG_1, val),
- BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
BPF_MOV64_IMM(BPF_REG_2, 0),
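
Not part of this diff: the two replaced instructions above are encoding-compatible with what they replace. BPF_ATOMIC reuses the 0xc0 mode bits that BPF_XADD occupied and moves the choice of operation into the instruction's imm field, so imm == BPF_ADD preserves the old exclusive-add behaviour while leaving room for the newer fetch, exchange and compare-exchange atomics. The sketch below follows the shape of the helper macro in tools/include/linux/filter.h and is shown only for illustration; it assumes a uapi linux/bpf.h recent enough to define BPF_ATOMIC.

#include <linux/bpf.h>		/* struct bpf_insn, BPF_* opcode bits */

#ifndef BPF_ATOMIC_OP
#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = OP })
#endif

void example(void)
{
	/* 64-bit "lock *(u64 *)(r0 + 0) += r1": imm = BPF_ADD reproduces the
	 * removed BPF_STX | BPF_XADD encoding, while other imm values
	 * (BPF_AND, BPF_OR, BPF_XCHG, BPF_CMPXCHG, ...) select the new atomics.
	 */
	struct bpf_insn insn = BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0);

	(void)insn;
}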
diff --git a/tools/testing/selftests/bpf/prog_tests/core_read_macros.c b/tools/testing/selftests/bpf/prog_tests/core_read_macros.c
new file mode 100644
index 000000000000..96f5cf3c6fa2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/core_read_macros.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <test_progs.h>
+
+struct callback_head {
+ struct callback_head *next;
+ void (*func)(struct callback_head *head);
+};
+
+/* The ___shuffled flavor is just an illusion for the BPF code; it doesn't
+ * really exist, and user-space has to provide data in the memory layout that
+ * matches callback_head. The ___shuffled flavor is defined here only to make
+ * it easier to work with the skeleton.
+ */
+struct callback_head___shuffled {
+ struct callback_head___shuffled *next;
+ void (*func)(struct callback_head *head);
+};
+
+#include "test_core_read_macros.skel.h"
+
+void test_core_read_macros(void)
+{
+ int duration = 0, err;
+ struct test_core_read_macros *skel;
+ struct test_core_read_macros__bss *bss;
+ struct callback_head u_probe_in;
+ struct callback_head___shuffled u_core_in;
+
+ skel = test_core_read_macros__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+ bss = skel->bss;
+ bss->my_pid = getpid();
+
+ /* next pointers have to be set from the kernel side */
+ bss->k_probe_in.func = (void *)(long)0x1234;
+ bss->k_core_in.func = (void *)(long)0xabcd;
+
+ u_probe_in.next = &u_probe_in;
+ u_probe_in.func = (void *)(long)0x5678;
+ bss->u_probe_in = &u_probe_in;
+
+ u_core_in.next = &u_core_in;
+ u_core_in.func = (void *)(long)0xdbca;
+ bss->u_core_in = &u_core_in;
+
+ err = test_core_read_macros__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ ASSERT_EQ(bss->k_probe_out, 0x1234, "k_probe_out");
+ ASSERT_EQ(bss->k_core_out, 0xabcd, "k_core_out");
+
+ ASSERT_EQ(bss->u_probe_out, 0x5678, "u_probe_out");
+ ASSERT_EQ(bss->u_core_out, 0xdbca, "u_core_out");
+
+cleanup:
+ test_core_read_macros__destroy(skel);
+}
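
Not part of this diff: the checks above assume a BPF counterpart (progs/test_core_read_macros.c, generated into test_core_read_macros.skel.h) that is not included in this hunk. A rough sketch of what that side could look like is shown below; the variable names mirror the skeleton accesses above, while the section name, the shuffled field order and the exact read macros are assumptions made for illustration. The point of the ___shuffled flavor is that the BPF code's local layout is deliberately different, and the CO-RE reads are relocated against the kernel's real callback_head so they still land on the offsets user space actually filled in.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *head);
};

/* deliberately shuffled field order; CO-RE matches "func" against the
 * kernel's callback_head, so relocated reads still hit the right offset */
struct callback_head___shuffled {
	void (*func)(struct callback_head___shuffled *head);
	struct callback_head___shuffled *next;
};

struct callback_head k_probe_in = {};
struct callback_head___shuffled k_core_in = {};

struct callback_head *u_probe_in = 0;
struct callback_head___shuffled *u_core_in = 0;

long k_probe_out = 0, k_core_out = 0;
long u_probe_out = 0, u_core_out = 0;
int my_pid = 0;

SEC("raw_tp/sys_enter")
int handler(void *ctx)
{
	if ((int)(bpf_get_current_pid_tgid() >> 32) != my_pid)
		return 0;

	/* non-CO-RE reads use the local layout as-is */
	k_probe_out = (long)BPF_PROBE_READ(&k_probe_in, func);
	u_probe_out = (long)BPF_PROBE_READ_USER(u_probe_in, func);

	/* CO-RE reads: the ___shuffled flavor is relocated to callback_head */
	k_core_out = (long)BPF_CORE_READ(&k_core_in, func);
	u_core_out = (long)BPF_CORE_READ_USER(u_core_in, func);
	return 0;
}

char _license[] SEC("license") = "GPL";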
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
new file mode 100644
index 000000000000..4c232b456479
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+#include "test_ksyms_module.skel.h"
+
+static int duration;
+
+void test_ksyms_module(void)
+{
+ struct test_ksyms_module *skel;
+ int err;
+
+ skel = test_ksyms_module__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+
+ err = test_ksyms_module__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ goto cleanup;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->triggered, true, "triggered");
+ ASSERT_EQ(skel->bss->out_mod_ksym_global, 123, "global_ksym_val");
+
+cleanup:
+ test_ksyms_module__destroy(skel);
+}
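
Not part of this diff: a minimal sketch of the BPF side this test assumes (progs/test_ksyms_module.c, compiled into test_ksyms_module.skel.h, is not part of this patch). "triggered" and "out_mod_ksym_global" mirror the skeleton accesses above; the module symbol name, the section and the use of bpf_this_cpu_ptr() are assumptions made for illustration. The general pattern is declaring a symbol exported by a kernel module with libbpf's __ksym attribute and dereferencing it from the program; the value the test expects (123) would be whatever the test module publishes.

// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* hypothetical per-CPU variable exported by the selftest kernel module */
extern const int bpf_testmod_ksym_percpu __ksym;

__u64 out_mod_ksym_global = 0;
bool triggered = false;

SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
	/* resolve the module ksym for this CPU and record its value */
	out_mod_ksym_global = *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
	triggered = true;
	return 0;
}

char _license[] SEC("license") = "GPL";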
diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
index 6ab29226c99b..2755e4f81499 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
@@ -10,7 +10,6 @@
#include <unistd.h>
#include <malloc.h>
#include <stdlib.h>
-#include <unistd.h>
#include "lsm.skel.h"