author	David Vernet <void@manifault.com>	2023-02-01 11:30:15 -0600
committer	Daniel Borkmann <daniel@iogearbox.net>	2023-02-02 00:25:14 +0100
commit	400031e05adfcef9e80eca80bdfc3f4b63658be4 (patch)
tree	fcac625bc35298cd1182e4415fea360573b69b8b /kernel/bpf
parent	98e6ab7a04353c95b1f4bad345d156ded865fd96 (diff)
bpf: Add __bpf_kfunc tag to all kfuncs
Now that we have the __bpf_kfunc tag, we should add it to all existing
kfuncs to ensure that they'll never be elided in LTO builds (a short
sketch of what the tag provides follows below).
Signed-off-by: David Vernet <void@manifault.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/bpf/20230201173016.342758-4-void@manifault.com
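For context, the __bpf_kfunc tag referenced above was introduced earlier in
this series in include/linux/btf.h. A minimal sketch of the idea, assuming
the macro definition from that patch, is below; the comments note what each
attribute contributes:

/* Sketch, assuming the definition added earlier in this series in
 * include/linux/btf.h:
 */
#define __bpf_kfunc __used noinline

/* __used keeps the compiler and LTO from discarding a function that has no
 * in-kernel callers; noinline keeps an out-of-line definition, so the symbol
 * and its BTF entry remain available for the verifier to resolve.
 */
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void);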
Diffstat (limited to 'kernel/bpf')
-rw-r--r--	kernel/bpf/cpumask.c | 60
-rw-r--r--	kernel/bpf/helpers.c | 38
2 files changed, 49 insertions(+), 49 deletions(-)
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index 6bbb67dfc998..52b981512a35 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -48,7 +48,7 @@ __diag_ignore_all("-Wmissing-prototypes",
  * bpf_cpumask_create() allocates memory using the BPF memory allocator, and
  * will not block. It may return NULL if no memory is available.
  */
-struct bpf_cpumask *bpf_cpumask_create(void)
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_create(void)
 {
 	struct bpf_cpumask *cpumask;
 
@@ -74,7 +74,7 @@ struct bpf_cpumask *bpf_cpumask_create(void)
  * must either be embedded in a map as a kptr, or freed with
  * bpf_cpumask_release().
  */
-struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
 {
 	refcount_inc(&cpumask->usage);
 	return cpumask;
@@ -90,7 +90,7 @@ struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
  * kptr, or freed with bpf_cpumask_release(). This function may return NULL if
  * no BPF cpumask was found in the specified map value.
  */
-struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
+__bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
 {
 	struct bpf_cpumask *cpumask;
 
@@ -116,7 +116,7 @@ struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
  * reference of the BPF cpumask has been released, it is subsequently freed in
  * an RCU callback in the BPF memory allocator.
  */
-void bpf_cpumask_release(struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
 {
 	if (!cpumask)
 		return;
@@ -135,7 +135,7 @@ void bpf_cpumask_release(struct bpf_cpumask *cpumask)
  * Find the index of the first nonzero bit of the cpumask. A struct bpf_cpumask
  * pointer may be safely passed to this function.
  */
-u32 bpf_cpumask_first(const struct cpumask *cpumask)
+__bpf_kfunc u32 bpf_cpumask_first(const struct cpumask *cpumask)
 {
 	return cpumask_first(cpumask);
 }
@@ -148,7 +148,7 @@ u32 bpf_cpumask_first(const struct cpumask *cpumask)
  * Find the index of the first unset bit of the cpumask. A struct bpf_cpumask
  * pointer may be safely passed to this function.
  */
-u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
+__bpf_kfunc u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
 {
 	return cpumask_first_zero(cpumask);
 }
@@ -158,7 +158,7 @@ u32 bpf_cpumask_first_zero(const struct cpumask *cpumask)
  * @cpu: The CPU to be set in the cpumask.
  * @cpumask: The BPF cpumask in which a bit is being set.
  */
-void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 {
 	if (!cpu_valid(cpu))
 		return;
@@ -171,7 +171,7 @@ void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
  * @cpu: The CPU to be cleared from the cpumask.
  * @cpumask: The BPF cpumask in which a bit is being cleared.
  */
-void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 {
 	if (!cpu_valid(cpu))
 		return;
@@ -188,7 +188,7 @@ void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
  * * true  - @cpu is set in the cpumask
  * * false - @cpu was not set in the cpumask, or @cpu is an invalid cpu.
  */
-bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
 {
 	if (!cpu_valid(cpu))
 		return false;
@@ -205,7 +205,7 @@ bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask)
  * * true  - @cpu is set in the cpumask
  * * false - @cpu was not set in the cpumask, or @cpu is invalid.
  */
-bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 {
 	if (!cpu_valid(cpu))
 		return false;
@@ -223,7 +223,7 @@ bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask)
  * * true  - @cpu is set in the cpumask
  * * false - @cpu was not set in the cpumask, or @cpu is invalid.
  */
-bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
 {
 	if (!cpu_valid(cpu))
 		return false;
@@ -235,7 +235,7 @@ bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask)
  * bpf_cpumask_setall() - Set all of the bits in a BPF cpumask.
  * @cpumask: The BPF cpumask having all of its bits set.
  */
-void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
 {
 	cpumask_setall((struct cpumask *)cpumask);
 }
@@ -244,7 +244,7 @@ void bpf_cpumask_setall(struct bpf_cpumask *cpumask)
  * bpf_cpumask_clear() - Clear all of the bits in a BPF cpumask.
  * @cpumask: The BPF cpumask being cleared.
  */
-void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
+__bpf_kfunc void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
 {
 	cpumask_clear((struct cpumask *)cpumask);
 }
@@ -261,9 +261,9 @@ void bpf_cpumask_clear(struct bpf_cpumask *cpumask)
  *
  * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
  */
-bool bpf_cpumask_and(struct bpf_cpumask *dst,
-		     const struct cpumask *src1,
-		     const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_and(struct bpf_cpumask *dst,
+				 const struct cpumask *src1,
+				 const struct cpumask *src2)
 {
 	return cpumask_and((struct cpumask *)dst, src1, src2);
 }
@@ -276,9 +276,9 @@ bool bpf_cpumask_and(struct bpf_cpumask *dst,
  *
  * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
  */
-void bpf_cpumask_or(struct bpf_cpumask *dst,
-		    const struct cpumask *src1,
-		    const struct cpumask *src2)
+__bpf_kfunc void bpf_cpumask_or(struct bpf_cpumask *dst,
+				const struct cpumask *src1,
+				const struct cpumask *src2)
 {
 	cpumask_or((struct cpumask *)dst, src1, src2);
 }
@@ -291,9 +291,9 @@ void bpf_cpumask_or(struct bpf_cpumask *dst,
  *
  * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
  */
-void bpf_cpumask_xor(struct bpf_cpumask *dst,
-		     const struct cpumask *src1,
-		     const struct cpumask *src2)
+__bpf_kfunc void bpf_cpumask_xor(struct bpf_cpumask *dst,
+				 const struct cpumask *src1,
+				 const struct cpumask *src2)
 {
 	cpumask_xor((struct cpumask *)dst, src1, src2);
 }
@@ -309,7 +309,7 @@ void bpf_cpumask_xor(struct bpf_cpumask *dst,
  *
  * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
  */
-bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
 {
 	return cpumask_equal(src1, src2);
 }
@@ -325,7 +325,7 @@ bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2)
  *
  * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
  */
-bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2)
 {
 	return cpumask_intersects(src1, src2);
 }
@@ -341,7 +341,7 @@ bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *sr
  *
  * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
  */
-bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
 {
 	return cpumask_subset(src1, src2);
 }
@@ -356,7 +356,7 @@ bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2)
  *
  * A struct bpf_cpumask pointer may be safely passed to @cpumask.
  */
-bool bpf_cpumask_empty(const struct cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_empty(const struct cpumask *cpumask)
 {
 	return cpumask_empty(cpumask);
 }
@@ -371,7 +371,7 @@ bool bpf_cpumask_empty(const struct cpumask *cpumask)
  *
  * A struct bpf_cpumask pointer may be safely passed to @cpumask.
  */
-bool bpf_cpumask_full(const struct cpumask *cpumask)
+__bpf_kfunc bool bpf_cpumask_full(const struct cpumask *cpumask)
 {
 	return cpumask_full(cpumask);
 }
@@ -383,7 +383,7 @@ bool bpf_cpumask_full(const struct cpumask *cpumask)
  *
  * A struct bpf_cpumask pointer may be safely passed to @src.
  */
-void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
+__bpf_kfunc void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
 {
 	cpumask_copy((struct cpumask *)dst, src);
 }
@@ -398,7 +398,7 @@ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src)
  *
  * A struct bpf_cpumask pointer may be safely passed to @src.
  */
-u32 bpf_cpumask_any(const struct cpumask *cpumask)
+__bpf_kfunc u32 bpf_cpumask_any(const struct cpumask *cpumask)
 {
 	return cpumask_any(cpumask);
 }
@@ -415,7 +415,7 @@ u32 bpf_cpumask_any(const struct cpumask *cpumask)
  *
  * struct bpf_cpumask pointers may be safely passed to @src1 and @src2.
  */
-u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
+__bpf_kfunc u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2)
 {
 	return cpumask_any_and(src1, src2);
 }
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 458db2db2f81..2dae44581922 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1776,7 +1776,7 @@ __diag_push();
 __diag_ignore_all("-Wmissing-prototypes",
 		  "Global functions as their definitions will be in vmlinux BTF");
 
-void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
+__bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
 {
 	struct btf_struct_meta *meta = meta__ign;
 	u64 size = local_type_id__k;
@@ -1790,7 +1790,7 @@ void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
 	return p;
 }
 
-void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
+__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
 {
 	struct btf_struct_meta *meta = meta__ign;
 	void *p = p__alloc;
@@ -1811,12 +1811,12 @@ static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *hea
 	tail ? list_add_tail(n, h) : list_add(n, h);
 }
 
-void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
+__bpf_kfunc void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node)
 {
 	return __bpf_list_add(node, head, false);
 }
 
-void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
+__bpf_kfunc void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node)
 {
 	return __bpf_list_add(node, head, true);
 }
@@ -1834,12 +1834,12 @@ static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tai
 	return (struct bpf_list_node *)n;
 }
 
-struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
+__bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
 {
 	return __bpf_list_del(head, false);
 }
 
-struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
+__bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
 {
 	return __bpf_list_del(head, true);
 }
@@ -1850,7 +1850,7 @@ struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
  * bpf_task_release().
  * @p: The task on which a reference is being acquired.
  */
-struct task_struct *bpf_task_acquire(struct task_struct *p)
+__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
 {
 	return get_task_struct(p);
 }
@@ -1861,7 +1861,7 @@ struct task_struct *bpf_task_acquire(struct task_struct *p)
  * released by calling bpf_task_release().
  * @p: The task on which a reference is being acquired.
  */
-struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
+__bpf_kfunc struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
 {
 	/* For the time being this function returns NULL, as it's not currently
 	 * possible to safely acquire a reference to a task with RCU protection
@@ -1913,7 +1913,7 @@ struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
  * be released by calling bpf_task_release().
 * @pp: A pointer to a task kptr on which a reference is being acquired.
 */
-struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
+__bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
 {
 	/* We must return NULL here until we have clarity on how to properly
 	 * leverage RCU for ensuring a task's lifetime. See the comment above
@@ -1926,7 +1926,7 @@ struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
  * bpf_task_release - Release the reference acquired on a task.
 * @p: The task on which a reference is being released.
 */
-void bpf_task_release(struct task_struct *p)
+__bpf_kfunc void bpf_task_release(struct task_struct *p)
 {
 	if (!p)
 		return;
@@ -1941,7 +1941,7 @@ void bpf_task_release(struct task_struct *p)
  * calling bpf_cgroup_release().
 * @cgrp: The cgroup on which a reference is being acquired.
 */
-struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
+__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
 {
 	cgroup_get(cgrp);
 	return cgrp;
@@ -1953,7 +1953,7 @@ struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
  * be released by calling bpf_cgroup_release().
 * @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired.
 */
-struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
+__bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
 {
 	struct cgroup *cgrp;
 
@@ -1985,7 +1985,7 @@ struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
  * drops to 0.
 * @cgrp: The cgroup on which a reference is being released.
 */
-void bpf_cgroup_release(struct cgroup *cgrp)
+__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
 {
 	if (!cgrp)
 		return;
@@ -2000,7 +2000,7 @@ void bpf_cgroup_release(struct cgroup *cgrp)
  * @cgrp: The cgroup for which we're performing a lookup.
 * @level: The level of ancestor to look up.
 */
-struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
+__bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
 {
 	struct cgroup *ancestor;
 
@@ -2019,7 +2019,7 @@ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
  * stored in a map, or released with bpf_task_release().
 * @pid: The pid of the task being looked up.
 */
-struct task_struct *bpf_task_from_pid(s32 pid)
+__bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
 {
 	struct task_struct *p;
 
@@ -2032,22 +2032,22 @@ struct task_struct *bpf_task_from_pid(s32 pid)
 	return p;
 }
 
-void *bpf_cast_to_kern_ctx(void *obj)
+__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
 {
 	return obj;
 }
 
-void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
+__bpf_kfunc void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
 {
 	return obj__ign;
 }
 
-void bpf_rcu_read_lock(void)
+__bpf_kfunc void bpf_rcu_read_lock(void)
 {
 	rcu_read_lock();
 }
 
-void bpf_rcu_read_unlock(void)
+__bpf_kfunc void bpf_rcu_read_unlock(void)
 {
 	rcu_read_unlock();
 }
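For completeness, here is a minimal, hypothetical BPF-program-side sketch of
calling a few of the cpumask kfuncs tagged above. The attach point and program
name are illustrative, and the __ksym declarations follow the usual convention
for kfuncs resolved against vmlinux BTF; this mirrors the style of the BPF
selftests rather than anything in this patch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* kfunc declarations, resolved against vmlinux BTF at load time. */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(cpumask_example, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	mask = bpf_cpumask_create();
	if (!mask)	/* may return NULL; the verifier requires this check */
		return 0;

	bpf_cpumask_set_cpu(0, mask);

	/* The acquired reference must be released before the program exits. */
	bpf_cpumask_release(mask);
	return 0;
}

If __bpf_kfunc were missing and LTO elided one of these functions from
vmlinux, the corresponding __ksym reference above would fail to resolve at
load time, which is exactly the failure mode this patch guards against.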