author     Kumar Kartikeya Dwivedi <memxor@gmail.com>  2024-11-09 15:14:29 -0800
committer  Andrii Nakryiko <andrii@kernel.org>         2024-11-11 08:18:51 -0800
commit     f6b9a69a9e56b2083aca8a925fc1a28eb698e3ed (patch)
tree       8b2ca0fbb9af8bf263c9a161715643cf81e4b0b2 /kernel/bpf
parent     937a1c29a287e8f48c4cea714c76a13e14d989ac (diff)
bpf: Refactor active lock management
When bpf_spin_lock was introduced originally, there was deliberation on
whether to use an array of lock IDs, but since bpf_spin_lock is limited
to holding a single lock at any given time, we've been using a single
ID to identify the held lock.

In preparation for introducing spin locks that can be taken multiple
times, introduce support for acquiring multiple lock IDs. For this
purpose, reuse the acquired_refs array and store both lock and pointer
references. We tag the entry with REF_TYPE_PTR or REF_TYPE_LOCK to
disambiguate and find the relevant entry. The ptr field is used to
track the map_ptr or btf (for bpf_obj_new allocations) to ensure locks
can be matched with protected fields within the same "allocation", i.e.
bpf_obj_new object or map value.

The struct active_lock is changed to an int as the state is part of the
acquired_refs array, and we only need active_lock as a cheap way of
detecting lock presence.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20241109231430.2475236-2-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
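A minimal sketch of the state layout this patch implies may help when reading the diff below. The real definitions live in include/linux/bpf_verifier.h, which is not shown here (the diff is limited to kernel/bpf), so the field order, exact types, and the bpf_func_state excerpt are assumptions for illustration only:

/*
 * Sketch only: lock entries now share the acquired_refs array with
 * pointer references, distinguished by a type tag, while active_lock
 * degrades to a plain counter used as a cheap "is any lock held?" test.
 */
enum ref_state_type {
	REF_TYPE_PTR,		/* acquired pointer reference */
	REF_TYPE_LOCK,		/* held bpf_spin_lock */
};

struct bpf_reference_state {
	enum ref_state_type type;	/* disambiguates PTR vs LOCK entries */
	int id;				/* reg->id identifying the reference/lock */
	int insn_idx;			/* instruction that acquired it */
	/* For REF_TYPE_LOCK: the "allocation" the lock lives in, i.e. the
	 * struct bpf_map * for map values or the struct btf * for
	 * bpf_obj_new objects, so the lock can be matched against protected
	 * fields of the same allocation.
	 */
	void *ptr;
	int callback_ref;		/* frameno if acquired inside a callback */
};

/* Hypothetical excerpt of struct bpf_func_state, not the full definition. */
struct bpf_func_state_excerpt {
	struct bpf_reference_state *refs;
	int acquired_refs;		/* number of live entries in refs[] */
	int active_locks;		/* count of REF_TYPE_LOCK entries */
};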
Diffstat (limited to 'kernel/bpf')
-rw-r--r--  kernel/bpf/verifier.c | 146
1 file changed, 107 insertions(+), 39 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 132fc172961f..d55ca27dc031 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1284,6 +1284,7 @@ static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_fun
if (!dst->refs)
return -ENOMEM;
+ dst->active_locks = src->active_locks;
dst->acquired_refs = src->acquired_refs;
return 0;
}
@@ -1354,6 +1355,7 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
if (err)
return err;
id = ++env->id_gen;
+ state->refs[new_ofs].type = REF_TYPE_PTR;
state->refs[new_ofs].id = id;
state->refs[new_ofs].insn_idx = insn_idx;
state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;
@@ -1361,6 +1363,25 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
return id;
}
+static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum ref_state_type type,
+ int id, void *ptr)
+{
+ struct bpf_func_state *state = cur_func(env);
+ int new_ofs = state->acquired_refs;
+ int err;
+
+ err = resize_reference_state(state, state->acquired_refs + 1);
+ if (err)
+ return err;
+ state->refs[new_ofs].type = type;
+ state->refs[new_ofs].id = id;
+ state->refs[new_ofs].insn_idx = insn_idx;
+ state->refs[new_ofs].ptr = ptr;
+
+ state->active_locks++;
+ return 0;
+}
+
/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
@@ -1368,6 +1389,8 @@ static int release_reference_state(struct bpf_func_state *state, int ptr_id)
last_idx = state->acquired_refs - 1;
for (i = 0; i < state->acquired_refs; i++) {
+ if (state->refs[i].type != REF_TYPE_PTR)
+ continue;
if (state->refs[i].id == ptr_id) {
/* Cannot release caller references in callbacks */
if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
@@ -1383,6 +1406,45 @@ static int release_reference_state(struct bpf_func_state *state, int ptr_id)
return -EINVAL;
}
+static int release_lock_state(struct bpf_func_state *state, int type, int id, void *ptr)
+{
+ int i, last_idx;
+
+ last_idx = state->acquired_refs - 1;
+ for (i = 0; i < state->acquired_refs; i++) {
+ if (state->refs[i].type != type)
+ continue;
+ if (state->refs[i].id == id && state->refs[i].ptr == ptr) {
+ if (last_idx && i != last_idx)
+ memcpy(&state->refs[i], &state->refs[last_idx],
+ sizeof(*state->refs));
+ memset(&state->refs[last_idx], 0, sizeof(*state->refs));
+ state->acquired_refs--;
+ state->active_locks--;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static struct bpf_reference_state *find_lock_state(struct bpf_verifier_env *env, enum ref_state_type type,
+ int id, void *ptr)
+{
+ struct bpf_func_state *state = cur_func(env);
+ int i;
+
+ for (i = 0; i < state->acquired_refs; i++) {
+ struct bpf_reference_state *s = &state->refs[i];
+
+ if (s->type == REF_TYPE_PTR || s->type != type)
+ continue;
+
+ if (s->id == id && s->ptr == ptr)
+ return s;
+ }
+ return NULL;
+}
+
static void free_func_state(struct bpf_func_state *state)
{
if (!state)
@@ -1453,8 +1515,6 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
dst_state->active_preempt_lock = src->active_preempt_lock;
dst_state->in_sleepable = src->in_sleepable;
dst_state->curframe = src->curframe;
- dst_state->active_lock.ptr = src->active_lock.ptr;
- dst_state->active_lock.id = src->active_lock.id;
dst_state->branches = src->branches;
dst_state->parent = src->parent;
dst_state->first_insn_idx = src->first_insn_idx;
@@ -5442,7 +5502,7 @@ static bool in_sleepable(struct bpf_verifier_env *env)
static bool in_rcu_cs(struct bpf_verifier_env *env)
{
return env->cur_state->active_rcu_lock ||
- env->cur_state->active_lock.ptr ||
+ cur_func(env)->active_locks ||
!in_sleepable(env);
}
@@ -7724,19 +7784,20 @@ static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg
* Since only one bpf_spin_lock is allowed the checks are simpler than
* reg_is_refcounted() logic. The verifier needs to remember only
* one spin_lock instead of array of acquired_refs.
- * cur_state->active_lock remembers which map value element or allocated
+ * cur_func(env)->active_locks remembers which map value element or allocated
* object got locked and clears it after bpf_spin_unlock.
*/
static int process_spin_lock(struct bpf_verifier_env *env, int regno,
bool is_lock)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
- struct bpf_verifier_state *cur = env->cur_state;
bool is_const = tnum_is_const(reg->var_off);
+ struct bpf_func_state *cur = cur_func(env);
u64 val = reg->var_off.value;
struct bpf_map *map = NULL;
struct btf *btf = NULL;
struct btf_record *rec;
+ int err;
if (!is_const) {
verbose(env,
@@ -7768,16 +7829,23 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
return -EINVAL;
}
if (is_lock) {
- if (cur->active_lock.ptr) {
+ void *ptr;
+
+ if (map)
+ ptr = map;
+ else
+ ptr = btf;
+
+ if (cur->active_locks) {
verbose(env,
"Locking two bpf_spin_locks are not allowed\n");
return -EINVAL;
}
- if (map)
- cur->active_lock.ptr = map;
- else
- cur->active_lock.ptr = btf;
- cur->active_lock.id = reg->id;
+ err = acquire_lock_state(env, env->insn_idx, REF_TYPE_LOCK, reg->id, ptr);
+ if (err < 0) {
+ verbose(env, "Failed to acquire lock state\n");
+ return err;
+ }
} else {
void *ptr;
@@ -7786,20 +7854,17 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
else
ptr = btf;
- if (!cur->active_lock.ptr) {
+ if (!cur->active_locks) {
verbose(env, "bpf_spin_unlock without taking a lock\n");
return -EINVAL;
}
- if (cur->active_lock.ptr != ptr ||
- cur->active_lock.id != reg->id) {
+
+ if (release_lock_state(cur_func(env), REF_TYPE_LOCK, reg->id, ptr)) {
verbose(env, "bpf_spin_unlock of different lock\n");
return -EINVAL;
}
invalidate_non_owning_refs(env);
-
- cur->active_lock.ptr = NULL;
- cur->active_lock.id = 0;
}
return 0;
}
@@ -9861,7 +9926,7 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
const char *sub_name = subprog_name(env, subprog);
/* Only global subprogs cannot be called with a lock held. */
- if (env->cur_state->active_lock.ptr) {
+ if (cur_func(env)->active_locks) {
verbose(env, "global function calls are not allowed while holding a lock,\n"
"use static function instead\n");
return -EINVAL;
@@ -10386,6 +10451,8 @@ static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exi
return 0;
for (i = 0; i < state->acquired_refs; i++) {
+ if (state->refs[i].type != REF_TYPE_PTR)
+ continue;
if (!exception_exit && state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
continue;
verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
@@ -10399,7 +10466,7 @@ static int check_resource_leak(struct bpf_verifier_env *env, bool exception_exit
{
int err;
- if (check_lock && env->cur_state->active_lock.ptr) {
+ if (check_lock && cur_func(env)->active_locks) {
verbose(env, "%s cannot be used inside bpf_spin_lock-ed region\n", prefix);
return -EINVAL;
}
@@ -11620,10 +11687,9 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
- struct bpf_verifier_state *state = env->cur_state;
struct btf_record *rec = reg_btf_record(reg);
- if (!state->active_lock.ptr) {
+ if (!cur_func(env)->active_locks) {
verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
return -EFAULT;
}
@@ -11720,6 +11786,7 @@ static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_o
*/
static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
+ struct bpf_reference_state *s;
void *ptr;
u32 id;
@@ -11736,10 +11803,10 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
}
id = reg->id;
- if (!env->cur_state->active_lock.ptr)
+ if (!cur_func(env)->active_locks)
return -EINVAL;
- if (env->cur_state->active_lock.ptr != ptr ||
- env->cur_state->active_lock.id != id) {
+ s = find_lock_state(env, REF_TYPE_LOCK, id, ptr);
+ if (!s) {
verbose(env, "held lock and object are not in the same allocation\n");
return -EINVAL;
}
@@ -17635,8 +17702,22 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
return false;
for (i = 0; i < old->acquired_refs; i++) {
- if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap))
+ if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
+ old->refs[i].type != cur->refs[i].type)
+ return false;
+ switch (old->refs[i].type) {
+ case REF_TYPE_PTR:
+ if (old->refs[i].callback_ref != cur->refs[i].callback_ref)
+ return false;
+ break;
+ case REF_TYPE_LOCK:
+ if (old->refs[i].ptr != cur->refs[i].ptr)
+ return false;
+ break;
+ default:
+ WARN_ONCE(1, "Unhandled enum type for reference state: %d\n", old->refs[i].type);
return false;
+ }
}
return true;
@@ -17714,19 +17795,6 @@ static bool states_equal(struct bpf_verifier_env *env,
if (old->speculative && !cur->speculative)
return false;
- if (old->active_lock.ptr != cur->active_lock.ptr)
- return false;
-
- /* Old and cur active_lock's have to be either both present
- * or both absent.
- */
- if (!!old->active_lock.id != !!cur->active_lock.id)
- return false;
-
- if (old->active_lock.id &&
- !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch))
- return false;
-
if (old->active_rcu_lock != cur->active_rcu_lock)
return false;
@@ -18625,7 +18693,7 @@ static int do_check(struct bpf_verifier_env *env)
return -EINVAL;
}
- if (env->cur_state->active_lock.ptr) {
+ if (cur_func(env)->active_locks) {
if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
(insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
(insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {