| author | Thomas Gleixner <tglx@linutronix.de> | 2020-02-24 15:01:34 +0100 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2020-02-24 16:12:20 -0800 |
| commit | dbca151cad736c99f4817076daf9fd02ed0c2daa | |
| tree | 1c700844ea53f82eec530d9fdf37d7776f9938df /kernel/bpf/hashtab.c | |
| parent | 2ed905c521e56aead6987df94c083efb0ee59895 | |
bpf: Update locking comment in hashtab code
The comment where the bucket lock is acquired says:
/* bpf_map_update_elem() can be called in_irq() */
which is not really helpful and, aside from that, does not explain the
subtle details of the hash bucket locks, especially in the context of BPF
and perf, kprobes and tracing.
Add a comment at the top of the file which explains the protection scopes
and the details of how potential deadlocks are prevented.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145642.755793061@linutronix.de
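For orientation, the serialization the patch is talking about is the per-bucket raw spinlock in kernel/bpf/hashtab.c. The condensed sketch below shows the pattern every update path in the diff follows: take the bucket lock with interrupts disabled, look up the old element, publish the new one. It is not kernel code; `update_bucket()` is an illustrative name, while `struct bucket`, `lookup_elem_raw()` and the nulls-list helpers mirror the real definitions.

```c
/*
 * Condensed sketch of the per-bucket locking pattern in hashtab.c.
 * update_bucket() is a made-up helper for illustration only.
 */
struct bucket {
	struct hlist_nulls_head head;
	raw_spinlock_t lock;
};

static int update_bucket(struct bucket *b, struct htab_elem *l_new,
			 u32 hash, void *key, u32 key_size)
{
	struct htab_elem *l_old;
	unsigned long flags;

	/* irqsave: BPF programs may run from hard irq, kprobe or tracing context */
	raw_spin_lock_irqsave(&b->lock, flags);

	l_old = lookup_elem_raw(&b->head, hash, key, key_size);

	/* add the new element first so concurrent RCU lookups never miss the key */
	hlist_nulls_add_head_rcu(&l_new->hash_node, &b->head);
	if (l_old)
		hlist_nulls_del_rcu(&l_old->hash_node);

	raw_spin_unlock_irqrestore(&b->lock, flags);
	return 0;
}
```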
Diffstat (limited to 'kernel/bpf/hashtab.c')
-rw-r--r-- | kernel/bpf/hashtab.c | 24 |
1 file changed, 20 insertions, 4 deletions
```diff
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index a1468e3f5af2..65711a220fe0 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -27,6 +27,26 @@
 	.map_delete_batch =			\
 	generic_map_delete_batch
 
+/*
+ * The bucket lock has two protection scopes:
+ *
+ * 1) Serializing concurrent operations from BPF programs on differrent
+ *    CPUs
+ *
+ * 2) Serializing concurrent operations from BPF programs and sys_bpf()
+ *
+ * BPF programs can execute in any context including perf, kprobes and
+ * tracing. As there are almost no limits where perf, kprobes and tracing
+ * can be invoked from the lock operations need to be protected against
+ * deadlocks. Deadlocks can be caused by recursion and by an invocation in
+ * the lock held section when functions which acquire this lock are invoked
+ * from sys_bpf(). BPF recursion is prevented by incrementing the per CPU
+ * variable bpf_prog_active, which prevents BPF programs attached to perf
+ * events, kprobes and tracing to be invoked before the prior invocation
+ * from one of these contexts completed. sys_bpf() uses the same mechanism
+ * by pinning the task to the current CPU and incrementing the recursion
+ * protection accross the map operation.
+ */
 struct bucket {
 	struct hlist_nulls_head head;
 	raw_spinlock_t lock;
@@ -884,7 +904,6 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		 */
 	}
 
-	/* bpf_map_update_elem() can be called in_irq() */
 	raw_spin_lock_irqsave(&b->lock, flags);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -964,7 +983,6 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 		return -ENOMEM;
 	memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);
 
-	/* bpf_map_update_elem() can be called in_irq() */
 	raw_spin_lock_irqsave(&b->lock, flags);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1019,7 +1037,6 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	/* bpf_map_update_elem() can be called in_irq() */
 	raw_spin_lock_irqsave(&b->lock, flags);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
@@ -1083,7 +1100,6 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 		return -ENOMEM;
 	}
 
-	/* bpf_map_update_elem() can be called in_irq() */
 	raw_spin_lock_irqsave(&b->lock, flags);
 
 	l_old = lookup_elem_raw(head, hash, key, key_size);
```
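The deadlock avoidance described in the new comment rests on the per-CPU `bpf_prog_active` counter. Below is a minimal sketch of that mechanism; `run_prog_protected()` and `map_update_protected()` are made-up names for illustration, while the real logic lives in `trace_call_bpf()` and the sys_bpf() map update path.

```c
/*
 * Sketch of the bpf_prog_active recursion guard described in the comment
 * above. Helper names are illustrative; the kernel implements this in
 * trace_call_bpf() and in the sys_bpf() map update path.
 */
static DEFINE_PER_CPU(int, bpf_prog_active);	/* defined once in the kernel proper */

/* Tracing side: never run a second program while one is active on this CPU. */
static unsigned int run_prog_protected(const struct bpf_prog *prog, void *ctx)
{
	unsigned int ret = 0;

	if (__this_cpu_inc_return(bpf_prog_active) != 1)
		goto out;		/* recursion on this CPU: skip the program */

	ret = BPF_PROG_RUN(prog, ctx);
out:
	__this_cpu_dec(bpf_prog_active);
	return ret;
}

/* sys_bpf() side: pin to the current CPU and take the same guard. */
static int map_update_protected(struct bpf_map *map, void *key, void *value,
				u64 flags)
{
	int err;

	preempt_disable();			/* stay on this CPU */
	__this_cpu_inc(bpf_prog_active);	/* block BPF programs attached here */
	err = map->ops->map_update_elem(map, key, value, flags);
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();
	return err;
}
```

Because programs attached to perf events, kprobes and tracing check the same counter before running, a map update issued through sys_bpf() cannot be preempted on its CPU by a program that would try to take the bucket lock a second time.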