author	Martin KaFai Lau <kafai@fb.com>	2017-06-05 12:15:46 -0700
committer	David S. Miller <davem@davemloft.net>	2017-06-06 15:41:22 -0400
commit	dc4bb0e2356149aee4cdae061936f3bbdd45595c (patch)
tree	519cc48437a65eade73ab7cb150e32076cbbc17e /kernel/bpf
parent	8ea4fae926afd81f4d7fd43444562afc8629f77c (diff)
bpf: Introduce bpf_prog ID
This patch generates a unique ID for each BPF_PROG_LOAD-ed prog. It is worth noting that each BPF_PROG_LOAD-ed prog will have a different ID even if they have the same bpf instructions.

The ID is generated by the existing idr_alloc_cyclic(). The ID range is [1, INT_MAX). It is allocated in a cyclic manner, so an ID will only get reused after about 2 billion BPF_PROG_LOADs.

The bpf_prog_alloc_id() is done after bpf_prog_select_runtime() because the JIT process may have allocated a new prog. Hence, we need to ensure the value of the 'prog' pointer will not change any more before storing the prog in the prog_idr.

After bpf_prog_select_runtime(), the prog is read-only. Hence, the id is stored in 'struct bpf_prog_aux'.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Alexei Starovoitov <ast@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
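As background, here is a minimal sketch of the cyclic IDR pattern the commit relies on, written as a tiny test module. It is illustrative only; the demo_* names are made up for this note and are not part of the patch.

/* Illustrative sketch of idr_alloc_cyclic() usage; not from the patch. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(demo_idr);
static DEFINE_SPINLOCK(demo_lock);

/* Allocate a cyclic ID for @obj, mirroring bpf_prog_alloc_id():
 * IDs come from [1, INT_MAX) and the search resumes after the last
 * allocated value, so an ID is only reused once the range wraps.
 */
static int demo_alloc_id(void *obj)
{
	int id;

	/* _bh lock plus GFP_ATOMIC: no sleeping while the lock is held. */
	spin_lock_bh(&demo_lock);
	id = idr_alloc_cyclic(&demo_idr, obj, 1, INT_MAX, GFP_ATOMIC);
	spin_unlock_bh(&demo_lock);

	return id;	/* > 0 on success, negative errno on failure */
}

static int __init demo_init(void)
{
	static int dummy;
	int a = demo_alloc_id(&dummy);
	int b = demo_alloc_id(&dummy);

	pr_info("cyclic ids: %d then %d\n", a, b);	/* e.g. 1 then 2 */
	return 0;
}

static void __exit demo_exit(void)
{
	idr_destroy(&demo_idr);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because idr_alloc_cyclic() resumes its search after the last allocated ID, two loads of byte-identical programs still receive distinct IDs, which is the property the commit message calls out.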
Diffstat (limited to 'kernel/bpf')
-rw-r--r--	kernel/bpf/syscall.c	40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 59da103adb85..2a1b32b470f1 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -22,8 +22,11 @@
 #include <linux/filter.h>
 #include <linux/version.h>
 #include <linux/kernel.h>
+#include <linux/idr.h>
 
 DEFINE_PER_CPU(int, bpf_prog_active);
+static DEFINE_IDR(prog_idr);
+static DEFINE_SPINLOCK(prog_idr_lock);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;
@@ -650,6 +653,34 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
 	free_uid(user);
 }
 
+static int bpf_prog_alloc_id(struct bpf_prog *prog)
+{
+	int id;
+
+	spin_lock_bh(&prog_idr_lock);
+	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
+	if (id > 0)
+		prog->aux->id = id;
+	spin_unlock_bh(&prog_idr_lock);
+
+	/* id is in [1, INT_MAX) */
+	if (WARN_ON_ONCE(!id))
+		return -ENOSPC;
+
+	return id > 0 ? 0 : id;
+}
+
+static void bpf_prog_free_id(struct bpf_prog *prog)
+{
+	/* cBPF to eBPF migrations are currently not in the idr store. */
+	if (!prog->aux->id)
+		return;
+
+	spin_lock_bh(&prog_idr_lock);
+	idr_remove(&prog_idr, prog->aux->id);
+	spin_unlock_bh(&prog_idr_lock);
+}
+
 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 {
 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
@@ -663,6 +694,7 @@ void bpf_prog_put(struct bpf_prog *prog)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
 		trace_bpf_prog_put_rcu(prog);
+		bpf_prog_free_id(prog);
 		bpf_prog_kallsyms_del(prog);
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
@@ -857,15 +889,21 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (err < 0)
 		goto free_used_maps;
 
+	err = bpf_prog_alloc_id(prog);
+	if (err)
+		goto free_used_maps;
+
 	err = bpf_prog_new_fd(prog);
 	if (err < 0)
 		/* failed to allocate fd */
-		goto free_used_maps;
+		goto free_id;
 
 	bpf_prog_kallsyms_add(prog);
 	trace_bpf_prog_load(prog, err);
 	return err;
 
+free_id:
+	bpf_prog_free_id(prog);
 free_used_maps:
 	free_used_maps(prog->aux);
 free_prog:
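The reshuffled error path above follows the kernel's usual reverse-order unwinding: once bpf_prog_alloc_id() has succeeded, any later failure must release the ID before falling through to the pre-existing cleanup labels. Below is a condensed, compilable model of that control flow; the helper names are hypothetical stand-ins, not the kernel source.

/* Standalone model of the goto-unwind ordering in bpf_prog_load(). */
#include <stdio.h>

/* Hypothetical stand-ins for the real steps. */
static int alloc_id(void) { return 0; }		/* succeeds */
static int new_fd(void) { return -1; }		/* forced failure */
static void drop_id(void) { puts("free_id"); }
static void drop_maps(void) { puts("free_used_maps"); }

static int load(void)
{
	int err;

	err = alloc_id();		/* the step this patch adds */
	if (err)
		goto free_used_maps;	/* no ID was taken; skip free_id */

	err = new_fd();
	if (err < 0)
		goto free_id;		/* undo the ID, then older cleanups */

	return err;

free_id:
	drop_id();
free_used_maps:
	drop_maps();
	return err;
}

int main(void)
{
	/* Prints "free_id" then "free_used_maps": reverse acquisition order. */
	return load() < 0 ? 0 : 1;
}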