author		Hou Tao <houtao1@huawei.com>	2023-09-08 21:39:22 +0800
committer	Alexei Starovoitov <ast@kernel.org>	2023-09-11 12:41:37 -0700
commit	c930472552022bd09aab3cd946ba3f243070d5c7 (patch)
tree	e38f453dca80f3838bd511a479362857907e1149 /kernel
parent	b1d53958b69312e43c118d4093d8f93d3f6f80af (diff)
bpf: Ensure unit_size is matched with slab cache object size
Add an extra check in bpf_mem_alloc_init() to ensure the unit_size of each
bpf_mem_cache matches the object_size of the underlying slab cache. If the
two sizes do not match, print a warning once and return -EINVAL from
bpf_mem_alloc_init(), so the mismatch is found early and the potential
issue is prevented.

Suggested-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20230908133923.2675053-4-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
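The mismatch guarded against here can arise because kmalloc() rounds a request up to the nearest available slab size class (for instance, a 96-byte request may be served from kmalloc-128 when no 96-byte cache exists), so ksize() on a free object can report more than unit_size. A minimal sketch of that effect, using hypothetical module-style helper code that is not part of this patch:

/* Hypothetical helper (not from this patch): allocate 'requested' bytes and
 * report the real slab object size, which is what check_obj_size() compares
 * against unit_size.
 */
#include <linux/slab.h>
#include <linux/printk.h>

static int probe_slab_size(unsigned int requested)
{
	void *obj = kmalloc(requested, GFP_KERNEL);
	size_t actual;

	if (!obj)
		return -ENOMEM;

	actual = ksize(obj);	/* size of the underlying slab object */
	if (actual != requested)
		pr_info("requested %u bytes, slab object is %zu bytes\n",
			requested, actual);

	kfree(obj);
	return 0;
}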
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/memalloc.c	33
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 90c1ed8210a2..1c22b90e754a 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -486,6 +486,24 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
 }
 
+static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
+{
+	struct llist_node *first;
+	unsigned int obj_size;
+
+	first = c->free_llist.first;
+	if (!first)
+		return 0;
+
+	obj_size = ksize(first);
+	if (obj_size != c->unit_size) {
+		WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
+			  idx, obj_size, c->unit_size);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* When size != 0 bpf_mem_cache for each cpu.
  * This is typical bpf hash map use case when all elements have equal size.
  *
@@ -496,10 +514,10 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 {
 	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
+	int cpu, i, err, unit_size, percpu_size = 0;
 	struct bpf_mem_caches *cc, __percpu *pcc;
 	struct bpf_mem_cache *c, __percpu *pc;
 	struct obj_cgroup *objcg = NULL;
-	int cpu, i, unit_size, percpu_size = 0;
 
 	if (size) {
 		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
@@ -537,6 +555,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
 	if (!pcc)
 		return -ENOMEM;
+	err = 0;
 #ifdef CONFIG_MEMCG_KMEM
 	objcg = get_obj_cgroup_from_current();
 #endif
@@ -557,10 +576,20 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			if (i != bpf_mem_cache_idx(c->unit_size))
 				continue;
 			prefill_mem_cache(c, cpu);
+			err = check_obj_size(c, i);
+			if (err)
+				goto out;
 		}
 	}
+
+out:
 	ma->caches = pcc;
-	return 0;
+	/* refill_work is either zeroed or initialized, so it is safe to
+	 * call irq_work_sync().
+	 */
+	if (err)
+		bpf_mem_alloc_destroy(ma);
+	return err;
 }
 
 static void drain_mem_cache(struct bpf_mem_cache *c)
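For reference, a hedged usage sketch of the changed error path: on failure bpf_mem_alloc_init() now tears down whatever was already prefilled via bpf_mem_alloc_destroy() and returns the error, so callers only need to propagate it. The caller name below is illustrative, not a call site touched by this patch:

/* Illustrative caller (hypothetical; real users such as the hash map
 * allocator follow the same propagate-the-error pattern).
 */
#include <linux/bpf_mem_alloc.h>

static int example_init(struct bpf_mem_alloc *ma, int size)
{
	int err;

	/* size != 0: one fixed-size bpf_mem_cache per CPU;
	 * percpu = false: allocate regular (non per-cpu) objects.
	 */
	err = bpf_mem_alloc_init(ma, size, false);
	if (err)	/* -ENOMEM, or -EINVAL from the new object-size check */
		return err;

	/* ... use bpf_mem_cache_alloc()/bpf_mem_cache_free() on ma ... */

	bpf_mem_alloc_destroy(ma);
	return 0;
}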