author    Hou Tao <houtao1@huawei.com>  2023-09-08 21:39:21 +0800
committer Alexei Starovoitov <ast@kernel.org>  2023-09-11 12:41:37 -0700
commit    b1d53958b69312e43c118d4093d8f93d3f6f80af (patch)
tree      b8128293cd5b81af6bd4391dddbc90396024fc8e /kernel/bpf/memalloc.c
parent    d52b59315bf5e86e83c00bfae47cedd388dad6a8 (diff)
bpf: Don't prefill for unused bpf_mem_cache
When the unit_size of a bpf_mem_cache does not match the object_size of the
underlying slab cache, the bpf_mem_cache will not be used, and the allocation
will be redirected to a bpf_mem_cache with a bigger unit_size instead, so
there is no need to prefill these unused bpf_mem_caches.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20230908133923.2675053-3-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
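For intuition, here is a minimal userspace sketch of the situation the message
describes. The bucket table and the power-of-two slab rounding below are
made-up assumptions for illustration only; the kernel's actual sizes[] table
and slab object sizes differ, and the real decision added by this patch is the
i != bpf_mem_cache_idx(c->unit_size) check visible in the last hunk. The point
is that a bucket whose unit_size gets rounded up by the slab never serves its
own size, so anything prefilled into it would sit unused.

#include <stdio.h>

/* Hypothetical bucket table; the kernel's sizes[] in memalloc.c differs. */
static const int sizes[] = { 96, 192, 256, 512, 1024, 2048, 4096 };
#define NUM_CACHES ((int)(sizeof(sizes) / sizeof(sizes[0])))

/* Stand-in for the slab rounding an allocation up to its object size;
 * plain power-of-two rounding here, purely for illustration.
 */
static int slab_object_size(int size)
{
	int rounded = 32;

	while (rounded < size)
		rounded *= 2;
	return rounded;
}

/* Which bucket actually serves a request of @size once slab rounding is
 * taken into account: the first bucket big enough for the rounded size.
 */
static int serving_idx(int size)
{
	int i, need = slab_object_size(size);

	for (i = 0; i < NUM_CACHES; i++)
		if (sizes[i] >= need)
			return i;
	return NUM_CACHES - 1;
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_CACHES; i++)
		printf("bucket %4d: %s\n", sizes[i],
		       serving_idx(sizes[i]) == i ?
		       "serves its own size -> prefill is useful" :
		       "redirected to a bigger bucket -> prefill would be wasted");
	return 0;
}

In this toy model the 96- and 192-byte buckets are the ones that end up
redirected, which is exactly the kind of bucket the patch stops prefilling.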
Diffstat (limited to 'kernel/bpf/memalloc.c')
-rw-r--r--  kernel/bpf/memalloc.c | 16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 98d9e96fba3c..90c1ed8210a2 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -459,8 +459,7 @@ static void notrace irq_work_raise(struct bpf_mem_cache *c)
  * Typical case will be between 11K and 116K closer to 11K.
  * bpf progs can and should share bpf_mem_cache when possible.
  */
-
-static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
+static void init_refill_work(struct bpf_mem_cache *c)
 {
 	init_irq_work(&c->refill_work, bpf_mem_refill);
 	if (c->unit_size <= 256) {
@@ -476,7 +475,10 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 		c->high_watermark = max(96 * 256 / c->unit_size, 3);
 	}
 	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
+}
 
+static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
+{
 	/* To avoid consuming memory assume that 1st run of bpf
 	 * prog won't be doing more than 4 map_update_elem from
 	 * irq disabled region
@@ -521,6 +523,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			c->objcg = objcg;
 			c->percpu_size = percpu_size;
 			c->tgt = c;
+			init_refill_work(c);
 			prefill_mem_cache(c, cpu);
 		}
 		ma->cache = pc;
@@ -544,6 +547,15 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			c->unit_size = sizes[i];
 			c->objcg = objcg;
 			c->tgt = c;
+
+			init_refill_work(c);
+			/* Another bpf_mem_cache will be used when allocating
+			 * c->unit_size in bpf_mem_alloc(), so doesn't prefill
+			 * for the bpf_mem_cache because these free objects will
+			 * never be used.
+			 */
+			if (i != bpf_mem_cache_idx(c->unit_size))
+				continue;
 			prefill_mem_cache(c, cpu);
 		}
 	}
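As a side note on the init_refill_work() half of the split, the hunk at
@@ -476,7 +475,10 @@ shows the high_watermark and batch formulas for
unit_size > 256; the quick standalone computation below just reproduces that
arithmetic for a few sizes. The matching low_watermark formula used here
(max(32 * 256 / unit_size, 1)) falls outside the quoted context, so treat it
as an assumption of this sketch.

#include <stdio.h>

static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	const int unit_sizes[] = { 512, 1024, 2048, 4096 };
	size_t i;

	for (i = 0; i < sizeof(unit_sizes) / sizeof(unit_sizes[0]); i++) {
		int unit_size = unit_sizes[i];
		int low = max_int(32 * 256 / unit_size, 1);	/* assumed, outside quoted context */
		int high = max_int(96 * 256 / unit_size, 3);	/* from the hunk */
		int batch = max_int((high - low) / 4 * 3, 1);	/* from the hunk */

		printf("unit_size %4d: low %2d high %2d batch %2d\n",
		       unit_size, low, high, batch);
	}
	return 0;
}

For a 4096-byte bucket this works out to low 2, high 6, batch 3, i.e. only a
handful of order-0 pages kept per CPU.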