author	Yonghong Song <yonghong.song@linux.dev>	2023-12-21 19:17:55 -0800
committer	Alexei Starovoitov <ast@kernel.org>	2024-01-03 21:08:25 -0800
commit	0e2ba9f96f9b82893ba19170ae48d46003f8ef44 (patch)
tree	5b253ef511576f6cf1247fad149ecdd81401f5a0 /kernel/bpf
parent	5b95e638f134e552b5ba2976326c02babe248615 (diff)
bpf: Use smaller low/high marks for percpu allocation
Currently, refill low/high marks are set with the assumption of normal non-percpu memory allocation. For example, for an allocation size of 256 bytes, the low mark is 32 and the high mark is 96, resulting in a batch allocation of 48 elements; the allocated memory will be 48 * 256 = 12KB for that particular cpu. Assuming a 128-cpu system, the total memory consumption across all cpus will be 12KB * 128 = 1.5MB.

This might be okay for non-percpu allocation, but it is not good for percpu allocation, which would consume 1.5MB * 128 = 192MB in the worst case if every cpu had a chance to do a percpu allocation. In practice, percpu allocation is very rare compared to non-percpu allocation. So use smaller low/high marks to avoid unnecessary memory consumption.

Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20231222031755.1289671-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
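As a back-of-the-envelope check of the arithmetic above, here is a minimal standalone userspace C sketch (not kernel code; the 256-byte unit size, 48-element batch, and 128-cpu count are the figures quoted in the message):

	/* Reproduce the memory-consumption figures from the commit message.
	 * Plain userspace C; values are taken from the message, not derived
	 * from the kernel at runtime.
	 */
	#include <stdio.h>

	int main(void)
	{
		int unit_size = 256;	/* allocation size used in the example */
		int batch = 48;		/* elements allocated per refill batch */
		int nr_cpus = 128;

		long per_cpu = (long)batch * unit_size;		/* 12 KB */
		long non_percpu = per_cpu * nr_cpus;		/* ~1.5 MB */
		long percpu_worst = non_percpu * nr_cpus;	/* ~192 MB */

		printf("per-cpu cache:     %ld KB\n", per_cpu / 1024);
		printf("non-percpu total:  %ld KB\n", non_percpu / 1024);
		printf("percpu worst case: %ld MB\n", percpu_worst / (1024 * 1024));
		return 0;
	}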
Diffstat (limited to 'kernel/bpf')
-rw-r--r--	kernel/bpf/memalloc.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index a8ee6fb8401c..460c8f38fed6 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -464,11 +464,17 @@ static void notrace irq_work_raise(struct bpf_mem_cache *c)
* consume ~ 11 Kbyte per cpu.
* Typical case will be between 11K and 116K closer to 11K.
* bpf progs can and should share bpf_mem_cache when possible.
+ *
+ * Percpu allocation is typically rare. To avoid potential unnecessary large
+ * memory consumption, set low_mark = 1 and high_mark = 3, resulting in c->batch = 1.
*/
static void init_refill_work(struct bpf_mem_cache *c)
{
init_irq_work(&c->refill_work, bpf_mem_refill);
- if (c->unit_size <= 256) {
+ if (c->percpu_size) {
+ c->low_watermark = 1;
+ c->high_watermark = 3;
+ } else if (c->unit_size <= 256) {
c->low_watermark = 32;
c->high_watermark = 96;
} else {
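For context on why the new marks give c->batch = 1, as the added comment states: the batch size is derived from the watermarks elsewhere in memalloc.c. Assuming the formula batch = max((high_watermark - low_watermark) / 4 * 3, 1), this standalone sketch shows the percpu case clamping to 1 while the <= 256-byte case keeps its batch of 48:

	/* Sketch of the watermark-to-batch arithmetic, assuming the formula
	 * max((high - low) / 4 * 3, 1) used in memalloc.c. With low = 1 and
	 * high = 3, (3 - 1) / 4 is 0 under integer division, so the batch
	 * clamps to 1; with low = 32 and high = 96 it comes out to 48.
	 */
	#include <stdio.h>

	static int batch_for(int low, int high)
	{
		int b = (high - low) / 4 * 3;

		return b > 1 ? b : 1;
	}

	int main(void)
	{
		printf("percpu (1, 3):        batch = %d\n", batch_for(1, 3));
		printf("size <= 256 (32, 96): batch = %d\n", batch_for(32, 96));
		return 0;
	}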