From d1a02358d48d659c2400fa3bbaf9cde2cf9f5040 Mon Sep 17 00:00:00 2001
From: YiFei Zhu <zhuyifei@google.com>
Date: Fri, 28 Jul 2023 04:33:59 +0000
Subject: bpf: Non-atomically allocate freelist during prefill

In internal testing of test_maps, we sometimes observed failures like:
  test_maps: test_maps.c:173: void test_hashmap_percpu(unsigned int, void *):
    Assertion `bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0' failed.
where the errno is ENOMEM. After some troubleshooting and enabling
the warnings, we saw:
  [   91.304708] percpu: allocation failed, size=8 align=8 atomic=1, atomic alloc failed, no space left
  [   91.304716] CPU: 51 PID: 24145 Comm: test_maps Kdump: loaded Tainted: G        N 6.1.38-smp-DEV #7
  [   91.304719] Hardware name: Google Astoria/astoria, BIOS 0.20230627.0-0 06/27/2023
  [   91.304721] Call Trace:
  [   91.304724]  <TASK>
  [   91.304730]  [] dump_stack_lvl+0x59/0x88
  [   91.304737]  [] dump_stack+0x10/0x18
  [   91.304738]  [] pcpu_alloc+0x6fc/0x870
  [   91.304741]  [] __alloc_percpu_gfp+0x12/0x20
  [   91.304743]  [] alloc_bulk+0xde/0x1e0
  [   91.304746]  [] bpf_mem_alloc_init+0xd2/0x2f0
  [   91.304747]  [] htab_map_alloc+0x479/0x650
  [   91.304750]  [] map_create+0x140/0x2e0
  [   91.304752]  [] __sys_bpf+0x5a3/0x6c0
  [   91.304753]  [] __x64_sys_bpf+0x1c/0x30
  [   91.304754]  [] do_syscall_64+0x5a/0x80
  [   91.304756]  [] entry_SYSCALL_64_after_hwframe+0x63/0xcd

This makes sense: in atomic context, the percpu allocator will not
create new chunks; chunks are only created in non-atomic contexts.
And if during prefill all existing percpu chunks are full, -ENOMEM
happens immediately upon the next unit_alloc.

The prefill phase does not actually run in atomic context, so we can
use this fact to allocate non-atomically with GFP_KERNEL instead of
GFP_NOWAIT. This avoids the immediate -ENOMEM.

GFP_NOWAIT still has to be used in unit_alloc when the bpf program
runs in atomic context. Even when the program runs in non-atomic
context, in most cases an rcu read lock is held for it, so GFP_NOWAIT
is still needed. This is often also the case for BPF_MAP_UPDATE_ELEM
syscalls.

Signed-off-by: YiFei Zhu <zhuyifei@google.com>
Acked-by: Yonghong Song
Acked-by: Hou Tao
Link: https://lore.kernel.org/r/20230728043359.3324347-1-zhuyifei@google.com
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/memalloc.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 14d9b1a9a4ca..9c49ae53deaf 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -201,12 +201,16 @@ static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
 }
 
 /* Mostly runs from irq_work except __init phase. */
-static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
+static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
 {
 	struct mem_cgroup *memcg = NULL, *old_memcg;
+	gfp_t gfp;
 	void *obj;
 	int i;
 
+	gfp = __GFP_NOWARN | __GFP_ACCOUNT;
+	gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL;
+
 	for (i = 0; i < cnt; i++) {
 		/*
 		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
@@ -238,7 +242,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 		 * will allocate from the current numa node which is what we
 		 * want here.
 		 */
-		obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
+		obj = __alloc(c, node, gfp);
 		if (!obj)
 			break;
 		add_obj_to_free_list(c, obj);
@@ -429,7 +433,7 @@ static void bpf_mem_refill(struct irq_work *work)
 		/* irq_work runs on this cpu and kmalloc will allocate
 		 * from the current numa node which is what we want here.
 		 */
-		alloc_bulk(c, c->batch, NUMA_NO_NODE);
+		alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
 	else if (cnt > c->high_watermark)
 		free_bulk(c);
 
@@ -477,7 +481,7 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 	 * prog won't be doing more than 4 map_update_elem from
 	 * irq disabled region
 	 */
-	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
+	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
 }
 
 /* When size != 0 bpf_mem_cache for each cpu.
-- 
cgit v1.2.3
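
For anyone who wants to exercise the affected path from userspace, here is a
minimal sketch (not part of this patch) that drives the call chain from the
trace above: map_create() -> htab_map_alloc() -> bpf_mem_alloc_init() ->
alloc_bulk(). It assumes libbpf >= 0.7 for bpf_map_create(); the map name,
key/value sizes, and max_entries are illustrative, not taken from test_maps.c.

/* Sketch only: creates a non-preallocated per-CPU hash map, which makes
 * htab_map_alloc() prefill its bpf_mem_alloc caches at creation time.
 * Build (assumed): cc repro.c -lbpf
 */
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	/* BPF_F_NO_PREALLOC is what routes the map through bpf_mem_alloc;
	 * fully preallocated hash maps use the pcpu_freelist path instead.
	 */
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		    .map_flags = BPF_F_NO_PREALLOC);
	int fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, "percpu_repro",
				sizeof(int), sizeof(long), 128, &opts);

	if (fd < 0) {
		/* Before this patch, prefill allocated with GFP_NOWAIT, so
		 * a full set of percpu chunks could surface here as ENOMEM
		 * at map creation time.
		 */
		perror("bpf_map_create");
		return 1;
	}
	close(fd);
	return 0;
}

Note the design choice in the patch itself: rather than plumbing a gfp_t
through both callers, alloc_bulk() takes a bool atomic and derives the flags
internally, so each call site changes by a single argument.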