author	Yan Zhai <yan@cloudflare.com>	2024-03-19 13:44:40 -0700
committer	Sasha Levin <sashal@kernel.org>	2024-03-26 18:18:59 -0400
commit	c3e54a754bc5ecc9abe99f32b9736f9eb15c5381 (patch)
tree	454709754fc30e5c5e57b6d7f7a6c546d2a81f7f
parent	d1d024b6601862cf8593bbccbb416c45dc17dc11 (diff)
bpf: report RCU QS in cpumap kthread
[ Upstream commit 00bf63122459e87193ee7f1bc6161c83a525569f ]

Under heavy load, cpumap kernel threads can be busy polling packets
from redirect queues and block RCU tasks from reaching quiescent
states. It is insufficient to just call cond_resched() in such a
context. Periodically raising a consolidated RCU QS before
cond_resched() fixes the problem.

Fixes: 6710e1126934 ("bpf: introduce new bpf cpu map type BPF_MAP_TYPE_CPUMAP")
Reviewed-by: Jesper Dangaard Brouer <hawk@kernel.org>
Signed-off-by: Yan Zhai <yan@cloudflare.com>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Link: https://lore.kernel.org/r/c17b9f1517e19d813da3ede5ed33ee18496bb5d8.1710877680.git.yan@cloudflare.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
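For context, the patch below relies on the rcu_softirq_qs_periodic()
helper introduced alongside this fix in include/linux/rcupdate.h. A
minimal sketch of its behavior (paraphrased, not the verbatim kernel
source; the exact definition in your tree may differ):

/*
 * Sketch of rcu_softirq_qs_periodic(): a long-running kthread that
 * rarely sleeps can starve RCU of quiescent states. If more than
 * HZ / 10 jiffies (~100ms) have passed since the last recorded QS,
 * report a consolidated QS by hand and reset the timestamp. Skipped
 * on PREEMPT_RT, where the scheduler handles this differently.
 */
#define rcu_softirq_qs_periodic(old_ts)				\
do {								\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&			\
	    time_after(jiffies, (old_ts) + HZ / 10)) {		\
		preempt_disable();				\
		rcu_softirq_qs();				\
		preempt_enable();				\
		(old_ts) = jiffies;				\
	}							\
} while (0)

The caller keeps a jiffies timestamp (last_qs below) and invokes the
macro on every busy-poll iteration; the time_after() check keeps the
actual QS reporting cheap and infrequent.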
-rw-r--r--	kernel/bpf/cpumap.c | 3 +++
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ef82ffc90cbe..8f1d390bcbde 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -262,6 +262,7 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
 static int cpu_map_kthread_run(void *data)
 {
 	struct bpf_cpu_map_entry *rcpu = data;
+	unsigned long last_qs = jiffies;
 
 	complete(&rcpu->kthread_running);
 	set_current_state(TASK_INTERRUPTIBLE);
@@ -287,10 +288,12 @@ static int cpu_map_kthread_run(void *data)
 			if (__ptr_ring_empty(rcpu->queue)) {
 				schedule();
 				sched = 1;
+				last_qs = jiffies;
 			} else {
 				__set_current_state(TASK_RUNNING);
 			}
 		} else {
+			rcu_softirq_qs_periodic(last_qs);
 			sched = cond_resched();
 		}