summaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2024-09-27 10:02:39 -1000
committerTejun Heo <tj@kernel.org>2024-09-27 10:02:39 -1000
commit1bbcfe620e03aafc542b1c649dea0a9499a4490f (patch)
tree0bddf3f6bff7d1c3dbd5d4402b27e95afcad39cb /kernel/sched
parent6f34d8d382d64e7d8e77f5a9ddfd06f4c04937b0 (diff)
downloadlinux-stable-1bbcfe620e03aafc542b1c649dea0a9499a4490f.tar.gz
linux-stable-1bbcfe620e03aafc542b1c649dea0a9499a4490f.tar.bz2
linux-stable-1bbcfe620e03aafc542b1c649dea0a9499a4490f.zip
sched_ext: Relocate check_hotplug_seq() call in scx_ops_enable()
check_hotplug_seq() is used to detect CPU hotplug event which occurred while the BPF scheduler is being loaded so that initialization can be retried if CPU hotplug events take place before the CPU hotplug callbacks are online. As such, the best place to call it is in the same cpus_read_lock() section that enables the CPU hotplug ops. Currently, it is called in the next cpus_read_lock() block in scx_ops_enable(). The side effect of this placement is a small window in which hotplug sequence detection can trigger unnecessarily, which isn't critical. Move check_hotplug_seq() invocation to the same cpus_read_lock() block as the hotplug operation enablement to close the window and get the invocation out of the way for planned locking updates. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: David Vernet <void@manifault.com>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/ext.c3
1 file changed, 1 insertion, 2 deletions
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index d6f6bf6caecc..e8ab7e5ee2dd 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -5050,6 +5050,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
if (((void (**)(void))ops)[i])
static_branch_enable_cpuslocked(&scx_has_op[i]);
+ check_hotplug_seq(ops);
cpus_read_unlock();
ret = validate_ops(ops);
@@ -5098,8 +5099,6 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
cpus_read_lock();
scx_cgroup_lock();
- check_hotplug_seq(ops);
-
for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
if (((void (**)(void))ops)[i])
static_branch_enable_cpuslocked(&scx_has_op[i]);