| author | Chengming Zhou <zhouchengming@bytedance.com> | 2022-08-18 20:48:03 +0800 |
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2022-08-23 11:01:19 +0200 |
| commit | df16b71c686cb096774e30153c9ce6756450796c (patch) | |
| tree | 91b3b04f50be7489b5d8c0ccca0abf4a3e79ced9 /kernel/sched | |
| parent | 7e2edaf61814fb6aa363989d718950c023b882d4 (diff) | |
sched/fair: Allow changing cgroup of new forked task
commit 7dc603c9028e ("sched/fair: Fix PELT integrity for new tasks")
introduced the TASK_NEW state and an unnecessary limitation that made
changing the cgroup of a newly forked task fail.

The limitation existed because, at the time, task_change_group_fair()
could not handle a newly forked fair task that had not yet been woken
up by wake_up_new_task(): moving such a task would detach a sched_avg
that was never attached.

This patch deletes the unnecessary limitation by adding a check before
doing the detach or attach in task_change_group_fair().

As a result, cpu_cgrp_subsys.can_attach() has nothing left to do for
fair tasks, so it is now only defined under #ifdef CONFIG_RT_GROUP_SCHED.
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20220818124805.601-8-zhouchengming@bytedance.com
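
For reference, here is the patched task_change_group_fair() assembled from the fair.c hunk below, so the end state can be read without mentally applying the diff. The tail of the function after the hunk's last context line (the CONFIG_SMP branch and the final set_task_rq()/attach_task_cfs_rq() calls) is not part of this diff and is included only as an assumption about the surrounding fair.c code of that era:

```c
static void task_change_group_fair(struct task_struct *p)
{
	/*
	 * We couldn't detach or attach a forked task which
	 * hasn't been woken up by wake_up_new_task().
	 */
	if (READ_ONCE(p->__state) == TASK_NEW)
		return;

	detach_task_cfs_rq(p);

#ifdef CONFIG_SMP
	/* Assumed tail, beyond this hunk: mark the PELT avg as migrated. */
	p->se.avg.last_update_time = 0;
#endif
	set_task_rq(p, task_cpu(p));	/* assumed tail: switch to the new group's cfs_rq */
	attach_task_cfs_rq(p);		/* assumed tail: re-attach the sched_avg */
}
```

A task still in TASK_NEW has never had its sched_avg attached, so detaching and re-attaching it here would corrupt PELT; returning early is safe because wake_up_new_task() will attach the task to whatever cfs_rq it belongs to by then.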
Diffstat (limited to 'kernel/sched')

| -rw-r--r-- | kernel/sched/core.c | 25 |
| -rw-r--r-- | kernel/sched/fair.c | 7 |

2 files changed, 12 insertions(+), 20 deletions(-)
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e74e79f783af..603a80ec9b0e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10238,36 +10238,19 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 	sched_unregister_group(tg);
 }
 
+#ifdef CONFIG_RT_GROUP_SCHED
 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct cgroup_subsys_state *css;
-	int ret = 0;
 
 	cgroup_taskset_for_each(task, css, tset) {
-#ifdef CONFIG_RT_GROUP_SCHED
 		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
-#endif
-		/*
-		 * Serialize against wake_up_new_task() such that if it's
-		 * running, we're sure to observe its full state.
-		 */
-		raw_spin_lock_irq(&task->pi_lock);
-		/*
-		 * Avoid calling sched_move_task() before wake_up_new_task()
-		 * has happened. This would lead to problems with PELT, due to
-		 * move wanting to detach+attach while we're not attached yet.
-		 */
-		if (READ_ONCE(task->__state) == TASK_NEW)
-			ret = -EINVAL;
-		raw_spin_unlock_irq(&task->pi_lock);
-
-		if (ret)
-			break;
 	}
-	return ret;
+	return 0;
 }
+#endif
 
 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 {
@@ -11103,7 +11086,9 @@ struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_released	= cpu_cgroup_css_released,
 	.css_free	= cpu_cgroup_css_free,
 	.css_extra_stat_show = cpu_extra_stat_show,
+#ifdef CONFIG_RT_GROUP_SCHED
 	.can_attach	= cpu_cgroup_can_attach,
+#endif
 	.attach		= cpu_cgroup_attach,
 	.legacy_cftypes	= cpu_legacy_files,
 	.dfl_cftypes	= cpu_files,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e92bc053aff6..fd1aa4c92b2d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11676,6 +11676,13 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_change_group_fair(struct task_struct *p)
 {
+	/*
+	 * We couldn't detach or attach a forked task which
+	 * hasn't been woken up by wake_up_new_task().
+	 */
+	if (READ_ONCE(p->__state) == TASK_NEW)
+		return;
+
 	detach_task_cfs_rq(p);
 
 #ifdef CONFIG_SMP
```
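
After the core.c hunk is applied, what survives of cpu_cgroup_can_attach() is only the RT bandwidth check, now compiled solely under CONFIG_RT_GROUP_SCHED. This reconstruction is read directly off the context and '+' lines above:

```c
#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	/*
	 * Reject the migration if any RT task in the set cannot run
	 * within the destination group's RT bandwidth.
	 */
	cgroup_taskset_for_each(task, css, tset) {
		if (!sched_rt_can_attach(css_tg(css), task))
			return -EINVAL;
	}
	return 0;
}
#endif
```

With CONFIG_RT_GROUP_SCHED disabled, cpu_cgrp_subsys no longer installs a .can_attach callback at all; the TASK_NEW safety now lives entirely in the early return inside task_change_group_fair().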