author		Tejun Heo <tj@kernel.org>	2024-09-04 10:24:59 -1000
committer	Tejun Heo <tj@kernel.org>	2024-09-04 10:24:59 -1000
commit		e179e80c5d4fef458c3cbc3ad4ea17c6d42c0446
tree		d75b716cf10d81ccc4897bec4212566748541677
parent		41082c1d1d2bf6c3e989785fd1def0f09cede446
sched: Introduce CONFIG_GROUP_SCHED_WEIGHT
sched_ext will soon add cgroup cpu.weight support. The cgroup interface code
is currently gated behind CONFIG_FAIR_GROUP_SCHED. As the fair class and/or
SCX may implement the feature, put the interface code behind the new
CONFIG_GROUP_SCHED_WEIGHT, which is selected by CONFIG_FAIR_GROUP_SCHED.
This allows either sched class to enable the interface code without adding
more complex CONFIG tests.
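As a rough standalone sketch of the pattern (illustrative only, not kernel
code: demo_tg_weight() is a made-up stand-in for the gated interface helpers,
and the CONFIG macro is supplied by hand rather than by Kconfig), building
with or without -DCONFIG_GROUP_SCHED_WEIGHT toggles the whole interface with
a single test:

#include <stdio.h>

/* In the kernel, Kconfig sets GROUP_SCHED_WEIGHT whenever a class that can
 * honor cgroup weights (today CONFIG_FAIR_GROUP_SCHED, later SCX) selects
 * it; the interface code then tests only this one symbol. */
#ifdef CONFIG_GROUP_SCHED_WEIGHT
static unsigned long demo_tg_weight(unsigned long shares)
{
	return shares >> 1;	/* stand-in for scale_load_down() */
}
#endif

int main(void)
{
#ifdef CONFIG_GROUP_SCHED_WEIGHT
	printf("cpu.weight interface built in, weight=%lu\n",
	       demo_tg_weight(2048));
#else
	printf("no weight-capable sched class configured\n");
#endif
	return 0;
}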
When !CONFIG_FAIR_GROUP_SCHED, a dummy version of sched_group_set_shares()
is added to support later CONFIG_GROUP_SCHED_WEIGHT &&
!CONFIG_FAIR_GROUP_SCHED builds.
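Roughly why the dummy is needed, as a self-contained sketch (hypothetical
names: demo_weight_write() stands in for the cpu.weight write path, which
always ends in sched_group_set_shares(); the real setter lives in
kernel/sched/fair.c and the real dummy is the one-liner in the sched.h hunk
below). Compiling with only -DCONFIG_GROUP_SCHED_WEIGHT exercises the stub
path:

#include <stdio.h>

struct task_group { unsigned long shares; };

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Stand-in for the real setter in kernel/sched/fair.c. */
static int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	tg->shares = shares;
	return 0;
}
#else /* !CONFIG_FAIR_GROUP_SCHED */
/* The patch's dummy: keeps GROUP_SCHED_WEIGHT && !FAIR_GROUP_SCHED builds
 * compiling even though no fair-class setter is linked in. */
static inline int sched_group_set_shares(struct task_group *tg,
					 unsigned long shares)
{
	(void)tg;
	(void)shares;
	return 0;
}
#endif

#ifdef CONFIG_GROUP_SCHED_WEIGHT
/* Stand-in for the cpu.weight write path, which always calls the setter. */
static int demo_weight_write(struct task_group *tg, unsigned long weight)
{
	return sched_group_set_shares(tg, weight);
}
#endif

int main(void)
{
#ifdef CONFIG_GROUP_SCHED_WEIGHT
	struct task_group tg = { .shares = 0 };
	printf("write -> %d, shares now %lu\n",
	       demo_weight_write(&tg, 1024), tg.shares);
#endif
	return 0;
}

Either way the write path compiles and links; without the fair class the
write is simply a no-op until a class that honors it is enabled.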
No functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--	init/Kconfig		 4
-rw-r--r--	kernel/sched/core.c	14
-rw-r--r--	kernel/sched/sched.h	 4
3 files changed, 14 insertions, 8 deletions
diff --git a/init/Kconfig b/init/Kconfig
index a465ea9525bd..84332d3594d0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1024,9 +1024,13 @@ menuconfig CGROUP_SCHED
 	  tasks.
 
 if CGROUP_SCHED
+config GROUP_SCHED_WEIGHT
+	def_bool n
+
 config FAIR_GROUP_SCHED
 	bool "Group scheduling for SCHED_OTHER"
 	depends on CGROUP_SCHED
+	select GROUP_SCHED_WEIGHT
 	default CGROUP_SCHED
 
 config CFS_BANDWIDTH
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 362918de593f..d302115b1522 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9193,7 +9193,7 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
 }
 #endif /* CONFIG_UCLAMP_TASK_GROUP */
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
 static unsigned long tg_weight(struct task_group *tg)
 {
 	return scale_load_down(tg->shares);
@@ -9212,6 +9212,7 @@ static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
 {
 	return tg_weight(css_tg(css));
 }
+#endif /* CONFIG_GROUP_SCHED_WEIGHT */
 
 #ifdef CONFIG_CFS_BANDWIDTH
 static DEFINE_MUTEX(cfs_constraints_mutex);
@@ -9557,7 +9558,6 @@ static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
 	return 0;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
-#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
@@ -9585,7 +9585,7 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
 static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css,
 			       struct cftype *cft)
 {
@@ -9600,7 +9600,7 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
 #endif
 
 static struct cftype cpu_legacy_files[] = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
 	{
 		.name = "shares",
 		.read_u64 = cpu_shares_read_u64,
@@ -9710,7 +9710,7 @@ static int cpu_local_stat_show(struct seq_file *sf,
 	return 0;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
 
 static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
 			       struct cftype *cft)
@@ -9764,7 +9764,7 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
 
 	return sched_group_set_shares(css_tg(css), scale_load(weight));
 }
-#endif
+#endif /* CONFIG_GROUP_SCHED_WEIGHT */
 
 static void __maybe_unused cpu_period_quota_print(struct seq_file *sf,
 						  long period, long quota)
@@ -9824,7 +9824,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
 #endif
 
 static struct cftype cpu_files[] = {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
 	{
 		.name = "weight",
 		.flags = CFTYPE_NOT_ON_ROOT,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b052ce72cdcd..0163f4af1c6e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -483,7 +483,7 @@ struct task_group {
 
 };
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_GROUP_SCHED_WEIGHT
 #define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
 
 /*
@@ -575,6 +575,8 @@ extern void set_task_rq_fair(struct sched_entity *se,
 static inline void set_task_rq_fair(struct sched_entity *se,
 			     struct cfs_rq *prev, struct cfs_rq *next) { }
 #endif /* CONFIG_SMP */
+#else /* !CONFIG_FAIR_GROUP_SCHED */
+static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) { return 0; }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #else /* CONFIG_CGROUP_SCHED */