author	Alex Shi <alexs@kernel.org>	2024-02-10 19:39:22 +0800
committer	Ingo Molnar <mingo@kernel.org>	2024-02-28 15:43:17 +0100
commit	fbc449864e0d2ee2c16f3af2d1e9093b9b8d7ad0 (patch)
tree	369e326110976cb3ea58560178482f9ad596e501 /kernel/sched
parent	45de20623475049c424bc0b89f42efca54995edd (diff)
sched/fair: Check the SD_ASYM_PACKING flag in sched_use_asym_prio()
sched_use_asym_prio() checks whether CPU priorities should be used. It makes
sense to check for the SD_ASYM_PACKING flag inside the function. Since both
sched_asym() and sched_group_asym() use sched_use_asym_prio(), remove the now
superfluous checks for the flag in various places.

Signed-off-by: Alex Shi <alexs@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Reviewed-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20240210113924.1130448-4-alexs@kernel.org
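For reference, a sketch of sched_use_asym_prio() as it reads with this patch
applied, reconstructed from the first hunk below; note that the closing
return statement is taken from the surrounding kernel context, which the
hunk does not show:

static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
{
	/* Without SD_ASYM_PACKING, CPU priority ordering is meaningless. */
	if (!(sd->flags & SD_ASYM_PACKING))
		return false;

	/* Without SMT, any idle CPU can honor asym_packing priorities. */
	if (!sched_smt_active())
		return true;

	/*
	 * With SMT, priorities apply inside the SMT domain itself
	 * (SD_SHARE_CPUCAPACITY) or when @cpu's whole core is idle.
	 */
	return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
}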
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 475e2ca66b63..39781a666c08 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9744,6 +9744,9 @@ group_type group_classify(unsigned int imbalance_pct,
  */
 static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
 {
+	if (!(sd->flags & SD_ASYM_PACKING))
+		return false;
+
 	if (!sched_smt_active())
 		return true;
 
@@ -9937,11 +9940,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	sgs->group_weight = group->group_weight;
 
 	/* Check if dst CPU is idle and preferred to this group */
-	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
-	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
-	    sched_group_asym(env, sgs, group)) {
+	if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
+	    sched_group_asym(env, sgs, group))
 		sgs->group_asym_packing = 1;
-	}
 
 	/* Check for loaded SMT group to be balanced to dst CPU */
 	if (!local_group && smt_balance(env, sgs, group))
@@ -11024,9 +11025,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		 * If balancing between cores, let lower priority CPUs help
 		 * SMT cores with more than one busy sibling.
 		 */
-		if ((env->sd->flags & SD_ASYM_PACKING) &&
-		    sched_asym(env->sd, i, env->dst_cpu) &&
-		    nr_running == 1)
+		if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1)
 			continue;
 
 		switch (env->migration_type) {
@@ -11122,8 +11121,7 @@ asym_active_balance(struct lb_env *env)
 	 * the lower priority @env::dst_cpu help it. Do not follow
 	 * CPU priority.
 	 */
-	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
-	       sched_use_asym_prio(env->sd, env->dst_cpu) &&
+	return env->idle != CPU_NOT_IDLE && sched_use_asym_prio(env->sd, env->dst_cpu) &&
 	       (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
 		!sched_use_asym_prio(env->sd, env->src_cpu));
 }
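With the flag test folded into sched_use_asym_prio(), its callers collapse to
plain conjunctions. As a sketch of the post-patch shape of sched_asym(),
which the commit message names but this diff does not show, assuming it
simply chains the two helpers:

static bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
{
	/*
	 * sched_use_asym_prio() now returns false when SD_ASYM_PACKING is
	 * clear, so no separate flag check is needed before comparing
	 * CPU priorities.
	 */
	return sched_use_asym_prio(sd, dst_cpu) &&
	       sched_asym_prefer(dst_cpu, src_cpu);
}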