author    | Vincent Guittot <vincent.guittot@linaro.org> | 2021-02-24 14:30:04 +0100
committer | Ingo Molnar <mingo@kernel.org> | 2021-03-06 12:40:21 +0100
commit    | 7a82e5f52a3506bc35a4dc04d53ad2c9daf82e7f
tree      | dec1abc82a53513bc4a12915ecb8d83b5b6430fc /kernel/sched
parent    | 64f84f273592d17dcdca20244168ad9f525a39c3
sched/fair: Merge for each idle cpu loop of ILB
Remove the specific case that handled this_cpu outside the for_each_cpu() loop
when running the ILB. Instead, use for_each_cpu_wrap() and start with the CPU
right after this_cpu, so the iteration still finishes with this_cpu.
update_nohz_stats() is now used for this_cpu too and prevents an unnecessary
update. We no longer need a special case for updating nohz.next_balance for
this_cpu because it is now handled by the loop like the other CPUs.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210224133007.28644-5-vincent.guittot@linaro.org
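[Editor's illustration] To make the new iteration order concrete, here is a minimal user-space C sketch that mimics what for_each_cpu_wrap(cpu, mask, this_cpu + 1) does over a toy idle mask. The names and the plain bitmask are stand-ins for the kernel's cpumask machinery (the real macro lives in include/linux/cpumask.h); this is not kernel code.

#include <stdio.h>

#define NR_CPUS 8

/* Toy stand-in for nohz.idle_cpus_mask: bit n set => CPU n is idle. */
static unsigned int idle_mask = 0xB5; /* CPUs 0, 2, 4, 5, 7 */

int main(void)
{
	int this_cpu = 4;
	int i, cpu;

	/*
	 * Mimic for_each_cpu_wrap(cpu, mask, this_cpu + 1): walk all CPUs
	 * starting just after this_cpu and wrap around, so this_cpu is
	 * visited last and the other idle CPUs get balanced first.
	 */
	for (i = 0; i < NR_CPUS; i++) {
		cpu = (this_cpu + 1 + i) % NR_CPUS;
		if (!(idle_mask & (1u << cpu)))
			continue; /* skip busy CPUs, like the !idle_cpu() check */
		printf("balance CPU %d%s\n", cpu,
		       cpu == this_cpu ? " (this_cpu, last)" : "");
	}
	return 0;
}

Run against this example mask, it prints CPUs 5, 7, 0, 2 and finally 4, showing that this_cpu is balanced last, after the other idle CPUs have had a chance to pull load.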
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/fair.c | 32
1 file changed, 7 insertions, 25 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1b91030f97cd..3c00918f158f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10043,22 +10043,9 @@ out:
 	 * When the cpu is attached to null domain for ex, it will not be
 	 * updated.
 	 */
-	if (likely(update_next_balance)) {
+	if (likely(update_next_balance))
 		rq->next_balance = next_balance;
-
-#ifdef CONFIG_NO_HZ_COMMON
-		/*
-		 * If this CPU has been elected to perform the nohz idle
-		 * balance. Other idle CPUs have already rebalanced with
-		 * nohz_idle_balance() and nohz.next_balance has been
-		 * updated accordingly. This CPU is now running the idle load
-		 * balance for itself and we need to update the
-		 * nohz.next_balance accordingly.
-		 */
-		if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
-			nohz.next_balance = rq->next_balance;
-#endif
-	}
 }
 
 static inline int on_null_domain(struct rq *rq)
@@ -10385,8 +10372,12 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 	 */
 	smp_mb();
 
-	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
-		if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
+	/*
+	 * Start with the next CPU after this_cpu so we will end with this_cpu and let a
+	 * chance for other idle cpu to pull load.
+	 */
+	for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
+		if (!idle_cpu(balance_cpu))
 			continue;
 
 		/*
@@ -10432,15 +10423,6 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
 	if (likely(update_next_balance))
 		nohz.next_balance = next_balance;
 
-	/* Newly idle CPU doesn't need an update */
-	if (idle != CPU_NEWLY_IDLE) {
-		update_blocked_averages(this_cpu);
-		has_blocked_load |= this_rq->has_blocked_load;
-	}
-
-	if (flags & NOHZ_BALANCE_KICK)
-		rebalance_domains(this_rq, CPU_IDLE);
-
 	WRITE_ONCE(nohz.next_blocked, now + msecs_to_jiffies(LOAD_AVG_PERIOD));
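[Editor's illustration] The removed tail of _nohz_idle_balance() is what made this_cpu special: it updated blocked averages and kicked rebalance_domains() outside the loop. Per the commit message, running update_nohz_stats() for this_cpu inside the loop instead is cheap because that function prevents unnecessary updates. Below is a toy user-space C sketch of one plausible form of that rate limiting, assuming a jiffies-style tick stamp per runqueue; toy_rq and toy_update_nohz_stats are hypothetical names, not the kernel's implementation.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a jiffies-based rate limit on blocked-load updates. */
static unsigned long jiffies;

struct toy_rq {
	bool has_blocked_load;
	unsigned long last_blocked_load_update_tick;
};

static bool toy_update_nohz_stats(struct toy_rq *rq)
{
	if (!rq->has_blocked_load)
		return false;

	/* Already refreshed this tick: bail out without doing the work,
	 * which is what makes calling this for this_cpu harmless. */
	if (jiffies <= rq->last_blocked_load_update_tick)
		return true;

	rq->last_blocked_load_update_tick = jiffies;
	printf("decaying blocked load at tick %lu\n", jiffies);
	rq->has_blocked_load = false; /* pretend the load fully decayed */
	return rq->has_blocked_load;
}

int main(void)
{
	struct toy_rq rq = { .has_blocked_load = true };

	jiffies = 1;
	toy_update_nohz_stats(&rq); /* first call this tick: does the work */
	rq.has_blocked_load = true;
	toy_update_nohz_stats(&rq); /* same tick: skipped by the rate limit */
	return 0;
}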