author	Valentin Schneider <valentin.schneider@arm.com>	2020-11-02 18:45:14 +0000
committer	Peter Zijlstra <peterz@infradead.org>	2020-11-10 18:39:06 +0100
commit	dc824eb898534cd8e34582874dae3bb7cf2fa008 (patch)
tree	ff57347cb391f48949bfeb7102b5450b7b1182ff /kernel
parent	3aef1551e942860a3881087171ef0cd45f6ebda7 (diff)
sched/fair: Dissociate wakeup decisions from SD flag value
The CFS wakeup code will only ever go through EAS / its fast path on
"regular" wakeups (i.e. not on forks or execs). These are currently gated
by a check against 'sd_flag', which would be SD_BALANCE_WAKE at wakeup.
However, we now have a flag that explicitly tells us whether a wakeup is a
"regular" one, so hinge those conditions on that flag instead.

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20201102184514.2733-4-valentin.schneider@arm.com
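
To illustrate the relationship the patch relies on, here is a minimal,
self-contained C sketch (not kernel code; the flag values below are
assumptions chosen only to mirror the "shared first nibble" layout the
commit describes). It shows why masking wake_flags with 0xF recovers an
SD_BALANCE_* bit, and why testing WF_TTWU directly selects the same
"regular" wakeups without depending on SD_BALANCE_WAKE's value:

	/* Illustrative only -- these values are assumptions, not the kernel's. */
	#include <assert.h>

	#define SD_BALANCE_EXEC	0x2
	#define SD_BALANCE_FORK	0x4
	#define SD_BALANCE_WAKE	0x8

	#define WF_EXEC		0x2	/* wakeup after exec; maps to SD_BALANCE_EXEC */
	#define WF_FORK		0x4	/* wakeup after fork; maps to SD_BALANCE_FORK */
	#define WF_TTWU		0x8	/* "regular" wakeup;  maps to SD_BALANCE_WAKE */

	int main(void)
	{
		int wake_flags = WF_TTWU;	/* a regular try-to-wake-up */
		int sd_flag = wake_flags & 0xF;	/* shared first nibble */

		/* Old gate (via sd_flag) and new gate (via wake_flags) agree... */
		assert(!!(sd_flag & SD_BALANCE_WAKE) == !!(wake_flags & WF_TTWU));
		/* ...but the new test no longer cares what SD_BALANCE_WAKE is worth. */
		return 0;
	}
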
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b1596fa21bbe..6691e28fa3da 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6705,7 +6705,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 	/* SD_flags and WF_flags share the first nibble */
 	int sd_flag = wake_flags & 0xF;
 
-	if (sd_flag & SD_BALANCE_WAKE) {
+	if (wake_flags & WF_TTWU) {
 		record_wakee(p);
 
 		if (sched_energy_enabled()) {
@@ -6742,9 +6742,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 	if (unlikely(sd)) {
 		/* Slow path */
 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
-	} else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
+	} else if (wake_flags & WF_TTWU) { /* XXX always ? */
 		/* Fast path */
-
 		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
 
 		if (want_affine)