author		Linus Torvalds <torvalds@linux-foundation.org>	2016-10-19 10:03:55 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-10-19 10:03:55 -0700
commit		893e2c5c9fedeccf89653b0ad17df69e88dbd707 (patch)
tree		93ad9b19f49d6449e0155a5c7bb8d3040d883e24 /kernel
parent		8a1e377e55f2dca5c689926313beeaa8ac2adb22 (diff)
parent		b5a9b340789b2b24c6896bcf7a065c31a4db671c (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fix from Ingo Molnar:

 "This fixes a group scheduling related performance/interactivity
  regression introduced in v4.8, which affects certain hardware
  environments where cpu_possible_mask != cpu_present_mask"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix incorrect task group ->load_avg
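As an illustration of the regression (the CPU counts here are assumed for the example, not taken from the commit): before this fix, every scheduling entity, including group entities, started with load_avg = scale_load_down(se->load.weight), i.e. 1024 for a default-weight entity. That initial value is added to the parent task group's load_avg when the group entity is attached, but on CPUs that are possible yet never come online the per-CPU cfs_rq is never updated, so the contribution never decays. On a machine with 4 present CPUs out of 128 possible, a child task group would keep roughly 124 * 1024 = 126976 of stale load in its parent's load_avg, dwarfing the ~1024 that a genuinely busy task contributes and shrinking the CPU share handed to the entities on the online CPUs. Initializing group entities with zero load avoids the stale contribution entirely.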
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	9
1 file changed, 8 insertions, 1 deletion
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 76ee7de1859d..d941c97dfbc3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -690,7 +690,14 @@ void init_entity_runnable_average(struct sched_entity *se)
 * will definitely be updated (after enqueue).
*/
sa->period_contrib = 1023;
- sa->load_avg = scale_load_down(se->load.weight);
+ /*
+	 * Tasks are initialized with full load to be seen as heavy tasks until
+	 * they get a chance to stabilize to their real load level.
+	 * Group entities are initialized with zero load to reflect the fact that
+ * nothing has been attached to the task group yet.
+ */
+ if (entity_is_task(se))
+ sa->load_avg = scale_load_down(se->load.weight);
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
/*
* At this point, util_avg won't be used in select_task_rq_fair anyway
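For reference, the entity_is_task() check used in the new branch distinguishes a plain task from a group entity by whether the entity owns a run queue of its own. A sketch of the macro as it appears in kernel/sched/sched.h, shown here for context and not part of this diff:

#ifdef CONFIG_FAIR_GROUP_SCHED
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)
#else
#define entity_is_task(se)	1
#endif

Only entities whose my_q is NULL (real tasks) therefore keep the full initial load_avg; group entities start at zero and accumulate load only once tasks are actually attached to the group.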