author     Dietmar Eggemann <dietmar.eggemann@arm.com>    2019-04-24 09:45:56 +0100
committer  Ingo Molnar <mingo@kernel.org>                 2019-06-03 11:49:37 +0200
commit     f2bedc4705659216bd60948029ad8dfedf923ad9 (patch)
tree       fb4764c4451398314005c61c5e0f0d1e542c0773
parent     3bd3706251ee8ab67e69d9340ac2abdca217e733 (diff)
sched/fair: Remove rq->load
The CFS class is the only one maintaining and using the CPU-wide load
(rq->load(.weight)). The last use of the CPU-wide load, in CFS's
set_next_entity(), can be replaced by using the load of the CFS class
(rq->cfs.load(.weight)) instead.
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190424084556.604-1-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
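
A minimal, self-contained C sketch of the invariant behind this removal, assuming simplified stand-ins for the kernel code (the struct rq / struct cfs_rq / struct sched_entity definitions and the update_load_add() / account_entity_enqueue() helpers below are toy models, not the real kernel implementations): before this patch, rq->load was only updated for entities without a parent, i.e. entities queued on the root cfs_rq, so rq->load.weight always mirrored rq->cfs.load.weight and set_next_entity() can read the latter instead.

/*
 * Toy model only -- simplified, non-kernel definitions for illustration.
 */
#include <assert.h>
#include <stdio.h>

struct load_weight { unsigned long weight; };

struct cfs_rq { struct load_weight load; };

struct rq {
	struct load_weight load;	/* the per-CPU aggregate removed by this patch */
	struct cfs_rq cfs;		/* root cfs_rq of this CPU */
};

struct sched_entity {
	struct load_weight load;
	struct sched_entity *parent;	/* NULL for entities on the root cfs_rq */
	struct cfs_rq *cfs_rq;		/* cfs_rq this entity is queued on */
};

static void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
}

/* Pre-patch behaviour of account_entity_enqueue(), reduced to load bookkeeping. */
static void account_entity_enqueue(struct rq *rq, struct sched_entity *se)
{
	update_load_add(&se->cfs_rq->load, se->load.weight);
	if (!se->parent)		/* root level: se->cfs_rq == &rq->cfs */
		update_load_add(&rq->load, se->load.weight);
}

int main(void)
{
	struct rq rq = { { 0 }, { { 0 } } };
	struct cfs_rq group_cfs_rq = { { 0 } };

	struct sched_entity group = { .load = { 2048 }, .parent = NULL,   .cfs_rq = &rq.cfs };
	struct sched_entity task  = { .load = { 1024 }, .parent = &group, .cfs_rq = &group_cfs_rq };

	account_entity_enqueue(&rq, &group);	/* root-level group entity */
	account_entity_enqueue(&rq, &task);	/* task nested below the group */

	/* rq->load never diverges from rq->cfs.load, hence the field is redundant. */
	assert(rq.load.weight == rq.cfs.load.weight);
	printf("rq->load.weight=%lu rq->cfs.load.weight=%lu\n",
	       rq.load.weight, rq.cfs.load.weight);
	return 0;
}

Built with a plain cc invocation, the assertion holds for both the root-level group and the nested task enqueue, which is why the diff below can drop rq->load and compare against rq->cfs.load.weight in set_next_entity().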
-rw-r--r--  kernel/sched/debug.c | 2
-rw-r--r--  kernel/sched/fair.c  | 7
-rw-r--r--  kernel/sched/sched.h | 2
3 files changed, 2 insertions, 9 deletions
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 678bfb9bd87f..150043e1d716 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -656,8 +656,6 @@ do {									\
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
 
 	P(nr_running);
-	SEQ_printf(m, "  .%-30s: %lu\n", "load",
-		   rq->load.weight);
 	P(nr_switches);
 	P(nr_load_updates);
 	P(nr_uninterruptible);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8691a8fffe40..08b1cb06f968 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2686,8 +2686,6 @@ static void
 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	update_load_add(&cfs_rq->load, se->load.weight);
-	if (!parent_entity(se))
-		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se)) {
 		struct rq *rq = rq_of(cfs_rq);
@@ -2703,8 +2701,6 @@ static void
 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	update_load_sub(&cfs_rq->load, se->load.weight);
-	if (!parent_entity(se))
-		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se)) {
 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
@@ -4100,7 +4096,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * least twice that of our own weight (i.e. dont track it
 	 * when there are only lesser-weight tasks around):
 	 */
-	if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+	if (schedstat_enabled() &&
+	    rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
 		schedstat_set(se->statistics.slice_max,
 			max((u64)schedstat_val(se->statistics.slice_max),
 			    se->sum_exec_runtime - se->prev_sum_exec_runtime));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b52ed1ada0be..c308410675ed 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -830,8 +830,6 @@ struct rq {
 	atomic_t		nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 
-	/* capture load from *all* tasks on this CPU: */
-	struct load_weight	load;
 	unsigned long		nr_load_updates;
 	u64			nr_switches;
 