From ceb6ba45dc8074d2a1ec1117463dc94a20d4203d Mon Sep 17 00:00:00 2001
From: Vincent Guittot
Date: Thu, 1 Jul 2021 19:18:37 +0200
Subject: sched/fair: Sync load_sum with load_avg after dequeue

commit 9e077b52d86a ("sched/pelt: Check that *_avg are null when *_sum are")
reported some inconsistencies between *_avg and *_sum.

commit 1c35b07e6d39 ("sched/fair: Ensure _sum and _avg values stay
consistent") fixed some but one remains when dequeuing load.

Sync the cfs_rq's load_sum with its load_avg after dequeuing the load of a
sched_entity.

Fixes: 9e077b52d86a ("sched/pelt: Check that *_avg are null when *_sum are")
Reported-by: Sachin Sant
Signed-off-by: Vincent Guittot
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Odin Ugedal
Tested-by: Sachin Sant
Link: https://lore.kernel.org/r/20210701171837.32156-1-vincent.guittot@linaro.org
---
 kernel/sched/fair.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 45edf61eed73..1e263c9cdc13 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3037,8 +3037,9 @@ enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
+	u32 divider = get_pelt_divider(&se->avg);
 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+	cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
 }
 #else
 static inline void
--
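
Why the old code could go wrong: a *_avg is a *_sum divided by a PELT
divider, and the cfs_rq and the sched_entity accumulate their sums over
different PELT windows, so the entity's weighted load_sum can exceed what
the cfs_rq has left. sub_positive() then clamps cfs_rq load_sum to zero
while load_avg is still positive, tripping the check from commit
9e077b52d86a. The standalone sketch below reproduces this and the fix. It
is not kernel code: sub_positive() and get_pelt_divider() are simplified
stand-ins for the kernel's versions, the se_weight() scaling is assumed to
be folded into the sample numbers, and the numbers themselves are
illustrative.

/*
 * Standalone sketch of the inconsistency fixed by this patch -- NOT the
 * kernel implementation.
 */
#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_MAX 47742	/* maximum PELT geometric series sum */

struct sched_avg {
	uint64_t load_sum;
	unsigned long load_avg;
	uint32_t period_contrib;
};

/* Subtract, clamping at zero on underflow (kernel-style macro, using
 * the GCC typeof extension). */
#define sub_positive(ptr, val) do {			\
	typeof(*(ptr)) __val = (val);			\
	if (*(ptr) > __val)				\
		*(ptr) -= __val;			\
	else						\
		*(ptr) = 0;				\
} while (0)

/* Divider that turns a *_sum into a *_avg: the maximum series sum minus
 * the not-yet-elapsed part of the current 1024us window (same shape as
 * the kernel helper at the time of this patch). */
static uint32_t get_pelt_divider(struct sched_avg *avg)
{
	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
}

int main(void)
{
	/* The cfs_rq has slightly less load_sum than the (weighted) sum
	 * the sched_entity is about to remove -- possible because the
	 * two were accumulated over different PELT windows. */
	struct sched_avg cfs = { .load_sum = 40000, .load_avg = 2, .period_contrib = 512 };
	struct sched_avg se  = { .load_sum = 45000, .load_avg = 1, .period_contrib = 512 };

	sub_positive(&cfs.load_avg, se.load_avg);

	/* Old code: subtract the sums independently.  load_sum clamps
	 * to 0 while load_avg is still 1 -- the inconsistency that the
	 * check from commit 9e077b52d86a warns about. */
	uint64_t old_sum = cfs.load_sum;
	sub_positive(&old_sum, se.load_sum);
	printf("old: load_avg=%lu load_sum=%llu (inconsistent)\n",
	       cfs.load_avg, (unsigned long long)old_sum);

	/* New code: derive load_sum from load_avg so the two always
	 * agree -- load_sum is zero exactly when load_avg is. */
	cfs.load_sum = (uint64_t)cfs.load_avg * get_pelt_divider(&se);
	printf("new: load_avg=%lu load_sum=%llu (consistent)\n",
	       cfs.load_avg, (unsigned long long)cfs.load_sum);
	return 0;
}

Deriving load_sum from load_avg is deliberately one-directional: load_avg
is the value the rest of the scheduler consumes, so it is kept exact and
load_sum is resynchronized to match, rather than the other way around.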