author     Josh Don <joshdon@google.com>            2023-06-20 11:32:46 -0700
committer  Peter Zijlstra <peterz@infradead.org>    2023-07-13 15:21:49 +0200
commit     79462e8c879afc7895b30014d31e2c1fd629bb1f (patch)
tree       e892190700d63aa4386c2c8d729010b786a4540e /kernel/sched
parent     893cdaaa3977be6afb3a7f756fbfd7be83f68d8c (diff)
sched: don't account throttle time for empty groups
It is easy for a cfs_rq to become throttled even when it has no enqueued
entities (for example, if we have just put_prev()'d the last runnable task of
the cfs_rq, and the cfs_rq is out of quota).

Avoid accounting this time towards total throttle time, since it otherwise
falsely inflates the stats.

Note that the dequeue path is special, since we normally disallow migrations
when a task is in a throttled hierarchy (see throttled_lb_pair()).

Signed-off-by: Josh Don <joshdon@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230620183247.737942-1-joshdon@google.com
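Not part of the patch: a minimal userspace sketch of how the stat this change
corrects is typically observed. It assumes a cgroup v2 hierarchy mounted at
/sys/fs/cgroup and a hypothetical cgroup named "demo"; the throttled_usec
field of cpu.stat is derived from the cfs_b->throttled_time counter that the
hunks below update.

/* Sketch only: print the throttled_usec value fed by cfs_b->throttled_time. */
#include <stdio.h>

int main(void)
{
	/* Hypothetical cgroup path; adjust to the group under test. */
	FILE *f = fopen("/sys/fs/cgroup/demo/cpu.stat", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		unsigned long long usec;

		/* Accumulated wall time the group's cfs_rqs spent throttled. */
		if (sscanf(line, "throttled_usec %llu", &usec) == 1)
			printf("throttled for %llu us\n", usec);
	}

	fclose(f);
	return 0;
}

Before this change, a cfs_rq that ran out of quota with nothing left enqueued
would still start its throttled_clock, so a reading like the one above could
include throttled time during which no task was actually waiting.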
Diffstat (limited to 'kernel/sched')
 kernel/sched/fair.c | 18 +++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a80a73909dc2..51ccae747795 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4787,6 +4787,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
 
 static inline bool cfs_bandwidth_used(void);
@@ -4873,8 +4874,14 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (cfs_rq->nr_running == 1) {
 		check_enqueue_throttle(cfs_rq);
-		if (!throttled_hierarchy(cfs_rq))
+		if (!throttled_hierarchy(cfs_rq)) {
 			list_add_leaf_cfs_rq(cfs_rq);
+		} else {
+#ifdef CONFIG_CFS_BANDWIDTH
+			if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock)
+				cfs_rq->throttled_clock = rq_clock(rq_of(cfs_rq));
+#endif
+		}
 	}
 }
@@ -5480,7 +5487,9 @@ done:
 	 * throttled-list.  rq->lock protects completion.
 	 */
 	cfs_rq->throttled = 1;
-	cfs_rq->throttled_clock = rq_clock(rq);
+	SCHED_WARN_ON(cfs_rq->throttled_clock);
+	if (cfs_rq->nr_running)
+		cfs_rq->throttled_clock = rq_clock(rq);
 	return true;
 }
@@ -5498,7 +5507,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	update_rq_clock(rq);
 
 	raw_spin_lock(&cfs_b->lock);
-	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
+	if (cfs_rq->throttled_clock) {
+		cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
+		cfs_rq->throttled_clock = 0;
+	}
 	list_del_rcu(&cfs_rq->throttled_list);
 	raw_spin_unlock(&cfs_b->lock);