author | Vincent Donnefort <vincent.donnefort@arm.com> | 2020-05-27 17:39:14 +0100
---|---|---
committer | Peter Zijlstra <peterz@infradead.org> | 2020-06-15 14:10:02 +0200
commit | 4581bea8b4ec4de353369775dfef921191e393b3 (patch) |
tree | ab0e45a3b8cb07f6d3a13a6e460ec34b9c006624 /kernel/sched/fair.c |
parent | 1ca2034ed798aea72a68d3904bd39a6cbfbdf405 (diff) |
sched/debug: Add new tracepoints to track util_est
The util_est signals are key inputs to EAS task placement and schedutil
frequency selection. Tracepoints that track these signals allow an
external toolkit to test and debug load tracking and schedutil.
Signed-off-by: Vincent Donnefort <vincent.donnefort@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/1590597554-370150-1-git-send-email-vincent.donnefort@arm.com
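
As a rough illustration of how such a toolkit could consume these bare tracepoints (they carry no trace-event format and are meant to be hooked from a module), here is a minimal sketch of a probe module. It is not part of this commit: it assumes the tracepoints are declared with DECLARE_TRACE() and exported to modules elsewhere in the series (the diffstat here is limited to fair.c), that the kernel is an SMP build (util_est only exists there), and that the sched_trace_cfs_rq_*() helpers of this kernel era are available for peeking into the otherwise private struct cfs_rq. The probe names and trace_printk() formats are arbitrary.

```c
// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch: hook the new util_est bare tracepoints from a module. */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

/*
 * struct cfs_rq is private to kernel/sched, so the probe goes through the
 * sched_trace_cfs_rq_*() accessors (assumed available in this kernel era)
 * instead of dereferencing the pointer directly.
 */
static void probe_util_est_cfs(void *data, struct cfs_rq *cfs_rq)
{
	const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);

	if (avg)
		trace_printk("cpu=%d cfs util_est.enqueued=%u\n",
			     sched_trace_cfs_rq_cpu(cfs_rq),
			     avg->util_est.enqueued);
}

/* struct sched_entity is visible to modules, so its util_est can be read directly. */
static void probe_util_est_se(void *data, struct sched_entity *se)
{
	trace_printk("se util_est.enqueued=%u ewma=%u\n",
		     se->avg.util_est.enqueued, se->avg.util_est.ewma);
}

static int __init util_est_tp_init(void)
{
	int ret;

	ret = register_trace_sched_util_est_cfs_tp(probe_util_est_cfs, NULL);
	if (ret)
		return ret;

	ret = register_trace_sched_util_est_se_tp(probe_util_est_se, NULL);
	if (ret)
		unregister_trace_sched_util_est_cfs_tp(probe_util_est_cfs, NULL);

	return ret;
}

static void __exit util_est_tp_exit(void)
{
	unregister_trace_sched_util_est_se_tp(probe_util_est_se, NULL);
	unregister_trace_sched_util_est_cfs_tp(probe_util_est_cfs, NULL);
	tracepoint_synchronize_unregister();
}

module_init(util_est_tp_init);
module_exit(util_est_tp_exit);
MODULE_DESCRIPTION("Example util_est tracepoint probes");
MODULE_LICENSE("GPL");
```

Once such a module is loaded, the trace_printk() output lands in the ftrace ring buffer next to whatever other scheduler events the toolkit enables.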
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r-- | kernel/sched/fair.c | 6 |
1 file changed, 6 insertions(+), 0 deletions(-)
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 69da576f7f48..a785a9b262dd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3922,6 +3922,8 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
 	enqueued  = cfs_rq->avg.util_est.enqueued;
 	enqueued += _task_util_est(p);
 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+	trace_sched_util_est_cfs_tp(cfs_rq);
 }
 
 /*
@@ -3952,6 +3954,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
+	trace_sched_util_est_cfs_tp(cfs_rq);
+
 	/*
 	 * Skip update of task's estimated utilization when the task has not
 	 * yet completed an activation, e.g. being migrated.
@@ -4017,6 +4021,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 		ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
 done:
 	WRITE_ONCE(p->se.avg.util_est, ue);
+
+	trace_sched_util_est_se_tp(&p->se);
 }
 
 static inline int task_fits_capacity(struct task_struct *p, long capacity)
```