author     Xunlei Pang <xlpang@linux.alibaba.com>   2018-06-20 18:18:34 +0800
committer  Ingo Molnar <mingo@kernel.org>           2018-07-03 09:17:29 +0200
commit     f1d1be8aee6c461652aea8f58bedebaa73d7f4d3 (patch)
tree       d3d3500dbd34f3232e2b95aa9a7d48bef910d485 /kernel
parent     512ac999d2755d2b7109e996a76b6fb8b888631d (diff)
sched/fair: Advance global expiration when period timer is restarted
When the period timer gets restarted after some idle time, start_cfs_bandwidth() doesn't update the expiration information, so expire_cfs_rq_runtime() sees cfs_rq->runtime_expires smaller than the rq clock and falls into the clock-drift logic, needlessly wasting CPU cycles on the scheduler hot path.

Update the global expiration in start_cfs_bandwidth() to avoid frequent expire_cfs_rq_runtime() calls once a new period begins.

Signed-off-by: Xunlei Pang <xlpang@linux.alibaba.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ben Segall <bsegall@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180620101834.24455-2-xlpang@linux.alibaba.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
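For illustration only (not part of the commit): a minimal user-space sketch of the arithmetic the patch adds to start_cfs_bandwidth(), under the simplifying assumption that the timer expiry and the quota deadline can be treated as one value. Every identifier below is a hypothetical stand-in for struct cfs_bandwidth and the hrtimer API, not the kernel code itself.

/*
 * Illustrative sketch only -- not kernel code. It models how the patch
 * advances the global expiration when the period timer is restarted:
 * one period per overrun, plus the period that is now starting, and a
 * bump of the expiration sequence counter.
 */
#include <stdint.h>
#include <stdio.h>

struct bandwidth_model {               /* hypothetical stand-in for struct cfs_bandwidth */
        uint64_t period_ns;            /* quota replenishment period */
        uint64_t runtime_expires;      /* global deadline for the current quota */
        int      expires_seq;          /* bumped each time the deadline advances */
};

/*
 * Rough model of hrtimer_forward_now(): the number of period-sized steps
 * needed to push the old expiry strictly past "now" (0 if still in the future).
 */
static uint64_t forward_now(uint64_t now_ns, uint64_t expiry_ns, uint64_t period_ns)
{
        if (now_ns < expiry_ns)
                return 0;
        return (now_ns - expiry_ns) / period_ns + 1;
}

/* Mirrors the lines the patch adds for the timer-restart case. */
static void restart_period(struct bandwidth_model *b, uint64_t now_ns)
{
        uint64_t overrun = forward_now(now_ns, b->runtime_expires, b->period_ns);

        b->runtime_expires += (overrun + 1) * b->period_ns;
        b->expires_seq++;
}

int main(void)
{
        /* 100ms period; the group idled and the clock ran 350ms past the old deadline. */
        struct bandwidth_model b = {
                .period_ns       = 100 * 1000 * 1000ULL,
                .runtime_expires = 1000 * 1000 * 1000ULL,
        };

        restart_period(&b, b.runtime_expires + 350 * 1000 * 1000ULL);

        /* The deadline now sits ahead of the clock (1.0s -> 1.5s) instead of staying stale. */
        printf("runtime_expires=%llu ns, expires_seq=%d\n",
               (unsigned long long)b.runtime_expires, b.expires_seq);
        return 0;
}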
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched/fair.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 791707c56886..840b92ee6f89 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5204,13 +5204,18 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+        u64 overrun;
+
         lockdep_assert_held(&cfs_b->lock);
 
-        if (!cfs_b->period_active) {
-                cfs_b->period_active = 1;
-                hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
-                hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
-        }
+        if (cfs_b->period_active)
+                return;
+
+        cfs_b->period_active = 1;
+        overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+        cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period);
+        cfs_b->expires_seq++;
+        hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
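Also for illustration only (not part of the patch): a simplified user-space model of the hot-path check the commit message refers to. Once a restarted period has advanced the global deadline and a cfs_rq has picked it up, the deadline compares ahead of the runqueue clock and the cheap early-out is taken; a stale deadline instead falls through to the slower clock-drift handling. All names below are hypothetical stand-ins for the logic in expire_cfs_rq_runtime(), not the kernel function.

/*
 * Simplified model (not the kernel function) of the hot-path test described
 * in the commit message: a deadline still ahead of the runqueue clock means
 * there is nothing to do; only a stale deadline enters the drift handling
 * that the patch avoids re-running once a new period has begun.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cfs_rq_model {                  /* hypothetical stand-in for struct cfs_rq */
        uint64_t runtime_expires;      /* cached copy of the global deadline */
};

/* Returns true when the slow drift-handling branch would be taken. */
static bool deadline_stale(const struct cfs_rq_model *cfs_rq, uint64_t rq_clock_ns)
{
        return rq_clock_ns >= cfs_rq->runtime_expires;
}

int main(void)
{
        uint64_t rq_clock = 1350000000ULL;                                      /* 1.35s */
        struct cfs_rq_model stale    = { .runtime_expires = 1000000000ULL };    /* deadline left behind */
        struct cfs_rq_model advanced = { .runtime_expires = 1500000000ULL };    /* picked up the advanced deadline */

        printf("stale deadline    -> slow path: %d\n", deadline_stale(&stale, rq_clock));
        printf("advanced deadline -> slow path: %d\n", deadline_stale(&advanced, rq_clock));
        return 0;
}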