From 2ed41a55023dc5be6742ca0eb8df5cb20e8dcaae Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 23 Jan 2018 20:34:30 +0100
Subject: sched/core: Optimize update_stats_*()

These functions are already gated by schedstats_enabled(), there is no
point in then issuing another static_branch for every individual update
in them.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

(limited to 'kernel/sched/fair.c')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7b6535987500..a6b8157197bf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -871,7 +871,7 @@ update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	    likely(wait_start > prev_wait_start))
 		wait_start -= prev_wait_start;
 
-	schedstat_set(se->statistics.wait_start, wait_start);
+	__schedstat_set(se->statistics.wait_start, wait_start);
 }
 
 static inline void
@@ -893,17 +893,17 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			 * time stamp can be adjusted to accumulate wait time
 			 * prior to migration.
 			 */
-			schedstat_set(se->statistics.wait_start, delta);
+			__schedstat_set(se->statistics.wait_start, delta);
 			return;
 		}
 		trace_sched_stat_wait(p, delta);
 	}
 
-	schedstat_set(se->statistics.wait_max,
+	__schedstat_set(se->statistics.wait_max,
 		      max(schedstat_val(se->statistics.wait_max), delta));
-	schedstat_inc(se->statistics.wait_count);
-	schedstat_add(se->statistics.wait_sum, delta);
-	schedstat_set(se->statistics.wait_start, 0);
+	__schedstat_inc(se->statistics.wait_count);
+	__schedstat_add(se->statistics.wait_sum, delta);
+	__schedstat_set(se->statistics.wait_start, 0);
 }
 
 static inline void
@@ -928,10 +928,10 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			delta = 0;
 
 		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
-			schedstat_set(se->statistics.sleep_max, delta);
+			__schedstat_set(se->statistics.sleep_max, delta);
 
-		schedstat_set(se->statistics.sleep_start, 0);
-		schedstat_add(se->statistics.sum_sleep_runtime, delta);
+		__schedstat_set(se->statistics.sleep_start, 0);
+		__schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
 		if (tsk) {
 			account_scheduler_latency(tsk, delta >> 10, 1);
@@ -945,15 +945,15 @@ update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			delta = 0;
 
 		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
-			schedstat_set(se->statistics.block_max, delta);
+			__schedstat_set(se->statistics.block_max, delta);
 
-		schedstat_set(se->statistics.block_start, 0);
-		schedstat_add(se->statistics.sum_sleep_runtime, delta);
+		__schedstat_set(se->statistics.block_start, 0);
+		__schedstat_add(se->statistics.sum_sleep_runtime, delta);
 
 		if (tsk) {
 			if (tsk->in_iowait) {
-				schedstat_add(se->statistics.iowait_sum, delta);
-				schedstat_inc(se->statistics.iowait_count);
+				__schedstat_add(se->statistics.iowait_sum, delta);
+				__schedstat_inc(se->statistics.iowait_count);
 				trace_sched_stat_iowait(tsk, delta);
 			}
 
@@ -1012,10 +1012,10 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		struct task_struct *tsk = task_of(se);
 
 		if (tsk->state & TASK_INTERRUPTIBLE)
-			schedstat_set(se->statistics.sleep_start,
+			__schedstat_set(se->statistics.sleep_start,
 				      rq_clock(rq_of(cfs_rq)));
 		if (tsk->state & TASK_UNINTERRUPTIBLE)
-			schedstat_set(se->statistics.block_start,
+			__schedstat_set(se->statistics.block_start,
 				      rq_clock(rq_of(cfs_rq)));
 	}
 }
--
cgit v1.2.3
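
For readers unfamiliar with the schedstat helpers, the sketch below illustrates
the pattern the patch exploits. It is a simplified user-space model, not the
real kernel/sched/stats.h macros or the actual update_stats_*() callers:
schedstat_set() and friends re-test the schedstats static branch on every
update, whereas the __schedstat_*() variants are plain assignments that assume
the caller has already tested schedstat_enabled() once at the top of the
function.

/*
 * Hedged sketch only: a user-space model of checked vs. unchecked
 * schedstat helpers. Names mirror the kernel ones, but the static
 * key is modelled with an ordinary bool.
 */
#include <stdbool.h>
#include <stdio.h>

static bool sched_schedstats = true;		/* stand-in for the static key */
#define schedstat_enabled()	(sched_schedstats)

/* Checked helper: one branch per update. */
#define schedstat_set(var, val)	\
	do { if (schedstat_enabled()) (var) = (val); } while (0)
/* Unchecked helper: caller is already behind schedstat_enabled(). */
#define __schedstat_set(var, val)	do { (var) = (val); } while (0)

struct sched_statistics {
	unsigned long long wait_start;
};

/* Before the patch: the gate is tested again inside every update. */
static void update_wait_start_checked(struct sched_statistics *st,
				      unsigned long long now)
{
	if (!schedstat_enabled())
		return;
	schedstat_set(st->wait_start, now);	/* redundant second test */
}

/* After the patch: test once, then use the __ variant (plain store). */
static void update_wait_start_hoisted(struct sched_statistics *st,
				      unsigned long long now)
{
	if (!schedstat_enabled())
		return;
	__schedstat_set(st->wait_start, now);
}

int main(void)
{
	struct sched_statistics st = { 0 };

	update_wait_start_checked(&st, 100);
	update_wait_start_hoisted(&st, 200);
	printf("wait_start = %llu\n", st.wait_start);
	return 0;
}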