summaryrefslogtreecommitdiffstats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
authorMike Galbraith <efault@gmx.de>2008-10-17 15:33:21 +0200
committerIngo Molnar <mingo@elte.hu>2008-10-17 15:36:58 +0200
commitb0aa51b999c449e5e3f9faa1ee406e052d407fe7 (patch)
treec3c21be0aa90658469ef039065b260172ab9b974 /kernel/sched_fair.c
parentb968905292eaa52b25abb7b3e6c0841dac9f03ae (diff)
downloadlinux-b0aa51b999c449e5e3f9faa1ee406e052d407fe7.tar.gz
linux-b0aa51b999c449e5e3f9faa1ee406e052d407fe7.tar.bz2
linux-b0aa51b999c449e5e3f9faa1ee406e052d407fe7.zip
sched: minor fast-path overhead reduction
Greetings, 103638d added a bit of avoidable overhead to the fast-path. Use sysctl_sched_min_granularity instead of sched_slice() to restrict buddy wakeups. Signed-off-by: Mike Galbraith <efault@gmx.de> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--kernel/sched_fair.c2
1 file changed, 1 insertion, 1 deletion
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 18fd17172eb6..67084936b602 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -747,7 +747,7 @@ pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
struct rq *rq = rq_of(cfs_rq);
u64 pair_slice = rq->clock - cfs_rq->pair_start;
- if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) {
+ if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
cfs_rq->pair_start = rq->clock;
return se;
}