path: root/kernel/sched_rt.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-09-20 23:38:02 +0200
committer Ingo Molnar <mingo@elte.hu>  2008-09-22 16:28:32 +0200
commit    15afe09bf496ae10c989e1a375a6b5da7bd3e16e
tree      4565659d1084e357eea42e6321a4d304ac950faa  /kernel/sched_rt.c
parent    09b22a2f678ae733801b888c44756d0abd686b8a
sched: wakeup preempt when small overlap
Lin Ming reported a 10% OLTP regression against 2.6.27-rc4. The difference seems to come from different preemption aggressiveness, which affects the cache footprint of the workload and its effective cache thrashing.

Aggressively preempt a task if its avg overlap is very small; this should avoid the task going to sleep and us finding it still running when we schedule back to it, saving a wakeup.

Reported-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
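To illustrate the heuristic the commit message describes, here is a minimal, standalone C sketch; it is not the kernel patch itself. The struct fields, the OVERLAP_THRESHOLD_NS value, and the should_preempt() helper are illustrative assumptions, loosely modeled on the avg_overlap bookkeeping and the sync wakeup hint that the patch threads through check_preempt_curr().

/*
 * Standalone sketch (not the kernel code) of the wakeup-preemption
 * heuristic: if the waking task and the currently running task both
 * tend to overlap for only a very short time when they run, preempt
 * immediately so the wakee does not go back to sleep and need another
 * wakeup. Fields and the threshold value are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
    const char *name;
    int prio;                     /* lower value = higher priority          */
    unsigned long avg_overlap_ns; /* avg run overlap with its waker, in ns  */
};

/* illustrative cutoff, analogous in spirit to a migration-cost threshold */
#define OVERLAP_THRESHOLD_NS 500000UL /* 0.5 ms */

static bool should_preempt(const struct task *curr,
                           const struct task *wakee, bool sync)
{
    if (wakee->prio < curr->prio)
        return true;              /* classic priority-based preemption      */

    /*
     * Small-overlap heuristic: preempt on a synchronous wakeup, or when
     * both tasks historically run together for only a tiny interval.
     */
    if (sync ||
        (curr->avg_overlap_ns < OVERLAP_THRESHOLD_NS &&
         wakee->avg_overlap_ns < OVERLAP_THRESHOLD_NS))
        return true;

    return false;
}

int main(void)
{
    struct task curr  = { "oltp-server", 120, 200000 };
    struct task wakee = { "oltp-client", 120, 150000 };

    printf("preempt: %s\n",
           should_preempt(&curr, &wakee, false) ? "yes" : "no");
    return 0;
}

In the actual patch, the change below to kernel/sched_rt.c only threads the new sync argument through the RT class; the small-overlap decision itself lives in the fair class.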
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 552310798dad..6d2d0a5d030b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -783,7 +783,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
/*
* Preempt the current task with a newly woken task if needed:
*/
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
{
if (p->prio < rq->curr->prio) {
resched_task(rq->curr);