author     Mike Galbraith <efault@gmx.de>    2009-10-27 15:35:38 +0100
committer  Ingo Molnar <mingo@elte.hu>       2009-11-04 18:46:22 +0100
commit     a1f84a3ab8e002159498814eaa7e48c33752b04b
tree       070b6c105c510460b314c20e17de4b5b89eb6a48 /kernel
parent     acc3f5d7cabbfd6cec71f0c1f9900621fa2d6ae7
sched: Check for an idle shared cache in select_task_rq_fair()
When waking affine, check for an idle shared cache, and if
found, wake to that CPU/sibling instead of the waker's CPU.
This improves pgsql+oltp ramp-up by roughly 8%, and possibly more
for other loads, depending on overlap. The trade-off is a
roughly 1% peak downturn if tasks are truly synchronous.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: <stable@kernel.org>
LKML-Reference: <1256654138.17752.7.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/sched_fair.c | 33 +++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4e777b47eeda..da87385683cc 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1372,11 +1372,36 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 			want_sd = 0;
 		}
 
-		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
-		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
+		if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
+			int candidate = -1, i;
 
-			affine_sd = tmp;
-			want_affine = 0;
+			if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
+				candidate = cpu;
+
+			/*
+			 * Check for an idle shared cache.
+			 */
+			if (tmp->flags & SD_PREFER_SIBLING) {
+				if (candidate == cpu) {
+					if (!cpu_rq(prev_cpu)->cfs.nr_running)
+						candidate = prev_cpu;
+				}
+
+				if (candidate == -1 || candidate == cpu) {
+					for_each_cpu(i, sched_domain_span(tmp)) {
+						if (!cpu_rq(i)->cfs.nr_running) {
+							candidate = i;
+							break;
+						}
+					}
+				}
+			}
+
+			if (candidate >= 0) {
+				affine_sd = tmp;
+				want_affine = 0;
+				cpu = candidate;
+			}
 		}
 
 		if (!want_sd && !want_affine)
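For readers outside the kernel tree, the selection order the patch implements can be distilled into a minimal user-space sketch. This is illustrative only: NR_CPUS, nr_running[], domain_cpus[] and pick_wake_cpu() are hypothetical stand-ins for the kernel's cpu_rq()->cfs.nr_running, sched_domain_span() and the surrounding select_task_rq_fair() logic, not the actual API.

```c
/*
 * Minimal sketch of the patch's wake-target selection, assuming a
 * hypothetical per-CPU runnable count and a flat cache-domain mask.
 */
#include <stdio.h>

#define NR_CPUS 4

static int nr_running[NR_CPUS];	/* runnable tasks per CPU (hypothetical) */
static int domain_cpus[NR_CPUS];	/* 1 if CPU shares the waker's cache */

/* Pick a wake target: prefer an idle CPU sharing a cache with the waker. */
static int pick_wake_cpu(int cpu, int prev_cpu, int prefer_sibling)
{
	int candidate = -1, i;

	/* Default: the waker's CPU, if prev_cpu is in this domain. */
	if (domain_cpus[prev_cpu])
		candidate = cpu;

	if (prefer_sibling) {
		/* First choice: the wakee's previous CPU, if it is idle. */
		if (candidate == cpu && !nr_running[prev_cpu])
			candidate = prev_cpu;

		/* Otherwise: the first idle cache sibling, if any. */
		if (candidate == -1 || candidate == cpu) {
			for (i = 0; i < NR_CPUS; i++) {
				if (domain_cpus[i] && !nr_running[i]) {
					candidate = i;
					break;
				}
			}
		}
	}

	return candidate;	/* -1 means no affine candidate was found */
}

int main(void)
{
	/* CPUs 0-3 share a cache; CPU 0 wakes a task that last ran on CPU 1. */
	domain_cpus[0] = domain_cpus[1] = domain_cpus[2] = domain_cpus[3] = 1;
	nr_running[0] = 1;	/* the waker is running here */
	nr_running[1] = 1;	/* prev_cpu is busy */

	/* Prints 2: the first idle sibling beats both cpu and prev_cpu. */
	printf("wake target: %d\n", pick_wake_cpu(0, 1, 1));
	return 0;
}
```

The ordering mirrors the patch: the busy waker's CPU is only a fallback, the wakee's previous (cache-warm) CPU wins if idle, and failing that any idle sibling in the shared-cache domain is taken before giving up on an affine wakeup.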