author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-09-16 13:44:33 +0200
committer Ingo Molnar <mingo@elte.hu>                2009-09-16 16:44:32 +0200
commit    3b6408942206f940dd538e980e9904e48f4b64f8 (patch)
tree      7e0a0dc37d07e0308ef2294a5de1cd1ea42a388c /kernel/sched_fair.c
parent    7c423e98856df9b941223a7e7845b2502ad84b00 (diff)
sched: Optimize cgroup vs wakeup a bit
We don't need to call update_shares() for each domain we iterate, just the largest one.

However, we should call it before wake_affine() as well, so that that too can use up-to-date values.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  23
1 file changed, 9 insertions, 14 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index acf16a8d934b..722d392b0dac 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1348,7 +1348,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 */
static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
{
-	struct sched_domain *tmp, *sd = NULL;
+	struct sched_domain *tmp, *shares = NULL, *sd = NULL;
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int new_cpu = cpu;
@@ -1387,22 +1387,14 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
				break;
		}
-		switch (sd_flag) {
-		case SD_BALANCE_WAKE:
-			if (!sched_feat(LB_WAKEUP_UPDATE))
-				break;
-		case SD_BALANCE_FORK:
-		case SD_BALANCE_EXEC:
-			if (root_task_group_empty())
-				break;
-			update_shares(tmp);
-		default:
-			break;
-		}
-
		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
+			if (sched_feat(LB_SHARES_UPDATE)) {
+				update_shares(tmp);
+				shares = tmp;
+			}
+
			if (wake_affine(tmp, p, sync)) {
				new_cpu = cpu;
				goto out;
@@ -1417,6 +1409,9 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
			sd = tmp;
	}
+	if (sd && sd != shares && sched_feat(LB_SHARES_UPDATE))
+		update_shares(sd);
+
	while (sd) {
		struct sched_group *group;
		int weight;
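
The hunks above are easiest to read as a change to the shape of the domain walk in select_task_rq_fair(): update_shares() now runs once right before the wake_affine() decision and once for the widest domain that ends up in sd, instead of once per iterated domain. The following small, self-contained C program sketches that call pattern. It is illustration only: struct domain, select_domain() and the toy update_shares()/wake_affine() helpers are invented stand-ins, not the kernel's definitions, and the sched_feat() checks are omitted.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of a scheduling-domain hierarchy: each level points at a
 * wider parent, e.g. SMT -> MC -> NUMA.
 */
struct domain {
	const char *name;
	bool wake_affine;	/* stand-in for SD_WAKE_AFFINE */
	bool balance_flag;	/* stand-in for tmp->flags & sd_flag */
	struct domain *parent;
};

/* Stand-in for the expensive routine that refreshes group shares. */
static void update_shares(struct domain *d)
{
	printf("update_shares(%s)\n", d->name);
}

/* Stand-in for the affine-wakeup decision; always declines here. */
static bool wake_affine(struct domain *d)
{
	(void)d;
	return false;
}

/*
 * Mirrors the shape of the patched loop: shares are refreshed once
 * before the wake_affine() check, the widest matching domain is
 * remembered in sd, and update_shares() runs for it only if that
 * domain was not already refreshed on the affine path.
 */
static struct domain *select_domain(struct domain *lowest)
{
	struct domain *tmp, *shares = NULL, *sd = NULL;

	for (tmp = lowest; tmp; tmp = tmp->parent) {
		if (tmp->wake_affine) {
			update_shares(tmp);
			shares = tmp;
			if (wake_affine(tmp))
				return tmp;
		}

		if (tmp->balance_flag)
			sd = tmp;
	}

	if (sd && sd != shares)
		update_shares(sd);

	return sd;
}

int main(void)
{
	struct domain numa = { "NUMA", false, true, NULL };
	struct domain mc   = { "MC",   true,  true, &numa };
	struct domain smt  = { "SMT",  false, true, &mc };
	struct domain *sd  = select_domain(&smt);

	printf("selected: %s\n", sd ? sd->name : "(none)");
	return 0;
}

The shares pointer in the real patch serves the same purpose as in this sketch: it records which domain was already refreshed on the affine path, so the final update_shares(sd) can be skipped when the two domains coincide.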