author    Ingo Molnar <mingo@elte.hu>  2008-03-16 21:21:47 +0100
committer Ingo Molnar <mingo@elte.hu>  2008-03-19 04:27:53 +0100
commit    f48273860edfca2306236d0f0de609aab3f773d4 (patch)
tree      2ee68d85fa99e54488d25780234e3e3fb2c0b982 /kernel
parent    ac192d3921a14e2c9080799e16959b4bd56f49d6 (diff)
sched: clean up wakeup balancing, code flow
Clean up the code flow. No code changed:

kernel/sched.o:

   text    data     bss     dec     hex filename
  42521    2858     232   45611    b22b sched.o.before
  42521    2858     232   45611    b22b sched.o.after

md5:
   09b31c44e9aff8666f72773dc433e2df  sched.o.before.asm
   09b31c44e9aff8666f72773dc433e2df  sched.o.after.asm

Signed-off-by: Ingo Molnar <mingo@elte.hu>
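The shape of the cleanup: select_task_rq_fair() previously funneled its result through three exit points (out_keep_cpu, out_set_cpu, and a pre-set new_cpu that later branches relied on); after the patch every branch decides new_cpu explicitly and jumps to a single out: label. Below is a minimal, self-contained C sketch of that single-exit pattern; select_cpu(), affine and passive are hypothetical stand-ins for select_task_rq_fair() and its wake_affine()/passive-balancing tests, not the kernel's API.

/*
 * Illustration of the single-exit-label flow this patch introduces.
 * All names here are stand-ins for the kernel code, not real APIs.
 */
#include <stdio.h>

static int wake_idle(int cpu)		/* stub for the kernel helper */
{
	return cpu;
}

static int select_cpu(int prev_cpu, int this_cpu, int affine, int passive)
{
	int new_cpu = prev_cpu;		/* default: wake on the previous CPU */

	if (prev_cpu == this_cpu)
		goto out;

	if (affine) {			/* affine wakeup succeeded */
		new_cpu = this_cpu;
		goto out;
	}

	if (passive) {			/* passive balancing kicked in */
		new_cpu = this_cpu;
		goto out;
	}
out:					/* single exit: new_cpu is always set */
	return wake_idle(new_cpu);
}

int main(void)
{
	printf("%d\n", select_cpu(0, 1, 1, 0));	/* affine wakeup -> 1 */
	printf("%d\n", select_cpu(0, 1, 0, 0));	/* no balancing  -> 0 */
	return 0;
}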
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_fair.c  25
1 file changed, 10 insertions(+), 15 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2d2be02b8e3b..b5a357396b49 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1040,7 +1040,7 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	new_cpu = prev_cpu;
 
 	if (prev_cpu == this_cpu)
-		goto out_set_cpu;
+		goto out;
 
 	/*
 	 * 'this_sd' is the first domain that both
@@ -1054,13 +1054,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	}
 
 	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
-		goto out_set_cpu;
+		goto out;
 
 	/*
 	 * Check for affine wakeup and passive balancing possibilities.
 	 */
 	if (!this_sd)
-		goto out_keep_cpu;
+		goto out;
 
 	idx = this_sd->wake_idx;
 
@@ -1069,11 +1069,11 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	load = source_load(prev_cpu, idx);
 	this_load = target_load(this_cpu, idx);
 
-	new_cpu = this_cpu; /* Wake to this CPU if we can */
-
 	if (wake_affine(rq, this_sd, p, prev_cpu, this_cpu, sync, idx,
-				load, this_load, imbalance))
-		goto out_set_cpu;
+				load, this_load, imbalance)) {
+		new_cpu = this_cpu;
+		goto out;
+	}
 
 	/*
 	 * Start passive balancing when half the imbalance_pct
@@ -1083,17 +1083,12 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 		if (imbalance*this_load <= 100*load) {
 			schedstat_inc(this_sd, ttwu_move_balance);
 			schedstat_inc(p, se.nr_wakeups_passive);
-			goto out_set_cpu;
+			new_cpu = this_cpu;
+			goto out;
 		}
 	}
-out_keep_cpu:
-	/*
-	 * Could not wake to this_cpu.
-	 * Wake to the previous cpu instead:
-	 */
-	new_cpu = prev_cpu;
-out_set_cpu:
+out:
 	return wake_idle(new_cpu, p);
 }
 
 #endif /* CONFIG_SMP */
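The net effect is clearest in the last hunk: the out_keep_cpu: and out_set_cpu: labels, along with the new_cpu = prev_cpu reassignment between them, collapse into a single out: label. This works because new_cpu already holds prev_cpu from the top of the function, and the two wake-to-this_cpu paths now assign new_cpu = this_cpu themselves before jumping. The identical size and md5 figures in the commit message show the compiler emits the same object code either way.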