author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-09-01 10:34:38 +0200
committer  Ingo Molnar <mingo@elte.hu>              2009-09-04 10:09:55 +0200
commit     bdb94aa5dbd8b55e75f5a50b61312fe589e2c2d1 (patch)
tree       388da3774a3c69f4a0a74285478226311ec41c90 /kernel/sched.c
parent     e9e9250bc78e7f6342517214c0178a529807964b (diff)
sched: Try to deal with low capacity
When the capacity drops low, we want to migrate load away. Allow the
load-balancer to remove all tasks when we hit rock bottom.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Gautham R Shenoy <ego@in.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
LKML-Reference: <20090901083826.342231003@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
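For intuition, here is a standalone sketch (not part of the patch; SCHED_LOAD_SCALE is assumed to be 1024, and the power figures are made up) of how rounding the group capacity to the nearest integer changes the picture for groups whose cpu power has been scaled down:

#include <stdio.h>

/* Simplified stand-in for the kernel's DIV_ROUND_CLOSEST() on
 * unsigned operands: add half the divisor before dividing, so the
 * result rounds to nearest instead of truncating toward zero. */
#define SCHED_LOAD_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	/* Made-up group powers: nominal, mildly reduced, rock bottom. */
	unsigned long power[] = { 1024, 900, 300 };

	for (int i = 0; i < 3; i++)
		printf("power=%4lu truncated=%lu rounded=%lu\n",
		       power[i], power[i] / SCHED_LOAD_SCALE,
		       DIV_ROUND_CLOSEST(power[i], SCHED_LOAD_SCALE));
	/*
	 * power=900: truncated=0 but rounded=1, so the group still
	 * counts as one CPU; power=300 rounds to 0, and a capacity of
	 * zero is what lets the balancer strip the group of all tasks.
	 */
	return 0;
}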
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 33 ++++++++++++++++++++++++++++-----
 1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ab532b5de40e..5f5b359b01b8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3908,8 +3908,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
sgs->group_imb = 1;
- sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
-
+ sgs->group_capacity =
+ DIV_ROUND_CLOSEST(group->__cpu_power, SCHED_LOAD_SCALE);
}
/**
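The cut-over point of the new rounding matters: a group only drops to capacity 0 once its power falls below half of SCHED_LOAD_SCALE. A minimal standalone check of that boundary (values assumed as above):

#include <assert.h>

#define SCHED_LOAD_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	assert(DIV_ROUND_CLOSEST(512UL, SCHED_LOAD_SCALE) == 1); /* exactly half rounds up */
	assert(DIV_ROUND_CLOSEST(511UL, SCHED_LOAD_SCALE) == 0); /* below half: no capacity */
	return 0;
}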
@@ -3959,7 +3959,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
* and move all the excess tasks away.
*/
if (prefer_sibling)
- sgs.group_capacity = 1;
+ sgs.group_capacity = min(sgs.group_capacity, 1UL);
if (local_group) {
sds->this_load = sgs.avg_load;
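Previously the prefer_sibling path overwrote each group's capacity with a hard 1; clamping with min() instead preserves a computed capacity of 0, so a rock-bottom group is still evacuated completely. A toy illustration (standalone; min() here is a stand-in for the kernel macro, and the capacities are made up):

#include <stdio.h>

#define min(a, b)	((a) < (b) ? (a) : (b))	/* stand-in for kernel min() */

int main(void)
{
	unsigned long caps[] = { 0, 1, 2 };	/* made-up group capacities */

	for (int i = 0; i < 3; i++)
		printf("capacity %lu: old=1 new=%lu\n",
		       caps[i], min(caps[i], 1UL));
	/* Only the zero survives as zero; larger capacities are still
	 * clamped to 1 so sibling domains keep spreading load. */
	return 0;
}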
@@ -4191,6 +4191,26 @@ ret:
return NULL;
}
+static struct sched_group *group_of(int cpu)
+{
+ struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+ if (!sd)
+ return NULL;
+
+ return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+ struct sched_group *group = group_of(cpu);
+
+ if (!group)
+ return SCHED_LOAD_SCALE;
+
+ return group->__cpu_power;
+}
+
/*
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
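group_of() reads the per-cpu domain pointer with rcu_dereference(), so callers are expected to hold RCU read-side protection; power_of() falls back to the nominal SCHED_LOAD_SCALE when no domain is attached yet. A hypothetical caller, purely for illustration (the patch itself only uses power_of() from find_busiest_queue() below):

	/* Sketch, not from the patch: query a CPU's current power. */
	rcu_read_lock();
	power = power_of(cpu);	/* SCHED_LOAD_SCALE if rq->sd is NULL */
	rcu_read_unlock();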
@@ -4203,15 +4223,18 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
int i;
for_each_cpu(i, sched_group_cpus(group)) {
+ unsigned long power = power_of(i);
+ unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
unsigned long wl;
if (!cpumask_test_cpu(i, cpus))
continue;
rq = cpu_rq(i);
- wl = weighted_cpuload(i);
+ wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
+ wl /= power;
- if (rq->nr_running == 1 && wl > imbalance)
+ if (capacity && rq->nr_running == 1 && wl > imbalance)
continue;
if (wl > max_load) {
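The queue scan now normalizes each CPU's load by its power, so a CPU running below nominal power looks proportionally more loaded, and the new capacity check lets a zero-capacity CPU be chosen even when a single task is left on it. Worked arithmetic (standalone, made-up numbers; SCHED_LOAD_SCALE assumed 1024):

#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL

int main(void)
{
	unsigned long load = 2048;	/* raw weighted_cpuload(), made up */
	unsigned long full = 1024;	/* nominal cpu power */
	unsigned long half = 512;	/* power scaled down, e.g. by RT load */

	printf("full power: wl=%lu\n", load * SCHED_LOAD_SCALE / full);
	printf("half power: wl=%lu\n", load * SCHED_LOAD_SCALE / half);
	/* 2048 vs 4096: the weakened CPU appears twice as loaded, so
	 * the balancer prefers to migrate its load away. */
	return 0;
}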