author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-06-27 13:41:23 +0200
committer  Ingo Molnar <mingo@elte.hu>                2008-06-27 14:31:36 +0200
commit     c8cba857b4997d5b00451d01474638f6a153f713 (patch)
tree       a784dce37d72ae20a0efb81b8e498b504a207650 /kernel/sched_fair.c
parent     a25b5aca8740ea99d5e18dfc71235a52b685dcf7 (diff)
download   linux-c8cba857b4997d5b00451d01474638f6a153f713.tar.gz
           linux-c8cba857b4997d5b00451d01474638f6a153f713.tar.bz2
           linux-c8cba857b4997d5b00451d01474638f6a153f713.zip
sched: simplify the group load balancer
While thinking about the previous patch - I realized that using per domain aggregate load values in load_balance_fair() is wrong. We should use the load value for that CPU.

By not needing per domain hierarchical load values we don't need to store per domain aggregate shares, which greatly simplifies all the math.

It basically falls apart in two separate computations:

 - per domain update of the shares
 - per CPU update of the hierarchical load

Also get rid of the move_group_shares() stuff - just re-compute the shares again after a successful load balance.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
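The "per CPU update of the hierarchical load" mentioned above amounts to walking down the group tree and scaling the top-level runqueue load by each group's relative weight at every level. The following is a minimal standalone C sketch of that idea; it is not the kernel's update_h_load() implementation, and the struct layout, field names, and the two-level example values are assumptions made purely for illustration.

    /* Standalone sketch, not kernel code: how a group's hierarchical load
     * could be derived by scaling the top-level load down the group tree. */
    #include <stdio.h>

    struct grp {
    	unsigned long weight;              /* this group's weight within its parent */
    	unsigned long parent_total_weight; /* sum of all sibling weights (assumed) */
    	unsigned long h_load;              /* load as seen at the top level */
    	struct grp *parent;
    };

    static void update_h_load_sketch(struct grp *g, unsigned long root_load)
    {
    	if (!g->parent) {
    		/* the root cfs_rq carries the full top-level load */
    		g->h_load = root_load;
    		return;
    	}
    	update_h_load_sketch(g->parent, root_load);
    	/* scale the parent's hierarchical load by this group's share of it */
    	g->h_load = g->parent->h_load * g->weight / (g->parent_total_weight + 1);
    }

    int main(void)
    {
    	struct grp root  = { .weight = 0, .parent = NULL };
    	struct grp child = { .weight = 512, .parent_total_weight = 2048,
    			     .parent = &root };

    	update_h_load_sketch(&child, 4096);   /* assume top-level rq load = 4096 */
    	printf("child h_load = %lu\n", child.h_load);  /* ~1024 */
    	return 0;
    }

With this per-CPU h_load in hand, load_balance_fair() no longer needs the per-domain aggregate values that the removed code below relied on.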
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 03b9fbd9d648..7b8d664d6f22 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1421,17 +1421,20 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	struct task_group *tg;
 
 	rcu_read_lock();
+	update_h_load(busiest_cpu);
+
 	list_for_each_entry(tg, &task_groups, list) {
+		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
 		long rem_load, moved_load;
 
 		/*
 		 * empty group
 		 */
-		if (!tg->cfs_rq[busiest_cpu]->task_weight)
+		if (!busiest_cfs_rq->task_weight)
 			continue;
 
-		rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight;
-		rem_load /= aggregate(tg, this_cpu)->load + 1;
+		rem_load = rem_load_move * busiest_cfs_rq->load.weight;
+		rem_load /= busiest_cfs_rq->h_load + 1;
 
 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
 				rem_load, sd, idle, all_pinned, this_best_prio,
@@ -1440,10 +1443,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (!moved_load)
 			continue;
 
-		move_group_shares(tg, this_cpu, sd, busiest_cpu, this_cpu);
-
-		moved_load *= aggregate(tg, this_cpu)->load;
-		moved_load /= aggregate(tg, this_cpu)->rq_weight + 1;
+		moved_load *= busiest_cfs_rq->h_load;
+		moved_load /= busiest_cfs_rq->load.weight + 1;
 
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
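To make the arithmetic in the hunks above concrete: rem_load converts the remaining top-level load into the busiest cfs_rq's group-local units before tasks are moved, and moved_load converts the group-local amount actually moved back into top-level units. The sketch below is a hedged userspace illustration, not kernel code; the struct, field names, and numbers are invented for the example, and only the scaling mirrors the patch.

    /* Standalone sketch of the two load-unit conversions used above. */
    #include <stdio.h>

    struct cfs_rq_sample {
    	unsigned long load_weight;  /* group-local weight on the busiest CPU */
    	unsigned long h_load;       /* this cfs_rq's hierarchical (top-level) load */
    };

    int main(void)
    {
    	struct cfs_rq_sample busiest = { .load_weight = 2048, .h_load = 512 };
    	long rem_load_move = 100;   /* top-level load we still want to move */
    	long rem_load, moved_load;

    	/* top-level load -> this group's local units (rem_load in the patch) */
    	rem_load = rem_load_move * busiest.load_weight / (busiest.h_load + 1);

    	/* assume the whole group-local target was moved */
    	moved_load = rem_load;

    	/* group-local amount -> top-level units (moved_load in the patch) */
    	moved_load = moved_load * busiest.h_load / (busiest.load_weight + 1);

    	printf("group-local target: %ld, charged at top level: %ld\n",
    	       rem_load, moved_load);
    	return 0;
    }

Running this prints a group-local target of 399 that is charged back as roughly 99 at the top level, i.e. the two conversions are (up to the +1 rounding guards) inverses, which is why rem_load_move can simply be decremented by moved_load.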