author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-09-01 10:34:34 +0200
committer | Ingo Molnar <mingo@elte.hu> | 2009-09-04 10:09:53 +0200
commit | cc9fba7d7672fa3ed58d9d9ecb6c45b1351c29a6
tree | a2ae08f8edad60c17134fb5ff60b0deb21dc96ab
parent | b5d978e0c7e79a7ff842e895c85a86b38c71f1cd
sched: Update the cpu_power sum during load-balance
In order to prepare for a more dynamic cpu_power, update the
group sum while walking the sched domains during load-balance.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Gautham R Shenoy <ego@in.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
LKML-Reference: <20090901083825.985050292@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r-- | kernel/sched.c | 33
1 file changed, 29 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 9d64cec9ae1d..ecb4a47d4214 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3699,6 +3699,28 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
+static void update_sched_power(struct sched_domain *sd)
+{
+	struct sched_domain *child = sd->child;
+	struct sched_group *group, *sdg = sd->groups;
+	unsigned long power = sdg->__cpu_power;
+
+	if (!child) {
+		/* compute cpu power for this cpu */
+		return;
+	}
+
+	sdg->__cpu_power = 0;
+
+	group = child->groups;
+	do {
+		sdg->__cpu_power += group->__cpu_power;
+		group = group->next;
+	} while (group != child->groups);
+
+	if (power != sdg->__cpu_power)
+		sdg->reciprocal_cpu_power = reciprocal_value(sdg->__cpu_power);
+}
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
@@ -3712,7 +3734,8 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
-static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+static inline void update_sg_lb_stats(struct sched_domain *sd,
+			struct sched_group *group, int this_cpu,
 			enum cpu_idle_type idle, int load_idx, int *sd_idle,
 			int local_group, const struct cpumask *cpus,
 			int *balance, struct sg_lb_stats *sgs)
@@ -3723,8 +3746,11 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
 	unsigned long sum_avg_load_per_task;
 	unsigned long avg_load_per_task;
 
-	if (local_group)
+	if (local_group) {
 		balance_cpu = group_first_cpu(group);
+		if (balance_cpu == this_cpu)
+			update_sched_power(sd);
+	}
 
 	/* Tally up the load of all CPUs in the group */
 	sum_avg_load_per_task = avg_load_per_task = 0;
@@ -3828,7 +3854,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		local_group = cpumask_test_cpu(this_cpu,
 					       sched_group_cpus(group));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+		update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
 					local_group, cpus, balance, &sgs);
 
 		if (local_group && balance && !(*balance))
@@ -3863,7 +3889,6 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		update_sd_power_savings_stats(group, sds, local_group, &sgs);
 		group = group->next;
 	} while (group != sd->groups);
-
 }
 
 /**
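For readers who want to see the aggregation in isolation, here is a minimal, self-contained sketch of the bottom-up summation that update_sched_power() performs. It is not part of the patch: the types and names (struct sg, struct sd, cpu_power, update_power) are simplified stand-ins for the kernel's sched_group/sched_domain, and the reciprocal_cpu_power update is omitted.

/*
 * Illustrative sketch only -- NOT part of the patch above.
 * Simplified stand-ins for the kernel's scheduler structures.
 */
#include <stdio.h>

struct sg {				/* stand-in for struct sched_group */
	unsigned long cpu_power;
	struct sg *next;		/* groups are kept on a circular list */
};

struct sd {				/* stand-in for struct sched_domain */
	struct sd *child;		/* NULL at the lowest (per-CPU) level */
	struct sg *groups;		/* this domain's local group */
};

/*
 * Mirrors what update_sched_power() does in the patch: sum the child
 * domain's group powers into this domain's local group.
 */
static void update_power(struct sd *sd)
{
	struct sd *child = sd->child;
	struct sg *group, *sdg = sd->groups;

	if (!child)
		return;			/* lowest level: nothing to aggregate */

	sdg->cpu_power = 0;

	group = child->groups;
	do {
		sdg->cpu_power += group->cpu_power;
		group = group->next;
	} while (group != child->groups);
}

int main(void)
{
	/* Two child groups of power 1024 each, e.g. two sibling CPUs. */
	struct sg g0 = { 1024, NULL };
	struct sg g1 = { 1024, NULL };
	struct sg parent_group = { 0, &parent_group };
	struct sd child_domain = { NULL, &g0 };
	struct sd parent_domain = { &child_domain, &parent_group };

	g0.next = &g1;
	g1.next = &g0;

	update_power(&parent_domain);

	/* Prints 2048: the parent group's power is the sum of its children. */
	printf("parent group cpu_power = %lu\n", parent_group.cpu_power);
	return 0;
}

Running the sketch prints a parent group power of 2048, the sum of the two child groups. This is the quantity the patch keeps up to date while walking the sched domains on each load-balance pass, in preparation for a more dynamic cpu_power.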