author	Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>	2008-06-27 13:41:36 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 14:31:45 +0200
commit	243e0e7b7d3b54749ece2e879ecd7e2a11874443 (patch)
tree	0dd6af7eb63d261d15d3720f77a9430387e3db42 /kernel/sched_fair.c
parent	2398f2c6d34b43025f274fc42eaca34d23ec2320 (diff)
sched: fix mult overflow
It was observed that these multiplications can overflow.

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
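To make the failure mode concrete: on 32-bit kernels unsigned long is 32 bits wide, so the intermediate product rem_load_move * busiest_weight can exceed 2^32 and silently wrap. Below is a minimal userspace sketch of the wrap and of the fix; the load/weight values are hypothetical, chosen only to force the overflow, and the variable names merely mirror the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/*
	 * uint32_t stands in for the 32-bit unsigned long of a 32-bit
	 * kernel so the demo behaves the same on any host. The values
	 * are hypothetical, picked to make the product exceed 2^32.
	 */
	uint32_t rem_load_move  = 1U << 20;         /* load left to move */
	uint32_t busiest_weight = (1U << 15) + 3;   /* group load.weight */
	uint32_t busiest_h_load = 1U << 10;         /* hierarchical load */

	/* Old code: the product needs ~35 bits, so 32-bit math wraps. */
	uint32_t wrapped = rem_load_move * busiest_weight;

	/* Fixed code: widen one operand so the multiply runs in 64 bits. */
	uint64_t exact = (uint64_t)rem_load_move * busiest_weight;

	printf("32-bit product:  %u (wrapped)\n", wrapped);
	printf("64-bit product:  %llu\n", (unsigned long long)exact);
	printf("scaled rem_load: %llu\n",
	       (unsigned long long)(exact / (busiest_h_load + 1)));
	return 0;
}

With these inputs the 32-bit product comes out as 3145728 instead of 34362884096, so the scaled rem_load handed to __load_balance_fair() would be wildly wrong.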
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0d197be3e3e9..26ebe180cdea 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1477,7 +1477,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
 		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
 		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
-		long rem_load, moved_load;
+		u64 rem_load, moved_load;
 
 		/*
 		 * empty group
@@ -1485,8 +1485,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (!busiest_cfs_rq->task_weight)
 			continue;
 
-		rem_load = rem_load_move * busiest_weight;
-		rem_load /= busiest_h_load + 1;
+		rem_load = (u64)rem_load_move * busiest_weight;
+		rem_load = div_u64(rem_load, busiest_h_load + 1);
 
 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
 				rem_load, sd, idle, all_pinned, this_best_prio,
@@ -1496,7 +1496,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 			continue;
 
 		moved_load *= busiest_h_load;
-		moved_load /= busiest_weight + 1;
+		moved_load = div_u64(moved_load, busiest_weight + 1);
 
 		rem_load_move -= moved_load;
 		if (rem_load_move < 0)
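A note on why the divisions switch to div_u64() rather than keeping the plain '/' operator: once rem_load is a u64, a straight 64-bit division on 32-bit architectures would have gcc emit a call to libgcc's __udivdi3, which the kernel does not link against. div_u64() from <linux/math64.h> performs the 64-by-32-bit division with kernel-provided helpers. A rough userspace stand-in is sketched below for illustration only; the div_u64() body here is not the kernel implementation, and scale_rem_load() is a hypothetical helper that mirrors the fixed computation.

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative stand-in for the kernel's div_u64() (<linux/math64.h>):
 * 64-bit dividend, 32-bit divisor. In userspace a plain '/' is fine
 * because libgcc supplies the 64-bit division helper the kernel lacks.
 */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

/*
 * Mirrors the fixed computation from the patch: scale the load still
 * to be moved by the group's weight/h_load ratio, in 64-bit math.
 * The "+ 1" keeps the divisor nonzero when h_load is 0.
 */
static uint64_t scale_rem_load(unsigned long rem_load_move,
			       unsigned long busiest_weight,
			       unsigned long busiest_h_load)
{
	uint64_t rem_load = (uint64_t)rem_load_move * busiest_weight;
	return div_u64(rem_load, busiest_h_load + 1);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)
	       scale_rem_load(1048576UL, 32771UL, 1024UL));
	return 0;
}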