author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-04-19 19:45:00 +0200
committer	Ingo Molnar <mingo@elte.hu>		2008-04-19 19:45:00 +0200
commit		ac884dec6d4a7df252150af875cffddf8f1d9c15 (patch)
tree		6ba7140a8b6e7b332fd687d24de45d2f6ded8035 /kernel/sched_fair.c
parent		58d6c2d72f8628f39e8689fbde8aa177fcf00a37 (diff)
sched: fair-group scheduling vs latency
Currently FAIR_GROUP sched grows the scheduler latency outside of
sysctl_sched_latency; invert this so it stays within.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
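The new sched_slice() walks from the task up through its ancestor group
entities with for_each_sched_entity(), scaling the period by the weight
fraction at every level, so a task inside a group gets a fraction of its
group's fraction of the latency period rather than a full period of its
own. The following user-space sketch mimics only that arithmetic; the
struct level type, the slice_of() helper and every weight in it are
invented for illustration and are not part of the patch:

#include <stdio.h>

struct level {
        unsigned long weight;           /* this entity's weight       */
        unsigned long rq_weight;        /* total weight on its cfs_rq */
};

/* mimic the patched sched_slice(): scale the period by w/rw at every
 * hierarchy level, task first, top-level group entity last */
static unsigned long long slice_of(unsigned long long period,
                                   const struct level *lv, int depth)
{
        int i;

        for (i = 0; i < depth; i++) {
                period *= lv[i].weight;
                period /= lv[i].rq_weight;      /* do_div() in the kernel */
        }
        return period;
}

int main(void)
{
        /* invented numbers: a task of weight 1024 on a group runqueue
         * of total weight 2048, whose group entity has weight 1024 on
         * a root runqueue of total weight 3072 */
        struct level path[] = { { 1024, 2048 }, { 1024, 3072 } };
        unsigned long long period = 20000000ULL;        /* 20ms in ns */

        /* 20ms * 1/2 * 1/3 = ~3.33ms: a fraction of one period, not a
         * fresh period per group */
        printf("slice = %llu ns\n", slice_of(period, path, 2));
        return 0;
}

Before the patch, sched_slice() sized the slice against a full
__sched_period() of the local cfs_rq alone, so every nesting level spread
its entities over a whole latency period of its own, which is what let the
effective latency grow outside sysctl_sched_latency.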
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c | 44 +++++++++++++++++++++++++++++++++++++-------------
1 file changed, 31 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b89fec93a237..9e301a2bab6f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -362,29 +362,47 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        return calc_delta_mine(__sched_period(cfs_rq->nr_running),
-                               se->load.weight, &cfs_rq->load);
+        u64 slice = __sched_period(cfs_rq->nr_running);
+
+        for_each_sched_entity(se) {
+                cfs_rq = cfs_rq_of(se);
+
+                slice *= se->load.weight;
+                do_div(slice, cfs_rq->load.weight);
+        }
+
+
+        return slice;
 }

 /*
- * We calculate the vruntime slice.
+ * We calculate the vruntime slice of a to-be-inserted task.
  *
  * vs = s/w = p/rw
  */
-static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
+static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-        u64 vslice = __sched_period(nr_running);
+        unsigned long nr_running = cfs_rq->nr_running;
+        unsigned long weight;
+        u64 vslice;

-        vslice *= NICE_0_LOAD;
-        do_div(vslice, rq_weight);
+        if (!se->on_rq)
+                nr_running++;

-        return vslice;
-}
+        vslice = __sched_period(nr_running);

-static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-        return __sched_vslice(cfs_rq->load.weight + se->load.weight,
-                              cfs_rq->nr_running + 1);
+        for_each_sched_entity(se) {
+                cfs_rq = cfs_rq_of(se);
+
+                weight = cfs_rq->load.weight;
+                if (!se->on_rq)
+                        weight += se->load.weight;
+
+                vslice *= NICE_0_LOAD;
+                do_div(vslice, weight);
+        }
+
+        return vslice;
 }

 /*
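The comment's identity vs = s/w = p/rw is what makes this computable per
level: an entity's wall-clock slice is s = p * w / rw, so s/w = p/rw is
the same for every entity on a queue, and the vruntime slice is that
common value scaled by NICE_0_LOAD. A small self-contained check of the
identity, with numbers invented for illustration (p = 20ms, w = 2048,
rw = 3072):

#include <stdio.h>

#define NICE_0_LOAD 1024ULL

int main(void)
{
        unsigned long long p  = 20000000ULL;    /* period: 20ms in ns      */
        unsigned long long w  = 2048;           /* entity weight (made up) */
        unsigned long long rw = 3072;           /* total runqueue weight   */

        unsigned long long s = p * w / rw;      /* wall-clock slice, ~13.3ms */

        /* both sides of s/w = p/rw agree (6510 with these numbers), so
         * the vruntime slice never needs s itself */
        printf("s/w  = %llu\n", s / w);
        printf("p/rw = %llu\n", p / rw);

        /* the weight-normalized slice, as sched_vslice_add() computes it
         * at each hierarchy level: ~6.67ms */
        printf("vs = %llu ns\n", p * NICE_0_LOAD / rw);
        return 0;
}

In sched_vslice_add() itself, the NICE_0_LOAD/weight step is repeated for
every level of the hierarchy, and for an entity that is not yet on the
runqueue (!se->on_rq) both nr_running and the queue weight are bumped
first, so the slice already accounts for the insertion it is being
computed for.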