author    Mike Galbraith <efault@gmx.de>    2009-01-02 12:16:42 +0100
committer Ingo Molnar <mingo@elte.hu>       2009-01-02 17:10:43 +0100
commit    0a582440ff546e2c6610d1acec325e91b4efd313
tree      161a0943091c53bb2154121480b3e26ce5df1769 /kernel/sched_fair.c
parent    b58602a4bac012b5f4fc12fe6b46ab237b610d5d
sched: fix sched_slice()
Impact: fix bad-interactivity buglet

Fix sched_slice() to emit a sane result whether a task is currently
enqueued or not.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Tested-by: Jayson King <dev@jaysonking.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
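For context, sched_slice() hands each entity a share of the scheduling period
proportional to its weight within the runqueue load (the removed comment's
delta *= P[w / rw]). A rough worked example with made-up numbers, assuming a
12 ms period and three nice-0 tasks of weight 1024 each:

    slice = period * w / rw = 12 ms * 1024 / 3072 = 4 ms

Before the patch, a task that was not yet enqueued bumped nr_running for the
period length, but the division still used a runqueue load that did not yet
include the task's own weight; the rewritten loop below adds that weight to a
local copy of the load before scaling.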
Diffstat (limited to 'kernel/sched_fair.c')

 kernel/sched_fair.c | 30 ++++++++++++------------------
 1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5ad4440f0fc4..b808563f4f19 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -386,20 +386,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
 #endif
 
 /*
- * delta *= P[w / rw]
- */
-static inline unsigned long
-calc_delta_weight(unsigned long delta, struct sched_entity *se)
-{
-	for_each_sched_entity(se) {
-		delta = calc_delta_mine(delta,
-				se->load.weight, &cfs_rq_of(se)->load);
-	}
-
-	return delta;
-}
-
-/*
  * delta /= w
  */
 static inline unsigned long
@@ -440,12 +426,20 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	unsigned long nr_running = cfs_rq->nr_running;
+	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
-	if (unlikely(!se->on_rq))
-		nr_running++;
+	for_each_sched_entity(se) {
+		struct load_weight *load = &cfs_rq->load;
 
-	return calc_delta_weight(__sched_period(nr_running), se);
+		if (unlikely(!se->on_rq)) {
+			struct load_weight lw = cfs_rq->load;
+
+			update_load_add(&lw, se->load.weight);
+			load = &lw;
+		}
+		slice = calc_delta_mine(slice, se->load.weight, load);
+	}
+	return slice;
 }
 
 /*