author | Paul Turner <pjt@google.com> | 2010-11-15 15:47:04 -0800 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-11-18 13:27:48 +0100 |
commit | e33078baa4d30ad1d0e46d1f62b9e5a63a3e6ee3 (patch) |
tree | 13d1a075c9cd41ce666cf21b0b209bd3d121f566 /kernel/sched_fair.c | |
parent | f0d7442a5924a802b66eef79b3708f77297bfb35 (diff) | |
sched: Fix update_cfs_load() synchronization
Using cfs_rq->nr_running is not sufficient to synchronize update_cfs_load() with
the put path, since nr_running accounting occurs at deactivation.

It's also not safe to base the removal decision on load_avg, as this fails with
both high periods and low shares. Resolve this by clipping history after
4 periods without activity.

Note: the clipping above will always occur from update_shares(), since in the
last-task-sleep case that task will still be cfs_rq->curr when update_cfs_load()
is called.
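To make the clipping rule concrete, here is a minimal standalone model of the logic this patch adds. The names `toy_cfs_rq` and `toy_update_load()` are hypothetical, and the struct is stripped down to just the fields the patch touches; this is an illustrative sketch, not the kernel implementation (which, among other things, uses an asm barrier in the decay loop).

```c
#include <stdint.h>

/* Simplified cfs_rq carrying only the load-tracking fields. */
struct toy_cfs_rq {
	uint64_t load_stamp;	/* last time the averages were updated */
	uint64_t load_last;	/* last time the queue carried weight */
	uint64_t load_period;	/* time accumulated in the current window */
	uint64_t load_avg;	/* decaying load average */
	unsigned long weight;	/* current queue weight (load.weight) */
};

static void toy_update_load(struct toy_cfs_rq *cfs_rq, uint64_t now,
			    uint64_t period)
{
	uint64_t delta = now - cfs_rq->load_stamp;

	/*
	 * load_stamp > load_last means every update since the queue last
	 * carried weight saw it idle; after more than 4 full periods of
	 * that, the history is stale and is clipped rather than decayed.
	 */
	if (cfs_rq->load_stamp > cfs_rq->load_last &&
	    now - cfs_rq->load_last > 4 * period) {
		cfs_rq->load_period = 0;
		cfs_rq->load_avg = 0;
	}

	cfs_rq->load_stamp = now;
	cfs_rq->load_period += delta;
	if (cfs_rq->weight) {
		cfs_rq->load_last = now;
		cfs_rq->load_avg += delta * cfs_rq->weight;
	}

	/* age the window: halve the average once per elapsed period */
	while (cfs_rq->load_period > period) {
		cfs_rq->load_period /= 2;
		cfs_rq->load_avg /= 2;
	}
}
```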
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234937.933428187@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 33 |
1 file changed, 21 insertions, 12 deletions
```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d52b97a04e7a..a543a5b202a4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -674,10 +674,11 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
+static void update_cfs_load(struct cfs_rq *cfs_rq)
 {
 	u64 period = sched_avg_period();
 	u64 now, delta;
+	unsigned long load = cfs_rq->load.weight;
 
 	if (!cfs_rq)
 		return;
@@ -685,9 +686,19 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
 	now = rq_of(cfs_rq)->clock;
 	delta = now - cfs_rq->load_stamp;
 
+	/* truncate load history at 4 idle periods */
+	if (cfs_rq->load_stamp > cfs_rq->load_last &&
+	    now - cfs_rq->load_last > 4 * period) {
+		cfs_rq->load_period = 0;
+		cfs_rq->load_avg = 0;
+	}
+
 	cfs_rq->load_stamp = now;
 	cfs_rq->load_period += delta;
-	cfs_rq->load_avg += delta * cfs_rq->load.weight;
+	if (load) {
+		cfs_rq->load_last = now;
+		cfs_rq->load_avg += delta * load;
+	}
 
 	while (cfs_rq->load_period > period) {
 		/*
@@ -700,10 +711,8 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
 		cfs_rq->load_avg /= 2;
 	}
 
-	if (lb && !cfs_rq->nr_running) {
-		if (cfs_rq->load_avg < (period / 8))
-			list_del_leaf_cfs_rq(cfs_rq);
-	}
+	if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
+		list_del_leaf_cfs_rq(cfs_rq);
 }
 
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
@@ -750,7 +759,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
+static inline void update_cfs_load(struct cfs_rq *cfs_rq)
 {
 }
 
@@ -880,7 +889,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	update_cfs_load(cfs_rq, 0);
+	update_cfs_load(cfs_rq);
 	update_cfs_shares(cfs_rq, se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
 
@@ -941,7 +950,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
-	update_cfs_load(cfs_rq, 0);
+	update_cfs_load(cfs_rq);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
 	update_cfs_shares(cfs_rq, 0);
@@ -1176,7 +1185,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq, 0);
+		update_cfs_load(cfs_rq);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -1206,7 +1215,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq, 0);
+		update_cfs_load(cfs_rq);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -2023,7 +2032,7 @@ static int tg_shares_up(struct task_group *tg, int cpu)
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_rq_clock(rq);
-	update_cfs_load(cfs_rq, 1);
+	update_cfs_load(cfs_rq);
 
 	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
 	load_avg -= cfs_rq->load_contribution;
```
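On the design change at the end of update_cfs_load(): the lb flag and the load_avg < period / 8 heuristic are replaced by an exact test. A leaf cfs_rq is unlinked only when it is demonstrably idle (!curr && !nr_running) and its history has been fully clipped (!load_avg). A tiny driver for the sketch above (hypothetical, reusing toy_cfs_rq and toy_update_load()) shows why the clip makes the !load_avg test deterministic; note that the clip condition requires at least one idle update before the 4-period window can be detected, which in the kernel the periodic update_shares() path provides:

```c
#include <stdint.h>
#include <stdio.h>

/* Reuses struct toy_cfs_rq and toy_update_load() from the sketch above. */
int main(void)
{
	uint64_t period = 1024;			/* illustrative period length */
	struct toy_cfs_rq rq = { .weight = 1024 };

	toy_update_load(&rq, 100, period);		/* queue busy */
	rq.weight = 0;					/* last task leaves */
	toy_update_load(&rq, 200, period);		/* first idle update */
	toy_update_load(&rq, 200 + 5 * period, period);	/* > 4 idle periods */

	/* prints 0: history was clipped, so !load_avg now holds exactly */
	printf("load_avg = %llu\n", (unsigned long long)rq.load_avg);
	return 0;
}
```

Without the clip, load_avg only halves once per elapsed period and merely approaches zero, which is why the old threshold comparison against period / 8 was sensitive to the period length and the queue's share weight, as the commit message notes.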