author	Paul Turner <pjt@google.com>	2010-11-15 15:47:10 -0800
committer	Ingo Molnar <mingo@elte.hu>	2010-11-18 13:27:50 +0100
commit	9437178f623a19af5951808d880a8599f66ac150 (patch)
tree	a2f315092f0b7b31cf023a82175c6cf68fd5e04c /kernel
parent	d6b5591829bd348a5fbe1c428d28dea00621cdba (diff)
sched: Update tg->shares after cpu.shares write
Formerly sched_group_set_shares() would force a rebalance by overflowing domain share sums. Now that per-cpu averages are maintained we can set the true value by issuing an update_cfs_shares() following a tg->shares update.

Also initialize tg se->load to 0 for consistency since we'll now set correct weights on enqueue.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.465521344@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
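Roughly speaking, the weight that update_cfs_shares() installs for each CPU's group entity is the group's cpu.shares value scaled by that CPU's fraction of the group's runnable load. The userspace sketch below only models that redistribution; the CPU count, the cpu_load[] figures and the arithmetic are illustrative stand-ins, not the kernel's actual implementation, which also tracks time-averaged load and clamps the result against MIN_SHARES.

/*
 * Simplified model of distributing a task group's cpu.shares across
 * per-CPU entities.  Illustrative only -- not the kernel implementation.
 */
#include <stdio.h>

#define NR_CPUS		4
#define MIN_SHARES	2	/* floor for a group entity's weight */

/* Stand-in for each cfs_rq's runnable load contributed by the group. */
static unsigned long cpu_load[NR_CPUS] = { 300, 100, 0, 600 };

int main(void)
{
	unsigned long tg_shares = 2048;	/* value written to cpu.shares */
	unsigned long total = 0;
	unsigned long w;
	int i;

	for (i = 0; i < NR_CPUS; i++)
		total += cpu_load[i];

	/*
	 * Each CPU gets the group's shares scaled by its fraction of the
	 * group's total load, never dropping below MIN_SHARES.
	 */
	for (i = 0; i < NR_CPUS; i++) {
		w = total ? tg_shares * cpu_load[i] / total : tg_shares;
		if (w < MIN_SHARES)
			w = MIN_SHARES;
		printf("cpu%d: entity weight %lu\n", i, w);
	}
	return 0;
}

With the loads above this prints weights of roughly 614, 204, 2 (clamped) and 1228: the kind of per-CPU split the new sched_group_set_shares() loop asks update_cfs_shares() to establish, instead of forcing a rebalance as before.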
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	42
1 file changed, 11 insertions(+), 31 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e914a716e1d4..550cf3a02377 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7646,7 +7646,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
		se->cfs_rq = parent->my_q;
	se->my_q = cfs_rq;
-	update_load_set(&se->load, tg->shares);
+	update_load_set(&se->load, 0);
	se->parent = parent;
}
#endif
@@ -8274,37 +8274,12 @@ void sched_move_task(struct task_struct *tsk)
#endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void __set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	int on_rq;
-
-	on_rq = se->on_rq;
-	if (on_rq)
-		dequeue_entity(cfs_rq, se, 0);
-
-	update_load_set(&se->load, shares);
-
-	if (on_rq)
-		enqueue_entity(cfs_rq, se, 0);
-}
-
-static void set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	struct rq *rq = cfs_rq->rq;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	__set_se_shares(se, shares);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
static DEFINE_MUTEX(shares_mutex);
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
+	unsigned long flags;
	/*
	 * We can't change the weight of the root cgroup.
@@ -8323,10 +8298,15 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
	tg->shares = shares;
	for_each_possible_cpu(i) {
-		/*
-		 * force a rebalance
-		 */
-		set_se_shares(tg->se[i], shares);
+		struct rq *rq = cpu_rq(i);
+		struct sched_entity *se;
+
+		se = tg->se[i];
+		/* Propagate contribution to hierarchy */
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		for_each_sched_entity(se)
+			update_cfs_shares(group_cfs_rq(se), 0);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}
done: