author		Peter Zijlstra <peterz@infradead.org>	2015-06-11 14:46:39 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2015-06-19 00:25:26 +0200
commit		4c9a4bc89a9cca8128bce67d6bc8870d6b7ee0b2
tree		abb5e04295a140ebe7c12ed16c6c8e70da8a33b1 /kernel
parent		dbc7f069b93a249340e974d6e8f55656280d8701
sched: Allow balance callbacks for check_class_changed()
In order to remove dropping rq->lock from the
switched_{to,from}()/prio_changed() sched_class methods, run the
balance callbacks after these methods, once rq->lock has been
dropped (see the sketch below).
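Concretely, every call site then follows the pattern below. This is a
simplified sketch of the rt_mutex_setprio() hunk in the diff further
down, not verbatim kernel code:

	raw_spin_lock(&rq->lock);

	check_class_changed(rq, p, prev_class, oldprio);
	/*
	 * switched_from()/switched_to()/prio_changed() may only *queue*
	 * balance work here; they must not drop rq->lock themselves.
	 */

	preempt_disable();		/* keep rq from going away on us */
	raw_spin_unlock(&rq->lock);

	balance_callback(rq);		/* now run the queued balance work */
	preempt_enable();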
We need to remove dropping rq->lock because it is buggy; suppose we
use sched_setattr()/sched_setscheduler() to change a running task
from FIFO to OTHER.
By the time we get to switched_from_rt() the task is already enqueued
on the cfs runqueues. If switched_from_rt() does pull_rt_task() and
drops rq->lock, load-balancing can come in and move our task @p to
another rq.
The subsequent switched_to_fair() still assumes @p is on @rq and bad
things will happen.
By using balance callbacks we delay the load-balancing operations
{rt,dl}x{push,pull} until we've done all the important work and the
task is fully set up.
Furthermore, the balance callbacks do not know about @p and therefore
cannot get confused in this way (see the sketch of the callback list
below).
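For reference, the balance_callback list itself is introduced by a
companion patch in this series: a single-linked list of callback_head
entries hanging off the rq, queued under rq->lock and drained after
the lock has been dropped. A simplified sketch, which may differ in
detail from the tree at this commit:

	/* Queue balance work; the caller must hold rq->lock. */
	static inline void
	queue_balance_callback(struct rq *rq, struct callback_head *head,
			       void (*func)(struct rq *rq))
	{
		lockdep_assert_held(&rq->lock);

		if (unlikely(head->next))	/* already queued */
			return;

		head->func = (void (*)(struct callback_head *))func;
		head->next = rq->balance_callback;
		rq->balance_callback = head;
	}

	/* Run and clear the queued work; called with rq->lock released. */
	static inline void balance_callback(struct rq *rq)
	{
		struct callback_head *head, *next;
		void (*func)(struct rq *rq);
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);
		head = rq->balance_callback;
		rq->balance_callback = NULL;
		while (head) {
			func = (void (*)(struct rq *))head->func;
			next = head->next;
			head->next = NULL;
			func(rq);	/* e.g. push_rt_tasks(); may drop rq->lock */
			head = next;
		}
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}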
Reported-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Link: http://lkml.kernel.org/r/20150611124742.615343911@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	25
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b610ef9e522f..ef546e349e75 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1001,7 +1001,11 @@ inline int task_curr(const struct task_struct *p)
 }
 
 /*
- * Can drop rq->lock because from sched_class::switched_from() methods drop it.
+ * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
+ * use the balance_callback list if you want balancing.
+ *
+ * this means any call to check_class_changed() must be followed by a call to
+ * balance_callback().
  */
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
@@ -1010,7 +1014,7 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 	if (prev_class != p->sched_class) {
 		if (prev_class->switched_from)
 			prev_class->switched_from(rq, p);
-		/* Possble rq->lock 'hole'.  */
+
 		p->sched_class->switched_to(rq, p);
 	} else if (oldprio != p->prio || dl_task(p))
 		p->sched_class->prio_changed(rq, p, oldprio);
@@ -1491,8 +1495,12 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 	p->state = TASK_RUNNING;
 
 #ifdef CONFIG_SMP
-	if (p->sched_class->task_woken)
+	if (p->sched_class->task_woken) {
+		/*
+		 * XXX can drop rq->lock; most likely ok.
+		 */
 		p->sched_class->task_woken(rq, p);
+	}
 
 	if (rq->idle_stamp) {
 		u64 delta = rq_clock(rq) - rq->idle_stamp;
@@ -3100,7 +3108,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
+	preempt_disable(); /* avoid rq from going away on us */
 	__task_rq_unlock(rq);
+
+	balance_callback(rq);
+	preempt_enable();
 }
 #endif
@@ -3661,11 +3673,18 @@ change:
 	}
 
 	check_class_changed(rq, p, prev_class, oldprio);
+	preempt_disable(); /* avoid rq from going away on us */
 	task_rq_unlock(rq, p, &flags);
 
 	if (pi)
 		rt_mutex_adjust_pi(p);
 
+	/*
+	 * Run balance callbacks after we've adjusted the PI chain.
+	 */
+	balance_callback(rq);
+	preempt_enable();
+
 	return 0;
 }
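Later patches in this series convert the sched_class methods
themselves. Schematically, switched_from_rt() stops calling
pull_rt_task() directly (the call that used to drop rq->lock) and
instead queues the pull on a per-CPU callback head. A hedged sketch;
the exact shape in the follow-up patches may differ:

	static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

	static void queue_pull_task(struct rq *rq)
	{
		queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu),
				       pull_rt_task);
	}

	static void switched_from_rt(struct rq *rq, struct task_struct *p)
	{
		/*
		 * Only queue the pull; balance_callback() will run it once
		 * the task is fully set up and rq->lock has been dropped.
		 */
		if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
			return;

		queue_pull_task(rq);
	}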