commit     a15b12ac36ad4e7b856a4ae54937ae26a51aebad
tree       78a87ec22776757a000e214313b78fabe3b6fbb6 /kernel/sched
parent     1ba93d42727c44001aa8ccffd39c8ab5705379e2
author     Kirill Tkhai <ktkhai@parallels.com>    2014-09-12 15:03:34 +0400
committer  Ingo Molnar <mingo@kernel.org>         2014-09-19 12:35:21 +0200
sched: Do not stop cpu in set_cpus_allowed_ptr() if task is not running
If a task is queued but not running on its rq, we can migrate it
directly, without involving the migration thread and without a context
switch.
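
In outline, an affinity change now takes one of three paths. A simplified
sketch of the resulting logic (not the verbatim kernel code; locking, the
cpumask checks and error paths are elided):

        if (task_running(rq, p) || p->state == TASK_WAKING) {
                /* Task occupies a CPU: the stopper must push it off. */
                struct migration_arg arg = { p, dest_cpu };

                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
        } else if (task_on_rq_queued(p)) {
                /* Queued but not running: requeue it on the new rq
                 * directly, with no stopper and no context switch. */
                rq = move_queued_task(p, dest_cpu);
        }
        /* Otherwise the task is sleeping; the next wake-up will
         * place it according to the new mask. */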
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1410519814.3569.7.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c | 47
1 file changed, 32 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5536397a0309..4b1ddebed54a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4629,6 +4629,33 @@ void init_idle(struct task_struct *idle, int cpu)
 }
 
 #ifdef CONFIG_SMP
+/*
+ * move_queued_task - move a queued task to new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+{
+        struct rq *rq = task_rq(p);
+
+        lockdep_assert_held(&rq->lock);
+
+        dequeue_task(rq, p, 0);
+        p->on_rq = TASK_ON_RQ_MIGRATING;
+        set_task_cpu(p, new_cpu);
+        raw_spin_unlock(&rq->lock);
+
+        rq = cpu_rq(new_cpu);
+
+        raw_spin_lock(&rq->lock);
+        BUG_ON(task_cpu(p) != new_cpu);
+        p->on_rq = TASK_ON_RQ_QUEUED;
+        enqueue_task(rq, p, 0);
+        check_preempt_curr(rq, p, 0);
+
+        return rq;
+}
+
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
         if (p->sched_class && p->sched_class->set_cpus_allowed)
@@ -4685,14 +4712,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
                 goto out;
 
         dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-        if (task_on_rq_queued(p) || p->state == TASK_WAKING) {
+        if (task_running(rq, p) || p->state == TASK_WAKING) {
                 struct migration_arg arg = { p, dest_cpu };
                 /* Need help from migration thread: drop lock and wait. */
                 task_rq_unlock(rq, p, &flags);
                 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                 tlb_migrate_finish(p->mm);
                 return 0;
-        }
+        } else if (task_on_rq_queued(p))
+                rq = move_queued_task(p, dest_cpu);
 out:
         task_rq_unlock(rq, p, &flags);
 
@@ -4735,19 +4763,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
          * If we're not on a rq, the next wake-up will ensure we're
          * placed properly.
          */
-        if (task_on_rq_queued(p)) {
-                dequeue_task(rq, p, 0);
-                p->on_rq = TASK_ON_RQ_MIGRATING;
-                set_task_cpu(p, dest_cpu);
-                raw_spin_unlock(&rq->lock);
-
-                rq = cpu_rq(dest_cpu);
-                raw_spin_lock(&rq->lock);
-                BUG_ON(task_rq(p) != rq);
-                p->on_rq = TASK_ON_RQ_QUEUED;
-                enqueue_task(rq, p, 0);
-                check_preempt_curr(rq, p, 0);
-        }
+        if (task_on_rq_queued(p))
+                rq = move_queued_task(p, dest_cpu);
 done:
         ret = 1;
 fail:
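
Two details of move_queued_task() are worth noting. First, it drops the old
rq->lock before taking the new one, and marks the task TASK_ON_RQ_MIGRATING
for the window in which neither lock is held, so code inspecting p->on_rq
under either lock can tell the task is in transit rather than stably queued
(this state was added earlier in this series). Second, the helper also
replaces the previously open-coded migration in __migrate_task(), which is
where most of the 15 deleted lines come from, so the new fast path adds
little net code.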