| author | Kirill Korotaev <dev@sw.ru> | 2006-06-27 02:54:32 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-27 17:32:44 -0700 |
| commit | efc30814a88bdbe2bfe4ac94de2eb089ad80bee3 (patch) | |
| tree | 1134d7ccda13c09cf426af0524c13633292eac5b /kernel/sched.c | |
| parent | cc94abfcbc9fed0048365ce1fb8dc81353408bf8 (diff) | |
[PATCH] sched: CPU hotplug race vs. set_cpus_allowed()
There is a race between set_cpus_allowed() and move_task_off_dead_cpu().
__migrate_task() doesn't report any error code, so a task can be left on its
runqueue if its cpus_allowed mask changed so that dest_cpu is no longer a
possible target. Also, changing the cpus_allowed mask requires rq->lock to be
held.
Signed-off-by: Kirill Korotaev <dev@openvz.org>
Acked-By: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 18 |
1 files changed, 14 insertions, 4 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 235c421631d6..678335a8b390 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4412,13 +4412,16 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
  *
  * So we race with normal scheduler movements, but that's OK, as long
  * as the task is no longer on this CPU.
+ *
+ * Returns non-zero if task was successfully migrated.
  */
-static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 {
 	runqueue_t *rq_dest, *rq_src;
+	int ret = 0;
 
 	if (unlikely(cpu_is_offline(dest_cpu)))
-		return;
+		return ret;
 
 	rq_src = cpu_rq(src_cpu);
 	rq_dest = cpu_rq(dest_cpu);
@@ -4446,9 +4449,10 @@ static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 		if (TASK_PREEMPTS_CURR(p, rq_dest))
 			resched_task(rq_dest->curr);
 	}
-
+	ret = 1;
 out:
 	double_rq_unlock(rq_src, rq_dest);
+	return ret;
 }
 
 /*
@@ -4518,9 +4522,12 @@ wait_to_die:
 /* Figure out where task on dead CPU should go, use force if neccessary. */
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
 {
+	runqueue_t *rq;
+	unsigned long flags;
 	int dest_cpu;
 	cpumask_t mask;
 
+restart:
 	/* On same node? */
 	mask = node_to_cpumask(cpu_to_node(dead_cpu));
 	cpus_and(mask, mask, tsk->cpus_allowed);
@@ -4532,8 +4539,10 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu == NR_CPUS) {
+		rq = task_rq_lock(tsk, &flags);
 		cpus_setall(tsk->cpus_allowed);
 		dest_cpu = any_online_cpu(tsk->cpus_allowed);
+		task_rq_unlock(rq, &flags);
 
 		/*
 		 * Don't tell them about moving exiting tasks or
@@ -4545,7 +4554,8 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
 			"longer affine to cpu%d\n",
 			tsk->pid, tsk->comm, dead_cpu);
 	}
-	__migrate_task(tsk, dead_cpu, dest_cpu);
+	if (!__migrate_task(tsk, dead_cpu, dest_cpu))
+		goto restart;
 }
 
 /*
```
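To make the control flow of the fix easier to follow outside the kernel, here is a minimal user-space sketch of the same pattern. Everything in it (`try_migrate`, `move_off_dead_cpu`, the `cpu_online` array) is a hypothetical stand-in rather than kernel code; the point is only the shape of the fix: the migration helper now reports failure instead of silently doing nothing, and the caller re-picks a destination and retries.

```c
#include <stdbool.h>
#include <stdio.h>

#define NCPU 4

/* Stand-in for online-CPU state; in the kernel this can change
 * concurrently during CPU hotplug. */
static bool cpu_online[NCPU] = { true, false, true, true };

/* Mirrors the patched __migrate_task(): report whether the task
 * actually moved, instead of returning void. */
static bool try_migrate(int *task_cpu, int dest_cpu)
{
	if (dest_cpu < 0 || dest_cpu >= NCPU || !cpu_online[dest_cpu])
		return false;	/* destination vanished under us */
	*task_cpu = dest_cpu;
	return true;
}

/* Mirrors the patched move_task_off_dead_cpu(): re-pick a
 * destination and retry until the migration succeeds. */
static void move_off_dead_cpu(int *task_cpu, int dead_cpu)
{
	int dest_cpu;
restart:
	/* Re-evaluate candidate destinations on every attempt. */
	for (dest_cpu = 0; dest_cpu < NCPU; dest_cpu++)
		if (dest_cpu != dead_cpu && cpu_online[dest_cpu])
			break;
	if (dest_cpu == NCPU)
		return;		/* no online CPU left; give up in this sketch */
	if (!try_migrate(task_cpu, dest_cpu))
		goto restart;	/* raced with a state change: retry */
}

int main(void)
{
	int task_cpu = 1;	/* task sits on the "dead" CPU 1 */
	move_off_dead_cpu(&task_cpu, 1);
	printf("task now on cpu%d\n", task_cpu);
	return 0;
}
```

Before the patch, the equivalent of `try_migrate` returned void, so a failed move was indistinguishable from a successful one and the task could be stranded on the dead CPU's runqueue; the `restart` loop closes that window.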