author    Oleg Nesterov <oleg@redhat.com>    2015-10-08 19:01:41 +0200
committer Ingo Molnar <mingo@kernel.org>     2015-10-20 10:23:55 +0200
commit    d8bc853582bfd81a9c08ca6922aeb01570080ccc
tree      20b0dd94f84060559063c561cd0599a565e1a39b
parent    5caa1c089aebcb83ccd5b79a3b88b0aa58288d05
stop_machine: Change cpu_stop_queue_two_works() to rely on stopper->enabled
Change cpu_stop_queue_two_works() to ensure that both CPUs have
stopper->enabled == T, or fail otherwise. This way stop_two_cpus() no
longer needs to check cpu_active() to avoid the deadlock. This patch
doesn't remove those checks; we will do that later.

Note: we need to take both stopper->locks at the same time, but this
will also help to remove lglock from stop_machine.c, so I hope this is
fine.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: heiko.carstens@de.ibm.com
Link: http://lkml.kernel.org/r/20151008170141.GA25537@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
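The change is an all-or-nothing queueing step: both per-CPU stopper locks
are held while both ->enabled flags are checked, so either both works are
queued or neither is. A half-queued pair would leave one CPU spinning in
multi_cpu_stop() waiting for a partner whose work never arrives, which is
the deadlock the cpu_active() checks currently guard against. A minimal
userspace sketch of the pattern (all names illustrative; pthread mutexes
stand in for the per-CPU stopper spinlocks, counters for the work lists;
this is not the kernel code itself):

/*
 * Minimal userspace analogue of the queue-both-or-fail logic.
 * Build with: cc sketch.c -lpthread
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct stopper {
	pthread_mutex_t lock;
	bool enabled;		/* ~ stopper->enabled */
	int queued;		/* ~ length of stopper->works */
};

static int queue_two_works(struct stopper *s1, struct stopper *s2)
{
	int err = -ENOENT;

	/*
	 * Take both locks in a fixed caller-side order. The real code
	 * additionally holds stop_cpus_lock around this and uses
	 * spin_lock_nested(), since both locks share one lock class.
	 */
	pthread_mutex_lock(&s1->lock);
	pthread_mutex_lock(&s2->lock);

	/* Queue both works, or neither: never leave one side waiting. */
	if (s1->enabled && s2->enabled) {
		s1->queued++;
		s2->queued++;
		err = 0;
	}

	pthread_mutex_unlock(&s2->lock);
	pthread_mutex_unlock(&s1->lock);
	return err;
}

int main(void)
{
	struct stopper a = { PTHREAD_MUTEX_INITIALIZER, true,  0 };
	struct stopper b = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

	/* One "CPU" disabled: fails, and nothing is queued on either side. */
	printf("err=%d a=%d b=%d\n", queue_two_works(&a, &b), a.queued, b.queued);

	b.enabled = true;
	/* Both enabled: succeeds, and both works are queued. */
	printf("err=%d a=%d b=%d\n", queue_two_works(&a, &b), a.queued, b.queued);
	return 0;
}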
Diffstat (limited to 'kernel/stop_machine.c')
 kernel/stop_machine.c | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 688d6b37bb59..91fbb109de6c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -219,12 +219,27 @@ static int multi_cpu_stop(void *data)
 static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 				    int cpu2, struct cpu_stop_work *work2)
 {
+	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
+	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
+	int err;
+
 	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
-	cpu_stop_queue_work(cpu1, work1);
-	cpu_stop_queue_work(cpu2, work2);
+	spin_lock_irq(&stopper1->lock);
+	spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+
+	err = -ENOENT;
+	if (!stopper1->enabled || !stopper2->enabled)
+		goto unlock;
+
+	err = 0;
+	__cpu_stop_queue_work(stopper1, work1);
+	__cpu_stop_queue_work(stopper2, work2);
+unlock:
+	spin_unlock(&stopper2->lock);
+	spin_unlock_irq(&stopper1->lock);
 	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
-	return 0;
+	return err;
 }
 
 /**
  * stop_two_cpus - stops two cpus
@@ -261,12 +276,8 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 	set_state(&msdata, MULTI_STOP_PREPARE);
 
 	/*
-	 * If we observe both CPUs active we know _cpu_down() cannot yet have
-	 * queued its stop_machine works and therefore ours will get executed
-	 * first. Or its not either one of our CPUs that's getting unplugged,
-	 * in which case we don't care.
-	 *
-	 * This relies on the stopper workqueues to be FIFO.
+	 * We do not want to migrate to inactive CPU. FIXME: move this
+	 * into migrate_swap_stop() callback.
 	 */
 	if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
 		preempt_enable();
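A note on the asymmetric lock calls in the first hunk: stopper1->lock and
stopper2->lock belong to the same lock class, so a plain second spin_lock()
would trigger a false-positive lockdep report. The general idiom, sketched
here in kernel-style C (an illustrative fragment, not part of the patch):

	/* Disable IRQs once; both critical sections run with IRQs off. */
	spin_lock_irq(&stopper1->lock);
	/*
	 * Same lock class as above: SINGLE_DEPTH_NESTING tells lockdep
	 * this second acquisition is a deliberate, ordered nesting rather
	 * than a self-deadlock. Here the surrounding lg_double_lock()
	 * keeps two callers from taking the same pair of stopper locks
	 * in opposite order.
	 */
	spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
	/* ... queue work on both CPUs, or bail out on !enabled ... */
	spin_unlock(&stopper2->lock);		/* inner lock first */
	spin_unlock_irq(&stopper1->lock);	/* re-enables interrupts */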