author		Will Deacon <will.deacon@arm.com>	2012-04-27 12:51:43 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-04-28 11:01:31 +0100
commit		6fa99b7f80b4a7ed2cf616eae393bb6d9d51ba8f (patch)
tree		a52bc9e2d1e774125ba920ec9f269a9c3587f481
parent		6a1c53124aa161eb624ce7b1e40ade728186d34c (diff)
ARM: 7405/1: kexec: call platform_cpu_kill on the killer rather than the victim
When performing a kexec on an SMP system, the secondary cores are stopped by calling machine_shutdown(), which in turn issues IPIs to offline the other CPUs. Unfortunately, this isn't enough to reboot the cores into a new kernel (since they are just executing a cpu_relax loop somewhere in memory), so we make use of platform_cpu_kill, part of the CPU hotplug implementation, to place the cores somewhere safe. This function expects to be called on the killing CPU for each core that it takes out.

This patch moves the platform_cpu_kill callback out of the IPI handler and into smp_send_stop, therefore ensuring that it executes on the killing CPU rather than on the victim, matching what the hotplug code requires.

Cc: stable@vger.kernel.org
Reported-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
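In essence, the stop path now records which CPUs it signalled and, once they have parked in ipi_cpu_stop(), calls platform_cpu_kill() on each of them from the CPU driving the shutdown. A condensed sketch of the resulting flow, simplified from the diff below (the CONFIG_HOTPLUG_CPU=n stub and the failure warning are elided):

	void smp_send_stop(void)
	{
		unsigned long timeout;
		struct cpumask mask;

		/* Every online CPU except the one driving the shutdown. */
		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);
		smp_cross_call(&mask, IPI_CPU_STOP);

		/* Wait up to one second for the victims to stop. */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && timeout--)
			udelay(1);

		/* Kill each victim from here, the killing CPU. */
		smp_kill_cpus(&mask);
	}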
-rw-r--r--	arch/arm/kernel/smp.c	28
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index addbbe8028c2..f6a4d32b0421 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu)
 	local_fiq_disable();
 	local_irq_disable();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	platform_cpu_kill(cpu);
-#endif
-
 	while (1)
 		cpu_relax();
 }
@@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+	unsigned int cpu;
+	for_each_cpu(cpu, mask)
+		platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
+
 void smp_send_stop(void)
 {
 	unsigned long timeout;
+	struct cpumask mask;
 
-	if (num_online_cpus() > 1) {
-		struct cpumask mask;
-		cpumask_copy(&mask, cpu_online_mask);
-		cpumask_clear_cpu(smp_processor_id(), &mask);
-
-		smp_cross_call(&mask, IPI_CPU_STOP);
-	}
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
@@ -595,6 +599,8 @@ void smp_send_stop(void)
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
+
+	smp_kill_cpus(&mask);
 }
 
 /*