Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/kernel/head.S           |  2
-rw-r--r--  arch/ppc64/kernel/machine_kexec.c  | 12
-rw-r--r--  arch/ppc64/kernel/misc.S           |  6
-rw-r--r--  arch/ppc64/kernel/mpic.c           |  4
-rw-r--r--  arch/ppc64/kernel/mpic.h           |  2
-rw-r--r--  arch/ppc64/kernel/prom.c           |  2
-rw-r--r--  arch/ppc64/kernel/xics.c           | 31
-rw-r--r--  arch/ppc64/mm/numa.c               |  7
-rw-r--r--  arch/ppc64/xmon/xmon.c             |  3
9 files changed, 45 insertions, 24 deletions
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 74fc3bc68604..784f56d4684c 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -2071,7 +2071,7 @@ _GLOBAL(hmt_start_secondary)
blr
#endif
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
+#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
_GLOBAL(smp_release_cpus)
/* All secondary cpus are spinning on a common
* spinloop, release them all now so they can start
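
The spinloop hand-off that smp_release_cpus() performs (and that a kexec-enabled UP kernel now needs as well) can be modelled in user space. A minimal sketch of the idea only, assuming nothing beyond a shared release flag; release_flag and secondary_hold are made-up names, not kernel symbols, and the real implementation is the ppc64 assembly above:

    /* Illustrative model: secondaries spin until the boot CPU releases them. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int release_flag;            /* stands in for the spinloop variable */

    static void *secondary_hold(void *arg)
    {
        /* Spin until the boot CPU performs the release. */
        while (!atomic_load_explicit(&release_flag, memory_order_acquire))
            ;
        printf("secondary %ld released\n", (long)arg);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[2];

        for (long i = 0; i < 2; i++)
            pthread_create(&t[i], NULL, secondary_hold, (void *)i);

        /* Boot CPU: the equivalent of smp_release_cpus(). */
        atomic_store_explicit(&release_flag, 1, memory_order_release);

        for (int i = 0; i < 2; i++)
            pthread_join(t[i], NULL);
        return 0;
    }
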
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c
index fdb2fc649d72..4775f12a013c 100644
--- a/arch/ppc64/kernel/machine_kexec.c
+++ b/arch/ppc64/kernel/machine_kexec.c
@@ -185,7 +185,7 @@ void kexec_copy_flush(struct kimage *image)
void kexec_smp_down(void *arg)
{
if (ppc_md.cpu_irq_down)
- ppc_md.cpu_irq_down();
+ ppc_md.cpu_irq_down(1);
local_irq_disable();
kexec_smp_wait();
@@ -232,7 +232,7 @@ static void kexec_prepare_cpus(void)
/* after we tell the others to go down */
if (ppc_md.cpu_irq_down)
- ppc_md.cpu_irq_down();
+ ppc_md.cpu_irq_down(0);
put_cpu();
@@ -243,15 +243,19 @@ static void kexec_prepare_cpus(void)
static void kexec_prepare_cpus(void)
{
+ extern void smp_release_cpus(void);
/*
* move the secondarys to us so that we can copy
* the new kernel 0-0x100 safely
*
* do this if kexec in setup.c ?
+ *
+ * We need to release the cpus if we are ever going from an
+ * UP to an SMP kernel.
*/
- smp_relase_cpus();
+ smp_release_cpus();
if (ppc_md.cpu_irq_down)
- ppc_md.cpu_irq_down();
+ ppc_md.cpu_irq_down(0);
local_irq_disable();
}
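
A minimal sketch of the calling convention these hunks introduce, assuming the new int argument only distinguishes a secondary brought down by the kexec IPI (1) from the CPU that will boot the new kernel (0); cpu_irq_down_sketch is a made-up stand-in for whatever the platform hangs off ppc_md.cpu_irq_down:

    #include <stdio.h>

    /* Sketch of the hook's new contract; not the real machdep_calls member. */
    static void cpu_irq_down_sketch(int secondary)
    {
        if (secondary)
            printf("secondary: EOI the kexec IPI, then stop taking interrupts\n");
        else
            printf("kexec'ing cpu: quiesce interrupts but stay in the interrupt queue\n");
    }

    int main(void)
    {
        cpu_irq_down_sketch(1);   /* as called from kexec_smp_down() on each secondary */
        cpu_irq_down_sketch(0);   /* as called from kexec_prepare_cpus() on the master */
        return 0;
    }
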
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 59f4f9973818..a05b50b738e9 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -1129,6 +1129,9 @@ _GLOBAL(sys_call_table32)
.llong .compat_sys_waitid
.llong .sys32_ioprio_set
.llong .sys32_ioprio_get
+ .llong .sys_inotify_init /* 275 */
+ .llong .sys_inotify_add_watch
+ .llong .sys_inotify_rm_watch
.balign 8
_GLOBAL(sys_call_table)
@@ -1407,3 +1410,6 @@ _GLOBAL(sys_call_table)
.llong .sys_waitid
.llong .sys_ioprio_set
.llong .sys_ioprio_get
+ .llong .sys_inotify_init /* 275 */
+ .llong .sys_inotify_add_watch
+ .llong .sys_inotify_rm_watch
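
Both the 64-bit table and the 32-bit compat table gain the three inotify entries. They can be exercised from user space by syscall number; the numbers below follow the 275 comment in the table (276 and 277 for the next two slots) and should be confirmed against asm/unistd.h before being relied on:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Assumed ppc64 numbers, taken from the table comment above. */
    #define PPC64_NR_inotify_init       275
    #define PPC64_NR_inotify_add_watch  276
    #define PPC64_NR_inotify_rm_watch   277

    int main(void)
    {
        long fd = syscall(PPC64_NR_inotify_init);
        if (fd < 0) {
            perror("inotify_init");
            return 1;
        }

        /* 0x100 is IN_CREATE: report files created under /tmp. */
        long wd = syscall(PPC64_NR_inotify_add_watch, fd, "/tmp", 0x100);
        printf("inotify fd=%ld wd=%ld\n", fd, wd);

        syscall(PPC64_NR_inotify_rm_watch, fd, wd);
        return 0;
    }
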
diff --git a/arch/ppc64/kernel/mpic.c b/arch/ppc64/kernel/mpic.c
index e8fbab1df37f..cc262a05ddb4 100644
--- a/arch/ppc64/kernel/mpic.c
+++ b/arch/ppc64/kernel/mpic.c
@@ -794,10 +794,10 @@ void mpic_setup_this_cpu(void)
/*
* XXX: someone who knows mpic should check this.
- * do we need to eoi the ipi here (see xics comments)?
+ * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
* or can we reset the mpic in the new kernel?
*/
-void mpic_teardown_this_cpu(void)
+void mpic_teardown_this_cpu(int secondary)
{
struct mpic *mpic = mpic_primary;
unsigned long flags;
diff --git a/arch/ppc64/kernel/mpic.h b/arch/ppc64/kernel/mpic.h
index 99fbbc9a084c..ca78a7f10528 100644
--- a/arch/ppc64/kernel/mpic.h
+++ b/arch/ppc64/kernel/mpic.h
@@ -256,7 +256,7 @@ extern unsigned int mpic_irq_get_priority(unsigned int irq);
extern void mpic_setup_this_cpu(void);
/* Clean up for kexec (or cpu offline or ...) */
-extern void mpic_teardown_this_cpu(void);
+extern void mpic_teardown_this_cpu(int secondary);
/* Request IPIs on primary mpic */
extern void mpic_request_ipis(void);
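
With the extra argument, the mpic and xics teardown hooks share one shape, so either can sit behind a single function pointer such as ppc_md.cpu_irq_down. A stand-alone sketch with stub bodies and a hypothetical typedef; the real selection happens in platform setup code:

    #include <stdio.h>

    typedef void (*cpu_irq_down_fn)(int secondary);

    /* Stubs with the new shared prototype; the real functions live in mpic.c/xics.c. */
    static void mpic_teardown_this_cpu(int secondary) { printf("mpic teardown, secondary=%d\n", secondary); }
    static void xics_teardown_cpu(int secondary)      { printf("xics teardown, secondary=%d\n", secondary); }

    int main(void)
    {
        cpu_irq_down_fn hook;

        hook = mpic_teardown_this_cpu;   /* OpenPIC-based systems */
        hook(1);

        hook = xics_teardown_cpu;        /* RTAS/XICS-based systems */
        hook(0);
        return 0;
    }
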
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 47727a6f7346..5aca01ddd81f 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -916,6 +916,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
}
}
+#ifdef CONFIG_ALTIVEC
/* Check if we have a VMX and eventually update CPU features */
prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", NULL);
if (prop && (*prop) > 0) {
@@ -929,6 +930,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
}
+#endif /* CONFIG_ALTIVEC */
/*
* Check for an SMT capable CPU and set the CPU feature. We do
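
The effect of the new CONFIG_ALTIVEC guard, modelled as a stand-alone program: the VMX check is only compiled in when the option is set, so a kernel built without Altivec support never advertises the feature. FTR_ALTIVEC and scan_cpu_node are illustrative stand-ins, not the kernel's definitions; build with -DCONFIG_ALTIVEC to include the check:

    #include <stdio.h>

    #define FTR_ALTIVEC 0x1UL                 /* stand-in for CPU_FTR_ALTIVEC */

    static unsigned long cpu_features;

    static void scan_cpu_node(int vmx_prop)   /* stand-in for the device-tree scan */
    {
    #ifdef CONFIG_ALTIVEC
        if (vmx_prop > 0)
            cpu_features |= FTR_ALTIVEC;
    #else
        (void)vmx_prop;                       /* ignored: no Altivec support built in */
    #endif
    }

    int main(void)
    {
        scan_cpu_node(1);
        printf("features: %#lx\n", cpu_features);
        return 0;
    }
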
diff --git a/arch/ppc64/kernel/xics.c b/arch/ppc64/kernel/xics.c
index 677c4450984a..d9dc6f28d050 100644
--- a/arch/ppc64/kernel/xics.c
+++ b/arch/ppc64/kernel/xics.c
@@ -647,29 +647,30 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
}
}
-void xics_teardown_cpu(void)
+void xics_teardown_cpu(int secondary)
{
int cpu = smp_processor_id();
- int status;
ops->cppr_info(cpu, 0x00);
iosync();
/*
- * we need to EOI the IPI if we got here from kexec down IPI
- *
- * xics doesn't care if we duplicate an EOI as long as we
- * don't EOI and raise priority.
- *
- * probably need to check all the other interrupts too
- * should we be flagging idle loop instead?
- * or creating some task to be scheduled?
+ * Some machines need to have at least one cpu in the GIQ,
+ * so leave the master cpu in the group.
*/
- ops->xirr_info_set(cpu, XICS_IPI);
-
- status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
- (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
- WARN_ON(status != 0);
+ if (secondary) {
+ /*
+ * we need to EOI the IPI if we got here from kexec down IPI
+ *
+ * probably need to check all the other interrupts too
+ * should we be flagging idle loop instead?
+ * or creating some task to be scheduled?
+ */
+ ops->xirr_info_set(cpu, XICS_IPI);
+ rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
+ (1UL << interrupt_server_size) - 1 -
+ default_distrib_server, 0);
+ }
}
#ifdef CONFIG_HOTPLUG_CPU
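
A stand-alone model of the new xics_teardown_cpu() behaviour, assuming the intent stated in the comment: every CPU drops its priority, but only secondaries EOI the kexec IPI and leave the global interrupt queue, so the master always remains a GIQ member. The rtas/xics operations are replaced by prints:

    #include <stdio.h>

    static int giq_members = 4;   /* pretend four CPUs start out in the GIQ */

    static void xics_teardown_cpu_model(int cpu, int secondary)
    {
        printf("cpu %d: CPPR -> 0x00, no further interrupts presented\n", cpu);
        if (secondary) {
            printf("cpu %d: EOI kexec IPI, leave GIQ\n", cpu);
            giq_members--;
        }
    }

    int main(void)
    {
        for (int cpu = 1; cpu < 4; cpu++)
            xics_teardown_cpu_model(cpu, 1);   /* kexec_smp_down() path */
        xics_teardown_cpu_model(0, 0);         /* master, from kexec_prepare_cpus() */

        printf("GIQ members left: %d (never zero)\n", giq_members);
        return 0;
    }
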
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
index cafd91aef289..0b191f2de016 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -647,7 +647,12 @@ void __init do_init_bootmem(void)
new_range:
mem_start = read_n_cells(addr_cells, &memcell_buf);
mem_size = read_n_cells(size_cells, &memcell_buf);
- numa_domain = numa_enabled ? of_node_numa_domain(memory) : 0;
+ if (numa_enabled) {
+ numa_domain = of_node_numa_domain(memory);
+ if (numa_domain >= MAX_NUMNODES)
+ numa_domain = 0;
+ } else
+ numa_domain = 0;
if (numa_domain != nid)
continue;
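
The numa.c change in isolation, as a small testable function: an out-of-range domain reported by firmware is clamped to node 0 instead of being used to index past MAX_NUMNODES. The MAX_NUMNODES value and the sanitize_numa_domain name are illustrative only:

    #include <stdio.h>

    #define MAX_NUMNODES 16   /* illustrative; the kernel value depends on config */

    static int sanitize_numa_domain(int numa_enabled, int reported_domain)
    {
        int numa_domain;

        if (numa_enabled) {
            numa_domain = reported_domain;
            if (numa_domain >= MAX_NUMNODES)
                numa_domain = 0;
        } else
            numa_domain = 0;
        return numa_domain;
    }

    int main(void)
    {
        printf("%d\n", sanitize_numa_domain(1, 3));     /* 3: in range      */
        printf("%d\n", sanitize_numa_domain(1, 200));   /* 0: clamped       */
        printf("%d\n", sanitize_numa_domain(0, 3));     /* 0: NUMA disabled */
        return 0;
    }
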
diff --git a/arch/ppc64/xmon/xmon.c b/arch/ppc64/xmon/xmon.c
index 7f6e13a4b71e..05539439e6bc 100644
--- a/arch/ppc64/xmon/xmon.c
+++ b/arch/ppc64/xmon/xmon.c
@@ -329,13 +329,16 @@ int xmon_core(struct pt_regs *regs, int fromipi)
printf("cpu 0x%x: Exception %lx %s in xmon, "
"returning to main loop\n",
cpu, regs->trap, getvecname(TRAP(regs)));
+ release_output_lock();
longjmp(xmon_fault_jmp[cpu], 1);
}
if (setjmp(recurse_jmp) != 0) {
if (!in_xmon || !xmon_gate) {
+ get_output_lock();
printf("xmon: WARNING: bad recursive fault "
"on cpu 0x%x\n", cpu);
+ release_output_lock();
goto waiting;
}
secondary = !(xmon_taken && cpu == xmon_owner);
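
The shape of the xmon fix, modelled with a pthread mutex standing in for xmon's output lock: every printf on the shared console is bracketed by get_output_lock()/release_output_lock(), including the path that longjmps back to the main loop. The two lock functions here are local stubs, not the kernel ones:

    #include <pthread.h>
    #include <setjmp.h>
    #include <stdio.h>

    static pthread_mutex_t output_lock = PTHREAD_MUTEX_INITIALIZER;
    static jmp_buf fault_jmp;

    static void get_output_lock(void)     { pthread_mutex_lock(&output_lock); }
    static void release_output_lock(void) { pthread_mutex_unlock(&output_lock); }

    static void report_fault_and_return(int cpu)
    {
        get_output_lock();
        printf("cpu 0x%x: exception in xmon, returning to main loop\n", cpu);
        /* Must drop the lock before longjmp, or no other CPU can ever print. */
        release_output_lock();
        longjmp(fault_jmp, 1);
    }

    int main(void)
    {
        if (setjmp(fault_jmp) == 0)
            report_fault_and_return(0);
        printf("back in the main loop, output lock free\n");
        return 0;
    }
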