author    Brian King <brking@linux.vnet.ibm.com>    2009-08-28 12:06:29 +0000
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>    2009-09-02 16:19:01 +1000
commit    46db2f86a3b2a94e0b33e0b4548fb7b7b6bdff66 (patch)
tree      6aef8ab146a54d04dd207b0f85f362a4aee3ef5d
parent    b8e4a7dae53760b9791aca96e74366078692d90f (diff)
powerpc/pseries: Fix to handle slb resize across migration
The SLB can change sizes across a live migration, which was not being
handled, resulting in possible machine crashes during migration if
migrating to a machine which has a smaller max SLB size than the source
machine. Fix this by first reducing the SLB size to the minimum possible
value, which is 32, prior to migration. Then during the device tree
update which occurs after migration, we make the call to ensure the SLB
gets updated. Also add the slb_size to the lparcfg output so that the
migration tools can check to make sure the kernel has this capability
before allowing migration in scenarios where the SLB size will change.

BenH: Fixed #include <asm/mmu-hash64.h> -> <asm/mmu.h> to avoid
breaking ppc32 build

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
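The lparcfg addition is what migration tooling keys off: a kernel that reports slb_size in /proc/ppc64/lparcfg is one that knows how to shrink and re-expand its SLB across a migration. Below is a minimal userspace sketch of such a capability check; the file path and field name come from this patch, but the checking tool itself is hypothetical.

/* Hypothetical capability check a migration tool might perform:
 * a kernel with this patch reports "slb_size=<n>" in
 * /proc/ppc64/lparcfg; older kernels omit the field entirely. */
#include <stdio.h>

/* Returns the reported SLB size, or -1 if the kernel lacks the field. */
static int kernel_slb_size(void)
{
	FILE *f = fopen("/proc/ppc64/lparcfg", "r");
	char line[128];
	int slb_size = -1;

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "slb_size=%d", &slb_size) == 1)
			break;
	}
	fclose(f);
	return slb_size;
}

int main(void)
{
	int size = kernel_slb_size();

	if (size < 0)
		printf("kernel cannot resize its SLB; refuse migration if SLB size changes\n");
	else
		printf("kernel SLB size: %d entries\n", size);
	return 0;
}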
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h      |  2
-rw-r--r--  arch/powerpc/kernel/lparcfg.c              |  3
-rw-r--r--  arch/powerpc/kernel/rtas.c                 |  7
-rw-r--r--  arch/powerpc/mm/slb.c                      | 16
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c  |  9
5 files changed, 31 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index b537903b9fca..bebe31c2e907 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -41,6 +41,7 @@ extern char initial_stab[];
#define SLB_NUM_BOLTED 3
#define SLB_CACHE_ENTRIES 8
+#define SLB_MIN_SIZE 32
/* Bits in the SLB ESID word */
#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */
@@ -276,6 +277,7 @@ extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);
extern void slb_vmalloc_update(void);
+extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 2419cc706ff1..ed0ac4e4b8d8 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,6 +35,7 @@
#include <asm/prom.h>
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
+#include <asm/mmu.h>
#define MODULE_VERS "1.8"
#define MODULE_NAME "lparcfg"
@@ -537,6 +538,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
+ seq_printf(m, "slb_size=%d\n", mmu_slb_size);
+
return 0;
}
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index c434823b8c83..bf90361bb70f 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -39,6 +39,7 @@
#include <asm/smp.h>
#include <asm/atomic.h>
#include <asm/time.h>
+#include <asm/mmu.h>
struct rtas_t rtas = {
.lock = __RAW_SPIN_LOCK_UNLOCKED
@@ -713,6 +714,7 @@ static void rtas_percpu_suspend_me(void *info)
{
long rc = H_SUCCESS;
unsigned long msr_save;
+ u16 slb_size = mmu_slb_size;
int cpu;
struct rtas_suspend_me_data *data =
(struct rtas_suspend_me_data *)info;
@@ -735,13 +737,16 @@ static void rtas_percpu_suspend_me(void *info)
/* All other cpus are in H_JOIN, this cpu does
* the suspend.
*/
+ slb_set_size(SLB_MIN_SIZE);
printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n",
smp_processor_id());
data->error = rtas_call(data->token, 0, 1, NULL);
- if (data->error)
+ if (data->error) {
printk(KERN_DEBUG "ibm,suspend-me returned %d\n",
data->error);
+ slb_set_size(slb_size);
+ }
} else {
printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
smp_processor_id(), rc);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 07961c5c169e..1d98ecc8eecd 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -249,14 +249,22 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
static inline void patch_slb_encoding(unsigned int *insn_addr,
unsigned int immed)
{
- /* Assume the instruction had a "0" immediate value, just
- * "or" in the new value
- */
- *insn_addr |= immed;
+ *insn_addr = (*insn_addr & 0xffff0000) | immed;
flush_icache_range((unsigned long)insn_addr, 4+
(unsigned long)insn_addr);
}
+void slb_set_size(u16 size)
+{
+ extern unsigned int *slb_compare_rr_to_size;
+
+ if (mmu_slb_size == size)
+ return;
+
+ mmu_slb_size = size;
+ patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
+}
+
void slb_initialize(void)
{
unsigned long linear_llp, vmalloc_llp, io_llp;
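
The patch_slb_encoding() change above matters because slb_set_size() can now rewrite the same instruction more than once (shrink to SLB_MIN_SIZE before the suspend, restore afterwards). OR-ing the new immediate into the instruction is only correct while the field is still zero; on a later call the bits of the old and new values would be merged. A standalone sketch of the difference, using an illustrative instruction word rather than the actual kernel encoding:

/* Illustrative only: the instruction word below is a made-up example
 * with a 16-bit immediate field holding 64; it is not the real
 * instruction patched via slb_compare_rr_to_size. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t insn = 0x28090040;		/* immediate field currently 64 */

	/* Old approach: OR in the new value. Correct only if the field is 0. */
	uint32_t ored = insn | 32;		/* low 16 bits become 96, not 32 */

	/* New approach: clear the 16-bit immediate field, then insert. */
	uint32_t patched = (insn & 0xffff0000) | 32;

	assert((ored & 0xffff) == 96);		/* wrong result from OR */
	assert((patched & 0xffff) == 32);	/* correct result from mask+insert */
	return 0;
}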
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index b6f1b137d427..2e2bbe120b90 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -20,6 +20,7 @@
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/pSeries_reconfig.h>
+#include <asm/mmu.h>
@@ -439,9 +440,15 @@ static int do_update_property(char *buf, size_t bufsize)
if (!newprop)
return -ENOMEM;
+ if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size"))
+ slb_set_size(*(int *)value);
+
oldprop = of_find_property(np, name,NULL);
- if (!oldprop)
+ if (!oldprop) {
+ if (strlen(name))
+ return prom_add_property(np, newprop);
return -ENODEV;
+ }
rc = prom_update_property(np, newprop, oldprop);
if (rc)