Diffstat (limited to 'arch/ppc64/mm')
-rw-r--r--  arch/ppc64/mm/Makefile       |   2
-rw-r--r--  arch/ppc64/mm/hash_low.S     |   8
-rw-r--r--  arch/ppc64/mm/hash_native.c  | 164
-rw-r--r--  arch/ppc64/mm/hash_utils.c   |  16
-rw-r--r--  arch/ppc64/mm/hugetlbpage.c  |  16
-rw-r--r--  arch/ppc64/mm/init.c         |  29
-rw-r--r--  arch/ppc64/mm/numa.c         |  10
-rw-r--r--  arch/ppc64/mm/stab.c         |  35
8 files changed, 183 insertions(+), 97 deletions(-)
diff --git a/arch/ppc64/mm/Makefile b/arch/ppc64/mm/Makefile
index ac522d57b2a7..3695d00d347f 100644
--- a/arch/ppc64/mm/Makefile
+++ b/arch/ppc64/mm/Makefile
@@ -6,6 +6,6 @@ EXTRA_CFLAGS += -mno-minimal-toc
obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \
slb_low.o slb.o stab.o mmap.o
-obj-$(CONFIG_DISCONTIGMEM) += numa.o
+obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S
index c23d46956dd9..fbff24827ae7 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/ppc64/mm/hash_low.S
@@ -170,9 +170,7 @@ htab_insert_pte:
/* Call ppc_md.hpte_insert */
ld r7,STK_PARM(r4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
- li r6,0 /* primary slot */
- li r8,0 /* not bolted and not large */
- li r9,0
+ li r6,0 /* no vflags */
_GLOBAL(htab_call_hpte_insert1)
bl . /* Will be patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -192,9 +190,7 @@ _GLOBAL(htab_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r7,STK_PARM(r4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
- li r6,1 /* secondary slot */
- li r8,0 /* not bolted and not large */
- li r9,0
+ li r6,HPTE_V_SECONDARY@l /* secondary slot */
_GLOBAL(htab_call_hpte_insert2)
bl . /* Will be patched by htab_finish_init() */
cmpdi 0,r3,0
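After this change the patched branches above target an hpte_insert implementation that takes a single vflags word (built in r6) plus the protection flags, instead of the old secondary/bolted/large arguments. A rough sketch of the hook's shape as these callers use it; the real prototype lives in include/asm-ppc64/machdep.h, which is not part of this diff, so treat the declaration below as an assumption:

/* Sketch (assumed, not shown in this diff) of the ppc_md.hpte_insert hook
 * after the vflags/rflags rework: vflags carries the per-HPTE bits for the
 * first doubleword (bolted/large/secondary), rflags the protection and WIMG
 * bits for the second.  Returns the slot within the group, with bit 3 set
 * when the secondary bucket was used, or -1 if the group is full. */
long (*hpte_insert)(unsigned long hpte_group, unsigned long va,
		    unsigned long prpn, unsigned long vflags,
		    unsigned long rflags);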
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 52b6b9305341..a6abd3a979bf 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -27,9 +27,9 @@
static DEFINE_SPINLOCK(native_tlbie_lock);
-static inline void native_lock_hpte(HPTE *hptep)
+static inline void native_lock_hpte(hpte_t *hptep)
{
- unsigned long *word = &hptep->dw0.dword0;
+ unsigned long *word = &hptep->v;
while (1) {
if (!test_and_set_bit(HPTE_LOCK_BIT, word))
@@ -39,32 +39,28 @@ static inline void native_lock_hpte(HPTE *hptep)
}
}
-static inline void native_unlock_hpte(HPTE *hptep)
+static inline void native_unlock_hpte(hpte_t *hptep)
{
- unsigned long *word = &hptep->dw0.dword0;
+ unsigned long *word = &hptep->v;
asm volatile("lwsync":::"memory");
clear_bit(HPTE_LOCK_BIT, word);
}
long native_hpte_insert(unsigned long hpte_group, unsigned long va,
- unsigned long prpn, int secondary,
- unsigned long hpteflags, int bolted, int large)
+ unsigned long prpn, unsigned long vflags,
+ unsigned long rflags)
{
unsigned long arpn = physRpn_to_absRpn(prpn);
- HPTE *hptep = htab_address + hpte_group;
- Hpte_dword0 dw0;
- HPTE lhpte;
+ hpte_t *hptep = htab_address + hpte_group;
+ unsigned long hpte_v, hpte_r;
int i;
for (i = 0; i < HPTES_PER_GROUP; i++) {
- dw0 = hptep->dw0.dw0;
-
- if (!dw0.v) {
+ if (! (hptep->v & HPTE_V_VALID)) {
/* retry with lock held */
native_lock_hpte(hptep);
- dw0 = hptep->dw0.dw0;
- if (!dw0.v)
+ if (! (hptep->v & HPTE_V_VALID))
break;
native_unlock_hpte(hptep);
}
@@ -75,56 +71,45 @@ long native_hpte_insert(unsigned long hpte_group, unsigned long va,
if (i == HPTES_PER_GROUP)
return -1;
- lhpte.dw1.dword1 = 0;
- lhpte.dw1.dw1.rpn = arpn;
- lhpte.dw1.flags.flags = hpteflags;
-
- lhpte.dw0.dword0 = 0;
- lhpte.dw0.dw0.avpn = va >> 23;
- lhpte.dw0.dw0.h = secondary;
- lhpte.dw0.dw0.bolted = bolted;
- lhpte.dw0.dw0.v = 1;
-
- if (large) {
- lhpte.dw0.dw0.l = 1;
- lhpte.dw0.dw0.avpn &= ~0x1UL;
- }
-
- hptep->dw1.dword1 = lhpte.dw1.dword1;
+ hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
+ if (vflags & HPTE_V_LARGE)
+ va &= ~(1UL << HPTE_V_AVPN_SHIFT);
+ hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
+ hptep->r = hpte_r;
/* Guarantee the second dword is visible before the valid bit */
__asm__ __volatile__ ("eieio" : : : "memory");
-
/*
* Now set the first dword including the valid bit
* NOTE: this also unlocks the hpte
*/
- hptep->dw0.dword0 = lhpte.dw0.dword0;
+ hptep->v = hpte_v;
__asm__ __volatile__ ("ptesync" : : : "memory");
- return i | (secondary << 3);
+ return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
static long native_hpte_remove(unsigned long hpte_group)
{
- HPTE *hptep;
- Hpte_dword0 dw0;
+ hpte_t *hptep;
int i;
int slot_offset;
+ unsigned long hpte_v;
/* pick a random entry to start at */
slot_offset = mftb() & 0x7;
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + hpte_group + slot_offset;
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
- if (dw0.v && !dw0.bolted) {
+ if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
/* retry with lock held */
native_lock_hpte(hptep);
- dw0 = hptep->dw0.dw0;
- if (dw0.v && !dw0.bolted)
+ hpte_v = hptep->v;
+ if ((hpte_v & HPTE_V_VALID)
+ && !(hpte_v & HPTE_V_BOLTED))
break;
native_unlock_hpte(hptep);
}
@@ -137,15 +122,15 @@ static long native_hpte_remove(unsigned long hpte_group)
return -1;
/* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->dw0.dword0 = 0;
+ hptep->v = 0;
return i;
}
-static inline void set_pp_bit(unsigned long pp, HPTE *addr)
+static inline void set_pp_bit(unsigned long pp, hpte_t *addr)
{
unsigned long old;
- unsigned long *p = &addr->dw1.dword1;
+ unsigned long *p = &addr->r;
__asm__ __volatile__(
"1: ldarx %0,0,%3\n\
@@ -163,11 +148,11 @@ static inline void set_pp_bit(unsigned long pp, HPTE *addr)
*/
static long native_hpte_find(unsigned long vpn)
{
- HPTE *hptep;
+ hpte_t *hptep;
unsigned long hash;
unsigned long i, j;
long slot;
- Hpte_dword0 dw0;
+ unsigned long hpte_v;
hash = hpt_hash(vpn, 0);
@@ -175,10 +160,11 @@ static long native_hpte_find(unsigned long vpn)
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) {
hptep = htab_address + slot;
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
- if ((dw0.avpn == (vpn >> 11)) && dw0.v &&
- (dw0.h == j)) {
+ if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
+ && (hpte_v & HPTE_V_VALID)
+ && ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
/* HPTE matches */
if (j)
slot = -slot;
@@ -195,20 +181,21 @@ static long native_hpte_find(unsigned long vpn)
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long va, int large, int local)
{
- HPTE *hptep = htab_address + slot;
- Hpte_dword0 dw0;
+ hpte_t *hptep = htab_address + slot;
+ unsigned long hpte_v;
unsigned long avpn = va >> 23;
int ret = 0;
if (large)
- avpn &= ~0x1UL;
+ avpn &= ~1;
native_lock_hpte(hptep);
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */
- if ((dw0.avpn != avpn) || !dw0.v) {
+ if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
+ || !(hpte_v & HPTE_V_VALID)) {
native_unlock_hpte(hptep);
ret = -1;
} else {
@@ -244,7 +231,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
unsigned long vsid, va, vpn, flags = 0;
long slot;
- HPTE *hptep;
+ hpte_t *hptep;
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
vsid = get_kernel_vsid(ea);
@@ -269,26 +256,27 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
int large, int local)
{
- HPTE *hptep = htab_address + slot;
- Hpte_dword0 dw0;
+ hpte_t *hptep = htab_address + slot;
+ unsigned long hpte_v;
unsigned long avpn = va >> 23;
unsigned long flags;
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
if (large)
- avpn &= ~0x1UL;
+ avpn &= ~1;
local_irq_save(flags);
native_lock_hpte(hptep);
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */
- if ((dw0.avpn != avpn) || !dw0.v) {
+ if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
+ || !(hpte_v & HPTE_V_VALID)) {
native_unlock_hpte(hptep);
} else {
/* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->dw0.dword0 = 0;
+ hptep->v = 0;
}
/* Invalidate the tlb */
@@ -304,13 +292,57 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
local_irq_restore(flags);
}
+/*
+ * clear all mappings on kexec. All cpus are in real mode (or they will
+ * be when they isi), and we are the only one left. We rely on our kernel
+ * mapping being 0xC0's and the hardware ignoring those two real bits.
+ *
+ * TODO: add batching support when enabled. remember, no dynamic memory here,
+ * although there is the control page available...
+ */
+static void native_hpte_clear(void)
+{
+ unsigned long slot, slots, flags;
+ hpte_t *hptep = htab_address;
+ unsigned long hpte_v;
+ unsigned long pteg_count;
+
+ pteg_count = htab_hash_mask + 1;
+
+ local_irq_save(flags);
+
+ /* we take the tlbie lock and hold it. Some hardware will
+ * deadlock if we try to tlbie from two processors at once.
+ */
+ spin_lock(&native_tlbie_lock);
+
+ slots = pteg_count * HPTES_PER_GROUP;
+
+ for (slot = 0; slot < slots; slot++, hptep++) {
+ /*
+ * we could lock the pte here, but we are the only cpu
+ * running, right? and for crash dump, we probably
+ * don't want to wait for a maybe bad cpu.
+ */
+ hpte_v = hptep->v;
+
+ if (hpte_v & HPTE_V_VALID) {
+ hptep->v = 0;
+ tlbie(slot2va(hpte_v, slot), hpte_v & HPTE_V_LARGE);
+ }
+ }
+
+ spin_unlock(&native_tlbie_lock);
+ local_irq_restore(flags);
+}
+
static void native_flush_hash_range(unsigned long context,
unsigned long number, int local)
{
unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
int i, j;
- HPTE *hptep;
- Hpte_dword0 dw0;
+ hpte_t *hptep;
+ unsigned long hpte_v;
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
/* XXX fix for large ptes */
@@ -346,14 +378,15 @@ static void native_flush_hash_range(unsigned long context,
native_lock_hpte(hptep);
- dw0 = hptep->dw0.dw0;
+ hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */
- if ((dw0.avpn != avpn) || !dw0.v) {
+ if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
+ || !(hpte_v & HPTE_V_VALID)) {
native_unlock_hpte(hptep);
} else {
/* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->dw0.dword0 = 0;
+ hptep->v = 0;
}
j++;
@@ -415,7 +448,8 @@ void hpte_init_native(void)
ppc_md.hpte_updatepp = native_hpte_updatepp;
ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
ppc_md.hpte_insert = native_hpte_insert;
- ppc_md.hpte_remove = native_hpte_remove;
+ ppc_md.hpte_remove = native_hpte_remove;
+ ppc_md.hpte_clear_all = native_hpte_clear;
if (tlb_batching_enabled())
ppc_md.flush_hash_range = native_flush_hash_range;
htab_finish_init();
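The bitfield-based HPTE and Hpte_dword0 types are gone above; the native code now works on the two raw doublewords of each hash PTE through flag masks. Below is a minimal sketch of the hpte_t layout and flag bits the rewritten code assumes, together with the slot2va() helper used by native_hpte_clear(). The authoritative definitions live in include/asm-ppc64/mmu.h, which is not part of this diff, so the exact values and the helper body shown here are assumptions:

/* Sketch of the layout assumed by the code above (per include/asm-ppc64/mmu.h,
 * not shown in this diff -- the low-bit assignments mirror the architected
 * hash PTE format). */
typedef struct {
	unsigned long v;	/* dword 0: AVPN, software bits, valid bit */
	unsigned long r;	/* dword 1: real page number, protection, WIMG */
} hpte_t;

#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		0xffffffffffffff80UL
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_BOLTED		0x0000000000000010UL
#define HPTE_V_LOCK		0x0000000000000008UL	/* software lock, cf. HPTE_LOCK_BIT */
#define HPTE_V_LARGE		0x0000000000000004UL
#define HPTE_V_SECONDARY	0x0000000000000002UL
#define HPTE_V_VALID		0x0000000000000001UL

#define HPTE_R_RPN_SHIFT	12

/* Reconstruct the virtual address for a slot, as native_hpte_clear() needs
 * when it tlbie's each valid entry.  The AVPN supplies va bits above bit 22;
 * for 4k pages the low bits of the page index are recovered from the PTEG
 * number, since pteg = (vsid ^ page) & htab_hash_mask. */
static inline unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
{
	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
	unsigned long va = avpn << 23;

	if (!(hpte_v & HPTE_V_LARGE)) {
		unsigned long vpi, pteg;

		pteg = slot / HPTES_PER_GROUP;
		if (hpte_v & HPTE_V_SECONDARY)
			pteg = ~pteg;

		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
		va |= vpi << PAGE_SHIFT;
	}

	return va;
}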
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index 1647b1c6f28e..623b5d130c31 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -75,8 +75,8 @@
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */
-HPTE *htab_address;
-unsigned long htab_hash_mask;
+hpte_t *htab_address;
+unsigned long htab_hash_mask;
extern unsigned long _SDR1;
@@ -97,11 +97,15 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
unsigned long addr;
unsigned int step;
unsigned long tmp_mode;
+ unsigned long vflags;
- if (large)
+ if (large) {
step = 16*MB;
- else
+ vflags = HPTE_V_BOLTED | HPTE_V_LARGE;
+ } else {
step = 4*KB;
+ vflags = HPTE_V_BOLTED;
+ }
for (addr = start; addr < end; addr += step) {
unsigned long vpn, hash, hpteg;
@@ -129,12 +133,12 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
if (systemcfg->platform & PLATFORM_LPAR)
ret = pSeries_lpar_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT,
- 0, tmp_mode, 1, large);
+ vflags, tmp_mode);
else
#endif /* CONFIG_PPC_PSERIES */
ret = native_hpte_insert(hpteg, va,
virt_to_abs(addr) >> PAGE_SHIFT,
- 0, tmp_mode, 1, large);
+ vflags, tmp_mode);
if (ret == -1) {
ppc64_terminate_msg(0x20, "create_pte_mapping");
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index fdcfe97c75c1..f9524602818d 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -583,7 +583,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
pte_t *ptep;
unsigned long va, vpn;
pte_t old_pte, new_pte;
- unsigned long hpteflags, prpn;
+ unsigned long rflags, prpn;
long slot;
int err = 1;
@@ -626,9 +626,9 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
old_pte = *ptep;
new_pte = old_pte;
- hpteflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
+ rflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
- hpteflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);
+ rflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);
/* Check if pte already has an hpte (case 2) */
if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
@@ -641,7 +641,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;
- if (ppc_md.hpte_updatepp(slot, hpteflags, va, 1, local) == -1)
+ if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
}
@@ -661,10 +661,10 @@ repeat:
/* Add in WIMG bits */
/* XXX We should store these in the pte */
- hpteflags |= _PAGE_COHERENT;
+ rflags |= _PAGE_COHERENT;
- slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0,
- hpteflags, 0, 1);
+ slot = ppc_md.hpte_insert(hpte_group, va, prpn,
+ HPTE_V_LARGE, rflags);
/* Primary is full, try the secondary */
if (unlikely(slot == -1)) {
@@ -672,7 +672,7 @@ repeat:
hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
slot = ppc_md.hpte_insert(hpte_group, va, prpn,
- 1, hpteflags, 0, 1);
+ HPTE_V_LARGE, rflags);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index 6fa1e6490b57..e58a24d42879 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -98,7 +98,7 @@ void show_mem(void)
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
- page = pgdat->node_mem_map + i;
+ page = pgdat_page_nr(pgdat, i);
total++;
if (PageReserved(page))
reserved++;
@@ -180,9 +180,10 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
/* Panic if a pte group is full */
- if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
- _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX,
- 1, 0) == -1) {
+ if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
+ HPTE_V_BOLTED,
+ _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
+ == -1) {
panic("map_io_page: could not insert mapping");
}
}
@@ -531,7 +532,7 @@ EXPORT_SYMBOL(page_is_ram);
* Initialize the bootmem system and give it all the memory we
* have available.
*/
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
unsigned long i;
@@ -553,12 +554,20 @@ void __init do_init_bootmem(void)
max_pfn = max_low_pfn;
- /* add all physical memory to the bootmem map. Also find the first */
+ /* Add all physical memory to the bootmem map, mark each area
+ * present.
+ */
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long physbase, size;
+ unsigned long start_pfn, end_pfn;
physbase = lmb.memory.region[i].physbase;
size = lmb.memory.region[i].size;
+
+ start_pfn = physbase >> PAGE_SHIFT;
+ end_pfn = start_pfn + (size >> PAGE_SHIFT);
+ memory_present(0, start_pfn, end_pfn);
+
free_bootmem(physbase, size);
}
@@ -597,7 +606,7 @@ void __init paging_init(void)
free_area_init_node(0, NODE_DATA(0), zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
-#endif /* CONFIG_DISCONTIGMEM */
+#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
static struct kcore_list kcore_vmem;
@@ -628,7 +637,7 @@ module_init(setup_kcore);
void __init mem_init(void)
{
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NEED_MULTIPLE_NODES
int nid;
#endif
pg_data_t *pgdat;
@@ -639,7 +648,7 @@ void __init mem_init(void)
num_physpages = max_low_pfn; /* RAM is assumed contiguous */
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NEED_MULTIPLE_NODES
for_each_online_node(nid) {
if (NODE_DATA(nid)->node_spanned_pages != 0) {
printk("freeing bootmem node %x\n", nid);
@@ -654,7 +663,7 @@ void __init mem_init(void)
for_each_pgdat(pgdat) {
for (i = 0; i < pgdat->node_spanned_pages; i++) {
- page = pgdat->node_mem_map + i;
+ page = pgdat_page_nr(pgdat, i);
if (PageReserved(page))
reservedpages++;
}
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
index ea862ec643d3..0b191f2de016 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -440,6 +440,8 @@ new_range:
for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
numa_domain;
+ memory_present(numa_domain, start >> PAGE_SHIFT,
+ (start + size) >> PAGE_SHIFT);
if (--ranges)
goto new_range;
@@ -481,6 +483,7 @@ static void __init setup_nonnuma(void)
for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
+ memory_present(0, 0, init_node_data[0].node_end_pfn);
}
static void __init dump_numa_topology(void)
@@ -644,7 +647,12 @@ void __init do_init_bootmem(void)
new_range:
mem_start = read_n_cells(addr_cells, &memcell_buf);
mem_size = read_n_cells(size_cells, &memcell_buf);
- numa_domain = numa_enabled ? of_node_numa_domain(memory) : 0;
+ if (numa_enabled) {
+ numa_domain = of_node_numa_domain(memory);
+ if (numa_domain >= MAX_NUMNODES)
+ numa_domain = 0;
+ } else
+ numa_domain = 0;
if (numa_domain != nid)
continue;
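The init.c and numa.c hunks above now call memory_present() for every physical range, so the sparse memory model knows which sections exist before the bootmem allocator starts handing out pages. For reference, the interface being called (declared in include/linux/mmzone.h, outside this diff; parameter names below are illustrative):

/* Records the pfn range [start, end) as present on node nid when
 * CONFIG_SPARSEMEM is enabled; otherwise the call compiles away to a no-op. */
void memory_present(int nid, unsigned long start, unsigned long end);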
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index df4bbe14153c..1b83f002bf27 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -18,6 +18,8 @@
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
+#include <asm/lmb.h>
+#include <asm/abs_addr.h>
struct stab_entry {
unsigned long esid_data;
@@ -224,6 +226,39 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
extern void slb_initialize(void);
/*
+ * Allocate segment tables for secondary CPUs. These must all go in
+ * the first (bolted) segment, so that do_stab_bolted won't get a
+ * recursive segment miss on the segment table itself.
+ */
+void stabs_alloc(void)
+{
+ int cpu;
+
+ if (cpu_has_feature(CPU_FTR_SLB))
+ return;
+
+ for_each_cpu(cpu) {
+ unsigned long newstab;
+
+ if (cpu == 0)
+ continue; /* stab for CPU 0 is statically allocated */
+
+ newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1<<SID_SHIFT);
+ if (! newstab)
+ panic("Unable to allocate segment table for CPU %d.\n",
+ cpu);
+
+ newstab += KERNELBASE;
+
+ memset((void *)newstab, 0, PAGE_SIZE);
+
+ paca[cpu].stab_addr = newstab;
+ paca[cpu].stab_real = virt_to_abs(newstab);
+ printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx virtual, 0x%lx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
+ }
+}
+
+/*
* Build an entry for the base kernel segment and put it into
* the segment table or SLB. All other segment table or SLB
* entries are faulted in.
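One arithmetic note on the stabs_alloc() addition above: SID_SHIFT is 28 on ppc64, so the bound passed to lmb_alloc_base() keeps each new stab in the first 256MB of real memory, i.e. inside the first kernel segment, whose mapping is bolted -- exactly the property the comment above that function relies on. A hedged illustration (the SID_SHIFT value is assumed from the ppc64 headers; STAB_ALLOC_LIMIT is a name made up for this note):

/* Illustration only: the allocation bound used by stabs_alloc().
 * With SID_SHIFT assumed to be 28 (one 256MB segment), the new stab always
 * lands below 256MB physical, so do_stab_bolted can never take a recursive
 * segment miss on the segment table itself. */
#define SID_SHIFT		28
#define STAB_ALLOC_LIMIT	(1UL << SID_SHIFT)	/* 0x10000000 = 256MB */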