author		Grant Erickson <gerickson@nuovations.com>	2008-10-29 11:41:14 +0000
committer	Josh Boyer <jwboyer@linux.vnet.ibm.com>	2008-11-13 10:10:56 -0500
commit		5907630ffc2b2d133de2db18963ee5a6c5af7878 (patch)
tree		60d41ab37ba170ba88e002915d8034fe47c7b9ac /arch
parent		cb8fdc69a2a80e81e1280ec58afd2c3217ac8a7f (diff)
powerpc/40x: Limit allocable DRAM during early mapping
If the size of DRAM is not an exact power of two, we may not have covered
DRAM in its entirety with large 16 and 4 MiB pages. If that is the case,
we can get non-recoverable page faults when doing the final PTE mappings
for the non-large page PTEs. Consequently, we restrict the top end of DRAM
currently allocable by updating '__initial_memory_limit_addr' so that
calls to the LMB to allocate PTEs for "tail" coverage with normal-sized
pages (or other reasons) do not attempt to allocate outside the allowed
range.

Signed-off-by: Grant Erickson <gerickson@nuovations.com>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
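For illustration only (not part of the commit), the following is a minimal
user-space sketch of the large-page accounting described above. It assumes
the 16 MiB and 4 MiB large-page sizes used by the 40x MMU code; the 62 MiB
DRAM size and zero start address are made-up example values.

/*
 * Minimal user-space sketch (not kernel code) of the large-page
 * accounting described in the commit message. It shows how a DRAM size
 * that is not an exact power of two leaves an unmapped "tail" that the
 * early allocation limit must exclude.
 */
#include <stdio.h>

#define MiB			(1024UL * 1024UL)
#define LARGE_PAGE_SIZE_16M	(16UL * MiB)
#define LARGE_PAGE_SIZE_4M	(4UL * MiB)

int main(void)
{
	unsigned long total_lowmem = 62UL * MiB;	/* example size */
	unsigned long memstart_addr = 0x00000000UL;	/* example base */
	unsigned long s = total_lowmem;
	unsigned long mapped, limit;

	/* Cover as much DRAM as possible with 16 MiB, then 4 MiB pages. */
	while (s >= LARGE_PAGE_SIZE_16M)
		s -= LARGE_PAGE_SIZE_16M;
	while (s >= LARGE_PAGE_SIZE_4M)
		s -= LARGE_PAGE_SIZE_4M;

	mapped = total_lowmem - s;		/* covered by large pages */
	limit = memstart_addr + mapped;		/* new early allocation cap */

	printf("total %lu MiB, mapped %lu MiB, tail %lu MiB, limit 0x%08lx\n",
	       total_lowmem / MiB, mapped / MiB, s / MiB, limit);
	return 0;
}

With these example values the loops leave a 2 MiB "tail" that is not covered
by large pages, so the early allocation limit is clamped to the 60 MiB that
actually is mapped.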
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/mm/40x_mmu.c	16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index cecbbc76f624..29954dc28942 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -93,7 +93,7 @@ void __init MMU_init_hw(void)
 
 unsigned long __init mmu_mapin_ram(void)
 {
-	unsigned long v, s;
+	unsigned long v, s, mapped;
 	phys_addr_t p;
 
 	v = KERNELBASE;
@@ -130,5 +130,17 @@ unsigned long __init mmu_mapin_ram(void)
 		s -= LARGE_PAGE_SIZE_4M;
 	}
 
-	return total_lowmem - s;
+	mapped = total_lowmem - s;
+
+	/* If the size of RAM is not an exact power of two, we may not
+	 * have covered RAM in its entirety with 16 and 4 MiB
+	 * pages. Consequently, restrict the top end of RAM currently
+	 * allocable so that calls to the LMB to allocate PTEs for "tail"
+	 * coverage with normal-sized pages (or other reasons) do not
+	 * attempt to allocate outside the allowed range.
+	 */
+
+	__initial_memory_limit_addr = memstart_addr + mapped;
+
+	return mapped;
 }
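
As additional context (not part of this patch), here is a hedged sketch of
the kind of early allocation this cap is meant to bound, assuming an
lmb_alloc_base(size, align, max_addr)-style interface; the
early_get_pte_page() helper name is hypothetical and only for illustration.

/*
 * Hedged sketch only, not code from this commit: passing
 * __initial_memory_limit_addr as the maximum address keeps the returned
 * page inside the region already covered by the large 16/4 MiB mappings
 * set up in mmu_mapin_ram().
 */
static void __init *early_get_pte_page(void)
{
	phys_addr_t p;

	p = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, __initial_memory_limit_addr);

	return __va(p);
}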