Diffstat (limited to 'arch/parisc/mm/init.c')
-rw-r--r--  arch/parisc/mm/init.c  52
1 file changed, 19 insertions, 33 deletions
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 059187a3ded7..d0b166256f1a 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -79,36 +79,6 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;
-/*
- * get_memblock() allocates pages via memblock.
- * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
- * doesn't allocate from bottom to top which is needed because we only created
- * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
- */
-static void * __init get_memblock(unsigned long size)
-{
- static phys_addr_t search_addr __initdata;
- phys_addr_t phys;
-
- if (!search_addr)
- search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
- search_addr = ALIGN(search_addr, size);
- while (!memblock_is_region_memory(search_addr, size) ||
- memblock_is_region_reserved(search_addr, size)) {
- search_addr += size;
- }
- phys = search_addr;
-
- if (phys)
- memblock_reserve(phys, size);
- else
- panic("get_memblock() failed.\n");
-
- memset(__va(phys), 0, size);
-
- return __va(phys);
-}
-
#ifdef CONFIG_64BIT
#define MAX_MEM (~0UL)
#else /* !CONFIG_64BIT */
@@ -321,6 +291,13 @@ static void __init setup_bootmem(void)
max_pfn = start_pfn + npages;
}
+ /*
+ * We can't use memblock top-down allocations because we only
+ * created the initial mapping up to KERNEL_INITIAL_SIZE in
+ * the assembly bootup code.
+ */
+ memblock_set_bottom_up(true);
+
/* IOMMU is always used to access "high mem" on those boxes
* that can support enough mem that a PCI device couldn't
* directly DMA to any physical addresses.
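The memblock_set_bottom_up(true) call added above makes every subsequent memblock allocation start from the lowest free physical address, so the early page-table allocations stay inside the region that the assembly bootup code already mapped (up to KERNEL_INITIAL_SIZE). A minimal sketch of that behaviour, for illustration only and not part of this patch:

    /* Sketch: once bottom-up mode is enabled, early allocations are
     * served from the lowest free physical range, which is covered by
     * the initial kernel mapping.  Assumes <linux/memblock.h>. */
    memblock_set_bottom_up(true);
    void *buf = memblock_alloc(PAGE_SIZE, PAGE_SIZE); /* zeroed, low memory */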
@@ -442,7 +419,10 @@ static void __init map_pages(unsigned long start_vaddr,
*/
if (!pmd) {
- pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
+ pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
+ PAGE_SIZE << PMD_ORDER);
+ if (!pmd)
+ panic("pmd allocation failed.\n");
pmd = (pmd_t *) __pa(pmd);
}
@@ -461,7 +441,10 @@ static void __init map_pages(unsigned long start_vaddr,
pg_table = (pte_t *)pmd_address(*pmd);
if (!pg_table) {
- pg_table = (pte_t *) get_memblock(PAGE_SIZE);
+ pg_table = memblock_alloc(PAGE_SIZE,
+ PAGE_SIZE);
+ if (!pg_table)
+ panic("page table allocation failed\n");
pg_table = (pte_t *) __pa(pg_table);
}
@@ -700,7 +683,10 @@ static void __init pagetable_init(void)
}
#endif
- empty_zero_page = get_memblock(PAGE_SIZE);
+ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("zero page allocation failed.\n");
+
}
static void __init gateway_init(void)
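Each removed get_memblock(size) call above is replaced by memblock_alloc(size, size) plus an explicit failure check, since memblock_alloc() may return NULL. memblock_alloc() also returns zeroed memory, so the memset() done by the old helper is no longer needed. The pattern shared by the three call sites, sketched here for illustration (early_zalloc is a hypothetical name, not something the commit adds):

    /* Sketch of the common call-site pattern; assumes <linux/memblock.h>. */
    static void * __init early_zalloc(unsigned long size)
    {
        void *ptr = memblock_alloc(size, size); /* align to the allocation size */

        if (!ptr)
            panic("%s: failed to allocate %lu bytes\n", __func__, size);

        return ptr; /* already zeroed by memblock_alloc() */
    }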