Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c  62
1 file changed, 30 insertions(+), 32 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c24e29c0b9a4..f02f872ea8a9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -726,13 +726,8 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_alloc(unsigned long sz)
{
-	void *ptr = memblock_alloc(sz, sz);
+	return memblock_alloc_or_panic(sz, sz);

-	if (!ptr)
-		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
-		      __func__, sz, sz);
-
-	return ptr;
}
static void *__init late_alloc(unsigned long sz)
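The new helper folds the open-coded allocate-then-panic pattern above into a single call. A minimal sketch of its semantics, reconstructed only from the code it replaces; the actual mainline definition may differ (for instance, it could be a macro that forwards the caller's __func__ into the panic message):

static inline void *memblock_alloc_or_panic(phys_addr_t size,
					    phys_addr_t align)
{
	/* Kernel-context sketch: allocate from memblock or panic */
	void *ptr = memblock_alloc(size, align);

	if (!ptr)
		panic("%s: Failed to allocate %pa bytes align=%pa\n",
		      __func__, &size, &align);
	return ptr;
}

Either way, each converted caller keeps its failure behaviour while dropping the repeated error handling.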
@@ -1027,10 +1022,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
if (!nr)
return;
-	svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
-	if (!svm)
-		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
-		      __func__, sizeof(*svm) * nr, __alignof__(*svm));
+	svm = memblock_alloc_or_panic(sizeof(*svm) * nr, __alignof__(*svm));
for (md = io_desc; nr; md++, nr--) {
create_mapping(md);
@@ -1052,10 +1044,7 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
struct vm_struct *vm;
struct static_vm *svm;
-	svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
-	if (!svm)
-		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
-		      __func__, sizeof(*svm), __alignof__(*svm));
+	svm = memblock_alloc_or_panic(sizeof(*svm), __alignof__(*svm));
vm = &svm->vm;
vm->addr = (void *)addr;
@@ -1403,18 +1392,6 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
}
/*
- * Map the kernel if it is XIP.
- * It is always first in the module area.
- */
-#ifdef CONFIG_XIP_KERNEL
- map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
- map.virtual = MODULES_VADDR;
- map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
- map.type = MT_ROM;
- create_mapping(&map);
-#endif
-
- /*
* Map the cache flushing regions.
*/
#ifdef FLUSH_BASE
@@ -1603,12 +1580,27 @@ static void __init map_kernel(void)
* This will only persist until we turn on proper memory management later on
* and we remap the whole kernel with page granularity.
*/
+#ifdef CONFIG_XIP_KERNEL
+ phys_addr_t kernel_nx_start = kernel_sec_start;
+#else
phys_addr_t kernel_x_start = kernel_sec_start;
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
phys_addr_t kernel_nx_start = kernel_x_end;
+#endif
phys_addr_t kernel_nx_end = kernel_sec_end;
struct map_desc map;
+ /*
+ * Map the kernel if it is XIP.
+ * It is always first in the module area.
+ */
+#ifdef CONFIG_XIP_KERNEL
+ map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+ map.virtual = MODULES_VADDR;
+ map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+ map.type = MT_ROM;
+ create_mapping(&map);
+#else
map.pfn = __phys_to_pfn(kernel_x_start);
map.virtual = __phys_to_virt(kernel_x_start);
map.length = kernel_x_end - kernel_x_start;
@@ -1618,7 +1610,7 @@ static void __init map_kernel(void)
/* If the nx part is small it may end up covered by the tail of the RWX section */
if (kernel_x_end == kernel_nx_end)
return;
-
+#endif
map.pfn = __phys_to_pfn(kernel_nx_start);
map.virtual = __phys_to_virt(kernel_nx_start);
map.length = kernel_nx_end - kernel_nx_start;
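In the MT_ROM mapping above, the length expression rounds the XIP image up to whole 1 MiB sections: ~SECTION_MASK equals SECTION_SIZE - 1, so adding it before masking is the usual round-up idiom. A standalone worked example with hypothetical addresses; only the masking arithmetic is taken from the patch, the MODULES_VADDR and _exiprom values are made up:

#include <stdio.h>

#define SECTION_SIZE	0x100000UL		/* 1 MiB ARM section */
#define SECTION_MASK	(~(SECTION_SIZE - 1))	/* 0xfff00000 */

int main(void)
{
	unsigned long modules_vaddr = 0xbf000000UL;	/* hypothetical MODULES_VADDR */
	unsigned long exiprom       = 0xbf2a0000UL;	/* hypothetical end of XIP text */

	/* Same expression as the patch: round the span up to whole sections */
	unsigned long len = (exiprom - modules_vaddr + ~SECTION_MASK) & SECTION_MASK;

	printf("length = 0x%lx\n", len);	/* 0x300000: three 1 MiB sections */
	return 0;
}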
@@ -1638,7 +1630,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
{
pgtables_remap *lpae_pgtables_remap;
unsigned long pa_pgd;
- unsigned int cr, ttbcr;
+ u32 cr, ttbcr, tmp;
long long offset;
if (!mdesc->pv_fixup)
@@ -1687,9 +1679,10 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
*/
cr = get_cr();
set_cr(cr & ~(CR_I | CR_C));
- asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
- asm volatile("mcr p15, 0, %0, c2, c0, 2"
- : : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
+ ttbcr = cpu_get_ttbcr();
+ /* Disable all kind of caching of the translation table */
+ tmp = ttbcr & ~(TTBCR_ORGN0_MASK | TTBCR_IRGN0_MASK);
+ cpu_set_ttbcr(tmp);
flush_cache_all();
/*
@@ -1701,7 +1694,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
lpae_pgtables_remap(offset, pa_pgd);
/* Re-enable the caches and cacheable TLB walks */
- asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
+ cpu_set_ttbcr(ttbcr);
set_cr(cr);
}
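cpu_get_ttbcr()/cpu_set_ttbcr() replace the raw CP15 accesses, and the named masks replace the magic numbers: IRGN0 is bits [9:8] (3 << 8) and ORGN0 is bits [11:10] (3 << 10), the inner/outer cacheability attributes for TTBR0 translation table walks. A sketch of the accessors, reconstructed from the removed inline asm; the exact mainline definitions (and the "memory" clobber here) are assumptions:

static inline u32 cpu_get_ttbcr(void)
{
	u32 ttbcr;

	/* Read TTBCR (CP15 c2, c0, 2), as in the asm removed above */
	asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
	return ttbcr;
}

static inline void cpu_set_ttbcr(u32 ttbcr)
{
	/* Write TTBCR; volatile plus the clobber keeps the write ordered */
	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory");
}

Saving the full TTBCR and restoring it afterwards, rather than setting just the two fields back, preserves any other bits the boot code may have configured.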
@@ -1763,6 +1756,11 @@ void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
+#ifdef CONFIG_XIP_KERNEL
+ /* Store the kernel RW RAM region start/end in these variables */
+ kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
+ kernel_sec_end = round_up(__pa(_end), SECTION_SIZE);
+#endif
pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
kernel_sec_start, kernel_sec_end);
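With an XIP kernel the text executes from ROM, so kernel_sec_start/end here bound only the RW RAM region, aligned down and up to 1 MiB sections. A worked example with hypothetical values; round_up mirrors the kernel macro for a power-of-two alignment:

#include <stdio.h>

#define SECTION_SIZE	0x100000UL
#define SECTION_MASK	(~(SECTION_SIZE - 1))
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long phys_offset = 0x60000000UL;	/* hypothetical CONFIG_PHYS_OFFSET */
	unsigned long pa_end      = 0x603d4000UL;	/* hypothetical __pa(_end) */

	unsigned long sec_start = phys_offset & SECTION_MASK;	/* round down */
	unsigned long sec_end   = round_up(pa_end, SECTION_SIZE);

	/* Prints 0x60000000-0x60400000: the RW RAM span in whole sections */
	printf("0x%08lx-0x%08lx\n", sec_start, sec_end);
	return 0;
}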