author    Jungseok Lee <jays.lee@samsung.com>    2014-05-12 18:40:51 +0900
committer Catalin Marinas <catalin.marinas@arm.com>    2014-07-23 15:27:40 +0100
commit    c79b954bf6c006f2d3dd9d01f231abeead13a410 (patch)
tree      b7d4375c12d2b61554af0e5eb7596b22c60c643e /arch/arm64/mm
parent    57e0139041a978c0cfa4d2366c96ea3418e7d553 (diff)
arm64: mm: Implement 4 levels of translation tables
This patch implements 4 levels of translation tables, since 3 levels of page tables with 4KB pages cannot support the 40-bit physical address space described in [1], due to the following issue.

The kernel logical memory map with 4KB pages + 3 levels (0xffffffc000000000-0xffffffffffffffff) cannot cover the RAM region from 544GB to 1024GB defined in [1]. Specifically, the ARM64 kernel fails to create a mapping for this region in map_mem(), because __phys_to_virt() overflows for these addresses.

If an SoC design follows [1], RAM beyond the first 32GB is placed from 544GB upwards. Even a 64GB system is supposed to use the region from 544GB to 576GB for its upper 32GB of RAM. The natural way out is to enable 4 levels of page tables, rather than hacking __virt_to_phys and __phys_to_virt.

However, it is recommended that 4 levels of page tables be enabled only if the memory map is too sparse or there is around 512GB of RAM.

References
----------
[1]: Principles of ARM Memory Maps, White Paper, Issue C

Signed-off-by: Jungseok Lee <jays.lee@samsung.com>
Reviewed-by: Sungjinn Chung <sungjinn.chung@samsung.com>
Acked-by: Kukjin Kim <kgene.kim@samsung.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Steve Capper <steve.capper@linaro.org>
[catalin.marinas@arm.com: MEMBLOCK_INITIAL_LIMIT removed, same as PUD_SIZE]
[catalin.marinas@arm.com: early_ioremap_init() updated for 4 levels]
[catalin.marinas@arm.com: 48-bit VA depends on BROKEN until KVM is fixed]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Jungseok Lee <jungseoklee85@gmail.com>
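To make the overflow concrete: with 4KB pages and 3 levels (VA_BITS == 39), the linear map covers only 256GB, while [1] places up to 480GB of RAM starting at 544GB. Below is a minimal stand-alone sketch of the address arithmetic; PAGE_OFFSET is the 3-level value from the arm64 headers and the 544GB PHYS_OFFSET follows [1]:

    /* Stand-alone illustration of the __phys_to_virt() overflow; not kernel code. */
    #include <stdio.h>

    #define VA_BITS     39                      /* 4KB granule, 3 levels */
    #define PAGE_OFFSET 0xffffffc000000000UL    /* start of the kernel linear map */
    #define PHYS_OFFSET 0x8800000000UL          /* RAM base at 544GB, per [1] */

    /* Same arithmetic as the kernel's __phys_to_virt() */
    static unsigned long phys_to_virt(unsigned long phys)
    {
            return phys - PHYS_OFFSET + PAGE_OFFSET;
    }

    int main(void)
    {
            unsigned long map_size = 1UL << (VA_BITS - 1);  /* 256GB linear map */
            unsigned long lo = 544UL << 30;                 /* at the RAM base */
            unsigned long hi = 844UL << 30;                 /* 300GB above it */

            printf("linear map size = %lu GB\n", map_size >> 30);
            printf("virt(544GB)     = %#lx\n", phys_to_virt(lo));
            /* 300GB > 256GB, so the result wraps past 2^64: */
            printf("virt(844GB)     = %#lx\n", phys_to_virt(hi));
            return 0;
    }

With 4 levels (VA_BITS == 48) the linear map grows far beyond the 480GB span, so the arithmetic no longer wraps and map_mem() can cover the whole region.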
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--   arch/arm64/mm/fault.c     1
-rw-r--r--   arch/arm64/mm/ioremap.c   6
-rw-r--r--   arch/arm64/mm/mmu.c      14
3 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index bcc965e2cce1..41cb6d3d6075 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -62,6 +62,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;

 		pud = pud_offset(pgd, addr);
+		printk(", *pud=%016llx", pud_val(*pud));
 		if (pud_none(*pud) || pud_bad(*pud))
 			break;
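The one-line fault.c addition extends the fault-time page-table dump to print the pud entry, which is now a real level instead of a fold of the pgd. For orientation, the post-patch walk in show_pte() reads roughly as follows (a paraphrase of the function in this era's arch/arm64/mm/fault.c, abbreviated):

    pgd_t *pgd = pgd_offset(mm, addr);
    printk(", *pgd=%016llx", pgd_val(*pgd));

    do {
            pud_t *pud;
            pmd_t *pmd;
            pte_t *pte;

            if (pgd_none(*pgd) || pgd_bad(*pgd))
                    break;

            pud = pud_offset(pgd, addr);
            printk(", *pud=%016llx", pud_val(*pud));        /* added here */
            if (pud_none(*pud) || pud_bad(*pud))
                    break;

            pmd = pmd_offset(pud, addr);
            printk(", *pmd=%016llx", pmd_val(*pmd));
            if (pmd_none(*pmd) || pmd_bad(*pmd))
                    break;

            pte = pte_offset_map(pmd, addr);
            printk(", *pte=%016llx", pte_val(*pte));
            pte_unmap(pte);
    } while (0);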
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 69000efa015e..d5e969e7b576 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -104,9 +104,12 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 EXPORT_SYMBOL(ioremap_cache);

 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#ifndef CONFIG_ARM64_64K_PAGES
+#ifndef CONFIG_ARM64_2_LEVELS
 static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
 #endif
+#ifdef CONFIG_ARM64_4_LEVELS
+static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif

 static inline pud_t * __init early_ioremap_pud(unsigned long addr)
 {
@@ -144,6 +147,7 @@ void __init early_ioremap_init(void)
 	unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);

 	pgd = pgd_offset_k(addr);
+	pgd_populate(&init_mm, pgd, bm_pud);
 	pud = pud_offset(pgd, addr);
 	pud_populate(&init_mm, pud, bm_pmd);
 	pmd = pmd_offset(pud, addr);
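With a fourth level, the statically allocated fixmap tables must be chained one step higher: the new pgd_populate() call hangs bm_pud off the pgd before the existing calls hang bm_pmd and bm_pte beneath it. The resulting wiring in early_ioremap_init() looks like this (paraphrased; when a level is folded away, the corresponding populate call is a no-op):

    unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);
    pgd_t *pgd = pgd_offset_k(addr);
    pud_t *pud;
    pmd_t *pmd;

    pgd_populate(&init_mm, pgd, bm_pud);        /* pgd entry -> bm_pud (new) */
    pud = pud_offset(pgd, addr);
    pud_populate(&init_mm, pud, bm_pmd);        /* pud entry -> bm_pmd */
    pmd = pmd_offset(pud, addr);
    pmd_populate_kernel(&init_mm, pmd, bm_pte); /* pmd entry -> bm_pte */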
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c43f1dd19489..c55567283cde 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -32,6 +32,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/memblock.h>
 #include <asm/mmu_context.h>

 #include "mm.h"
@@ -204,9 +205,16 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 				  unsigned long end, unsigned long phys,
 				  int map_io)
 {
-	pud_t *pud = pud_offset(pgd, addr);
+	pud_t *pud;
 	unsigned long next;

+	if (pgd_none(*pgd)) {
+		pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
+		pgd_populate(&init_mm, pgd, pud);
+	}
+	BUG_ON(pgd_bad(*pgd));
+
+	pud = pud_offset(pgd, addr);
 	do {
 		next = pud_addr_end(addr, end);
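Before this patch the pud was always folded into the pgd, so pud_offset() could never miss; with a real pud level, alloc_init_pud() must allocate a fresh table the first time a pgd slot is touched, mirroring what alloc_init_pmd() already did one level down. Its caller walks pgd entries and leaves the on-demand allocation to this function (a paraphrase of the era's create_mapping(); local setup is abbreviated):

    pgd_t *pgd = pgd_offset_k(virt);
    unsigned long addr = virt & PAGE_MASK;
    unsigned long end = addr + size;
    unsigned long next;

    do {
            next = pgd_addr_end(addr, end);
            alloc_init_pud(pgd, addr, next, phys, map_io);
            phys += next - addr;
    } while (pgd++, addr = next, addr != end);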
@@ -290,10 +298,10 @@ static void __init map_mem(void)
 	 * memory addressable from the initial direct kernel mapping.
 	 *
 	 * The initial direct kernel mapping, located at swapper_pg_dir,
-	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
+	 * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be
 	 * aligned to 2MB as per Documentation/arm64/booting.txt).
 	 */
-	limit = PHYS_OFFSET + PGDIR_SIZE;
+	limit = PHYS_OFFSET + PUD_SIZE;
 	memblock_set_current_limit(limit);

 	/* map all the memory banks */
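The PGDIR_SIZE -> PUD_SIZE swap keeps early memblock allocations inside what the boot-time tables built in head.S actually map. The 4KB-granule numbers behind it (a stand-alone sanity check of the shift arithmetic, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned long pud_size   = 1UL << 30;   /* PUD_SHIFT = 30 */
            unsigned long pgdir_3lvl = 1UL << 30;   /* PGDIR_SHIFT = 30, pud folded */
            unsigned long pgdir_4lvl = 1UL << 39;   /* PGDIR_SHIFT = 39 */

            printf("PUD_SIZE          = %3lu GB\n", pud_size >> 30);
            printf("PGDIR_SIZE, 3 lvl = %3lu GB\n", pgdir_3lvl >> 30);
            printf("PGDIR_SIZE, 4 lvl = %3lu GB\n", pgdir_4lvl >> 30);
            return 0;
    }

With 3 levels the two macros coincide at 1GB, so the old code happened to be correct; with 4 levels PGDIR_SIZE balloons to 512GB, far more than the single pud's worth of memory the initial mapping provides, hence the explicit PUD_SIZE.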