From d10efa21a9374829a662ec1377237e85f430c024 Mon Sep 17 00:00:00 2001
From: Qinglin Pan
Date: Thu, 27 Jan 2022 10:48:41 +0800
Subject: riscv: mm: Control p4d's folding by pgtable_l5_enabled

To determine the pgtable level at boot time, we cannot use the helper
functions in include/asm-generic/pgtable-nop4d.h and must implement
these functions ourselves. This patch uses the pgtable_l5_enabled
variable instead of including pgtable-nop4d.h to control p4d's folding,
and implements the corresponding helper functions.

Signed-off-by: Qinglin Pan
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/pgalloc.h    |  49 +++++++++++++++++
 arch/riscv/include/asm/pgtable-64.h | 106 +++++++++++++++++++++++++++++++++++-
 arch/riscv/include/asm/pgtable.h    |   4 +-
 arch/riscv/mm/init.c                |   2 +
 4 files changed, 158 insertions(+), 3 deletions(-)

diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 11823004b87a..947f23d7b6af 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -59,6 +59,26 @@ static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
 	}
 }
 
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
+{
+	if (pgtable_l5_enabled) {
+		unsigned long pfn = virt_to_pfn(p4d);
+
+		set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+	}
+}
+
+static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
+				     p4d_t *p4d)
+{
+	if (pgtable_l5_enabled) {
+		unsigned long pfn = virt_to_pfn(p4d);
+
+		set_pgd_safe(pgd,
+			     __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+	}
+}
+
 #define pud_alloc_one pud_alloc_one
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -76,6 +96,35 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 }
 
 #define __pud_free_tlb(tlb, pud, addr) pud_free((tlb)->mm, pud)
+
+#define p4d_alloc_one p4d_alloc_one
+static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	if (pgtable_l5_enabled) {
+		gfp_t gfp = GFP_PGTABLE_USER;
+
+		if (mm == &init_mm)
+			gfp = GFP_PGTABLE_KERNEL;
+		return (p4d_t *)get_zeroed_page(gfp);
+	}
+
+	return NULL;
+}
+
+static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
+	free_page((unsigned long)p4d);
+}
+
+#define p4d_free p4d_free
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+	if (pgtable_l5_enabled)
+		__p4d_free(mm, p4d);
+}
+
+#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index bbbdd66e5e2f..7e246e9f8d70 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -9,16 +9,24 @@
 #include <linux/const.h>
 
 extern bool pgtable_l4_enabled;
+extern bool pgtable_l5_enabled;
 
 #define PGDIR_SHIFT_L3  30
 #define PGDIR_SHIFT_L4  39
+#define PGDIR_SHIFT_L5  48
 #define PGDIR_SIZE_L3   (_AC(1, UL) << PGDIR_SHIFT_L3)
 
-#define PGDIR_SHIFT     (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3)
+#define PGDIR_SHIFT     (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \
+		(pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3))
 /* Size of region mapped by a page global directory */
 #define PGDIR_SIZE      (_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE - 1))
 
+/* p4d is folded into pgd in case of 4-level page table */
+#define P4D_SHIFT      39
+#define P4D_SIZE       (_AC(1, UL) << P4D_SHIFT)
+#define P4D_MASK       (~(P4D_SIZE - 1))
+
 /* pud is folded into pgd in case of 3-level page table */
 #define PUD_SHIFT      30
 #define PUD_SIZE       (_AC(1, UL) << PUD_SHIFT)
@@ -29,6 +37,15 @@ extern bool pgtable_l4_enabled;
 #define PMD_SIZE        (_AC(1, UL) << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE - 1))
 
+/* Page 4th Directory entry */
+typedef struct {
+	unsigned long p4d;
+} p4d_t;
+
+#define p4d_val(x)	((x).p4d)
+#define __p4d(x)	((p4d_t) { (x) })
+#define PTRS_PER_P4D	(PAGE_SIZE / sizeof(p4d_t))
+
 /* Page Upper Directory entry */
 typedef struct {
 	unsigned long pud;
@@ -99,6 +116,15 @@ static inline struct page *pud_page(pud_t pud)
 	return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
 }
 
+#define mm_p4d_folded mm_p4d_folded
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+	if (pgtable_l5_enabled)
+		return false;
+
+	return true;
+}
+
 #define mm_pud_folded mm_pud_folded
 static inline bool mm_pud_folded(struct mm_struct *mm)
 {
@@ -128,6 +154,9 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
 #define pud_ERROR(e)   \
 	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
 
+#define p4d_ERROR(e)   \
+	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
+
 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
 	if (pgtable_l4_enabled)
@@ -166,6 +195,16 @@ static inline void p4d_clear(p4d_t *p4d)
 		set_p4d(p4d, __p4d(0));
 }
 
+static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
+{
+	return __p4d((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _p4d_pfn(p4d_t p4d)
+{
+	return p4d_val(p4d) >> _PAGE_PFN_SHIFT;
+}
+
 static inline pud_t *p4d_pgtable(p4d_t p4d)
 {
 	if (pgtable_l4_enabled)
@@ -173,6 +212,7 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
 
 	return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
 }
+#define p4d_page_vaddr(p4d)	((unsigned long)p4d_pgtable(p4d))
 
 static inline struct page *p4d_page(p4d_t p4d)
 {
@@ -190,4 +230,68 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
 	return (pud_t *)p4d;
 }
 
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	if (pgtable_l5_enabled)
+		*pgdp = pgd;
+	else
+		set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+	if (pgtable_l5_enabled)
+		return (pgd_val(pgd) == 0);
+
+	return 0;
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+	if (pgtable_l5_enabled)
+		return (pgd_val(pgd) & _PAGE_PRESENT);
+
+	return 1;
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+	if (pgtable_l5_enabled)
+		return !pgd_present(pgd);
+
+	return 0;
+}
+
+static inline void pgd_clear(pgd_t *pgd)
+{
+	if (pgtable_l5_enabled)
+		set_pgd(pgd, __pgd(0));
+}
+
+static inline p4d_t *pgd_pgtable(pgd_t pgd)
+{
+	if (pgtable_l5_enabled)
+		return (p4d_t *)pfn_to_virt(pgd_val(pgd) >> _PAGE_PFN_SHIFT);
+
+	return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) });
+}
+#define pgd_page_vaddr(pgd)	((unsigned long)pgd_pgtable(pgd))
+
+static inline struct page *pgd_page(pgd_t pgd)
+{
+	return pfn_to_page(pgd_val(pgd) >> _PAGE_PFN_SHIFT);
+}
+#define pgd_page(pgd)	pgd_page(pgd)
+
+#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+
+#define p4d_offset p4d_offset
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+	if (pgtable_l5_enabled)
+		return pgd_pgtable(*pgd) + p4d_index(address);
+
+	return (p4d_t *)pgd;
+}
+
 #endif /* _ASM_RISCV_PGTABLE_64_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7e949f25c933..58fece3133e9 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -62,7 +62,8 @@
  * position vmemmap directly below the VMALLOC region.
  */
 #ifdef CONFIG_64BIT
-#define VA_BITS		(pgtable_l4_enabled ? 48 : 39)
+#define VA_BITS		(pgtable_l5_enabled ? \
+				57 : (pgtable_l4_enabled ? 48 : 39))
 #else
 #define VA_BITS		32
 #endif
@@ -102,7 +103,6 @@
 
 #ifndef __ASSEMBLY__
 
-#include <asm-generic/pgtable-nop4d.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 #include <linux/mm_types.h>
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index cf4d018b7d66..9e4ed02e3d56 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -45,7 +45,9 @@ u64 satp_mode = SATP_MODE_32;
 EXPORT_SYMBOL(satp_mode);
 
 bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
+bool pgtable_l5_enabled = false;
 EXPORT_SYMBOL(pgtable_l4_enabled);
+EXPORT_SYMBOL(pgtable_l5_enabled);
 
 phys_addr_t phys_ram_base __ro_after_init;
 EXPORT_SYMBOL(phys_ram_base);
--
cgit v1.2.3
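
As an illustration of what the two runtime flags select, here is a minimal standalone user-space sketch, not part of the patch and not kernel code, that mirrors only the PGDIR_SHIFT and VA_BITS ternaries added above. The helper names pgdir_shift() and va_bits() and the Sv39/Sv48/Sv57 table are invented for this example; the paging-mode labels are the RISC-V modes the flags correspond to.

/*
 * Standalone model of the runtime page-table-level selection: it mirrors
 * the PGDIR_SHIFT and VA_BITS ternaries from the patch for the three
 * paging modes and reports whether the p4d level is folded.
 */
#include <stdbool.h>
#include <stdio.h>

#define PGDIR_SHIFT_L3 30
#define PGDIR_SHIFT_L4 39
#define PGDIR_SHIFT_L5 48

static bool pgtable_l4_enabled;
static bool pgtable_l5_enabled;

/* Same ternary chain as the patched PGDIR_SHIFT macro. */
static unsigned int pgdir_shift(void)
{
	return pgtable_l5_enabled ? PGDIR_SHIFT_L5 :
	       (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3);
}

/* Same ternary chain as the patched VA_BITS macro. */
static unsigned int va_bits(void)
{
	return pgtable_l5_enabled ? 57 : (pgtable_l4_enabled ? 48 : 39);
}

int main(void)
{
	static const struct {
		const char *mode;
		bool l4, l5;
	} cfg[] = {
		{ "Sv39", false, false },
		{ "Sv48", true,  false },
		{ "Sv57", true,  true  },
	};

	for (unsigned int i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++) {
		pgtable_l4_enabled = cfg[i].l4;
		pgtable_l5_enabled = cfg[i].l5;
		printf("%s: VA_BITS=%u PGDIR_SHIFT=%u, p4d %s\n",
		       cfg[i].mode, va_bits(), pgdir_shift(),
		       pgtable_l5_enabled ? "is a real level"
					  : "is folded into pgd");
	}
	return 0;
}

Compiled with any C compiler, this prints VA_BITS=39/PGDIR_SHIFT=30 for Sv39, 48/39 for Sv48 and 57/48 for Sv57, with p4d reported as folded except in the five-level case, matching what the patched macros select at run time.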