author | Palmer Dabbelt <palmer@rivosinc.com> | 2024-02-01 13:25:57 -0800 |
---|---|---|
committer | Palmer Dabbelt <palmer@rivosinc.com> | 2024-02-01 13:25:57 -0800 |
commit | 168b849728c2c47ddaab1408049561100bb7ea51 (patch) | |
tree | 34cfaa47d84d10b2424b9325019c77db6849e35f | |
parent | d9807d60c145836043ffa602328ea1d66dc458b1 (diff) | |
parent | a179a4bfb694f80f2709a1d0398469e787acb974 (diff) | |
Merge patch series "svnapot fixes"
Alexandre Ghiti <alexghiti@rivosinc.com> says:
While merging riscv napot and arm64 contpte support, I noticed that we did
not abide by the specification, which states that we should clear a napot
mapping before setting a new one, the equivalent of what arm64 calls
"break before make" (patch 1). I also noticed that we did not add the new
hugetlb page size introduced by napot to hugetlb_mask_last_page() (patch 2).
* b4-shazam-merge:
riscv: Fix hugetlb_mask_last_page() when NAPOT is enabled
riscv: Fix set_huge_pte_at() for NAPOT mapping
Link: https://lore.kernel.org/r/20240117195741.1926459-1-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
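
For context, the ordering the series enforces is: invalidate every constituent PTE ("break"), issue a TLB flush backed by SFENCE.VMA over the whole range, and only then write the new entries ("make"). Below is a minimal sketch of that sequence, distilled from the patch further down; `napot_break_before_make` is a hypothetical name used only for illustration, while the helpers it calls (`ptep_get_and_clear()`, `flush_tlb_range()`, `set_pte_at()`, `TLB_FLUSH_VMA()`) are the kernel APIs the patch itself uses.

```c
#include <linux/mm.h>
#include <asm-generic/tlb.h>	/* TLB_FLUSH_VMA() */

/*
 * Hypothetical illustration of "break before make" for a NAPOT mapping
 * spanning pte_num base PTEs of pgsize bytes each. The actual patch
 * implements this as clear_flush() called from set_huge_pte_at().
 */
static void napot_break_before_make(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, unsigned long pgsize,
				    unsigned long pte_num, pte_t pte)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	/* Break: mark every constituent PTE invalid. */
	for (i = 0; i < pte_num; i++, addr += pgsize)
		ptep_get_and_clear(mm, addr, ptep + i);

	/* Fence: one range flush (SFENCE.VMA) covering all 4 KiB regions. */
	flush_tlb_range(&vma, saddr, addr);

	/* Make: only now install the new entries. */
	for (i = 0, addr = saddr; i < pte_num; i++, addr += pgsize)
		set_pte_at(mm, addr, ptep + i, pte);
}
```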
-rw-r--r-- | arch/riscv/mm/hugetlbpage.c | 62 |
1 file changed, 60 insertions(+), 2 deletions(-)
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index 431596c0e20e..87406b26c3da 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 	return pte;
 }
 
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+	unsigned long hp_size = huge_page_size(h);
+
+	switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+	case PUD_SIZE:
+		return P4D_SIZE - PUD_SIZE;
+#endif
+	case PMD_SIZE:
+		return PUD_SIZE - PMD_SIZE;
+	case napot_cont_size(NAPOT_CONT64KB_ORDER):
+		return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
+	default:
+		break;
+	}
+
+	return 0UL;
+}
+
 static pte_t get_clear_contig(struct mm_struct *mm,
 			      unsigned long addr,
 			      pte_t *ptep,
@@ -177,13 +197,36 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
 	return entry;
 }
 
+static void clear_flush(struct mm_struct *mm,
+			unsigned long addr,
+			pte_t *ptep,
+			unsigned long pgsize,
+			unsigned long ncontig)
+{
+	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+	unsigned long i, saddr = addr;
+
+	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+		ptep_get_and_clear(mm, addr, ptep);
+
+	flush_tlb_range(&vma, saddr, addr);
+}
+
+/*
+ * When dealing with NAPOT mappings, the privileged specification indicates that
+ * "if an update needs to be made, the OS generally should first mark all of the
+ * PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions
+ * within the range, [...] then update the PTE(s), as described in Section
+ * 4.2.1.". That's the equivalent of the Break-Before-Make approach used by
+ * arm64.
+ */
 void set_huge_pte_at(struct mm_struct *mm,
 		     unsigned long addr,
 		     pte_t *ptep,
 		     pte_t pte,
 		     unsigned long sz)
 {
-	unsigned long hugepage_shift;
+	unsigned long hugepage_shift, pgsize;
 	int i, pte_num;
 
 	if (sz >= PGDIR_SIZE)
@@ -198,7 +241,22 @@ void set_huge_pte_at(struct mm_struct *mm,
 		hugepage_shift = PAGE_SHIFT;
 
 	pte_num = sz >> hugepage_shift;
-	for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
+	pgsize = 1 << hugepage_shift;
+
+	if (!pte_present(pte)) {
+		for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
+			set_ptes(mm, addr, ptep, pte, 1);
+		return;
+	}
+
+	if (!pte_napot(pte)) {
+		set_ptes(mm, addr, ptep, pte, 1);
+		return;
+	}
+
+	clear_flush(mm, addr, ptep, pgsize, pte_num);
+
+	for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
 		set_pte_at(mm, addr, ptep, pte);
 }
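
As a quick sanity check on the new hugetlb_mask_last_page() cases, the arithmetic for a common configuration (4 KiB base pages, Sv39/Sv48) works out as in the standalone sketch below; the `EX_*` constants are assumptions mirroring the kernel's PUD_SIZE, PMD_SIZE, and the 64 KiB NAPOT contiguous size, not kernel definitions.

```c
#include <stdio.h>

/* Assumed sizes for 4 KiB base pages (Sv39/Sv48); illustrative only. */
#define EX_PUD_SIZE	(1UL << 30)	/* 1 GiB */
#define EX_PMD_SIZE	(1UL << 21)	/* 2 MiB */
#define EX_NAPOT_64K	(1UL << 16)	/* 64 KiB NAPOT contiguous size */

int main(void)
{
	/* hugetlb_mask_last_page() returns "level size minus page size". */
	printf("PMD hugepage mask: %#lx\n", EX_PUD_SIZE - EX_PMD_SIZE);  /* 0x3fe00000 */
	printf("64K NAPOT mask:    %#lx\n", EX_PMD_SIZE - EX_NAPOT_64K); /* 0x1f0000 */
	return 0;
}
```

Roughly speaking, the generic hugetlb walker ORs the returned mask into the current address and then steps by the huge page size, which lands it on the next upper-level boundary whenever no entry exists at the current one; without the NAPOT case, 64 KiB mappings could not be skipped over correctly.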