Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--   arch/arm64/mm/Makefile       |   2
-rw-r--r--   arch/arm64/mm/dma-mapping.c  |  17
-rw-r--r--   arch/arm64/mm/fault.c        |  36
-rw-r--r--   arch/arm64/mm/fixmap.c       | 203
-rw-r--r--   arch/arm64/mm/init.c         |  34
-rw-r--r--   arch/arm64/mm/mmu.c          | 288
-rw-r--r--   arch/arm64/mm/pageattr.c     |   7
-rw-r--r--   arch/arm64/mm/proc.S         |   6
-rw-r--r--   arch/arm64/mm/ptdump.c       |   2
9 files changed, 310 insertions, 285 deletions
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index ff1e800ba7a1..dbd1bc95967d 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -2,7 +2,7 @@
 obj-y        := dma-mapping.o extable.o fault.o init.o \
                 cache.o copypage.o flush.o \
                 ioremap.o mmap.o pgd.o mmu.o \
-                context.o proc.o pageattr.o
+                context.o proc.o pageattr.o fixmap.o
 obj-$(CONFIG_HUGETLB_PAGE)   += hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE)    += ptdump.o
 obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 5240f6acad64..3cb101e8cb29 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -36,22 +36,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 {
         unsigned long start = (unsigned long)page_address(page);

-        /*
-         * The architecture only requires a clean to the PoC here in order to
-         * meet the requirements of the DMA API. However, some vendors (i.e.
-         * Qualcomm) abuse the DMA API for transferring buffers from the
-         * non-secure to the secure world, resetting the system if a non-secure
-         * access shows up after the buffer has been transferred:
-         *
-         * https://lore.kernel.org/r/20221114110329.68413-1-manivannan.sadhasivam@linaro.org
-         *
-         * Using clean+invalidate appears to make this issue less likely, but
-         * the drivers themselves still need fixing as the CPU could issue a
-         * speculative read from the buffer via the linear mapping irrespective
-         * of the cache maintenance we use. Once the drivers are fixed, we can
-         * relax this to a clean operation.
-         */
-        dcache_clean_inval_poc(start, start + size);
+        dcache_clean_poc(start, start + size);
 }

 #ifdef CONFIG_IOMMU_DMA
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f4cb0f85ccf4..9e0db5c387e3 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -535,6 +535,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
         unsigned long vm_flags;
         unsigned int mm_flags = FAULT_FLAG_DEFAULT;
         unsigned long addr = untagged_addr(far);
+#ifdef CONFIG_PER_VMA_LOCK
+        struct vm_area_struct *vma;
+#endif

         if (kprobe_page_fault(regs, esr))
                 return 0;
@@ -585,6 +588,36 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,

         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

+#ifdef CONFIG_PER_VMA_LOCK
+        if (!(mm_flags & FAULT_FLAG_USER))
+                goto lock_mmap;
+
+        vma = lock_vma_under_rcu(mm, addr);
+        if (!vma)
+                goto lock_mmap;
+
+        if (!(vma->vm_flags & vm_flags)) {
+                vma_end_read(vma);
+                goto lock_mmap;
+        }
+        fault = handle_mm_fault(vma, addr & PAGE_MASK,
+                                mm_flags | FAULT_FLAG_VMA_LOCK, regs);
+        vma_end_read(vma);
+
+        if (!(fault & VM_FAULT_RETRY)) {
+                count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+                goto done;
+        }
+        count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
+        /* Quick path to respond to signals */
+        if (fault_signal_pending(fault, regs)) {
+                if (!user_mode(regs))
+                        goto no_context;
+                return 0;
+        }
+lock_mmap:
+#endif /* CONFIG_PER_VMA_LOCK */
         /*
          * As per x86, we may deadlock here. However, since the kernel only
          * validly references user space from well defined areas of the code,
@@ -628,6 +661,9 @@ retry:
         }
         mmap_read_unlock(mm);

+#ifdef CONFIG_PER_VMA_LOCK
+done:
+#endif
         /*
          * Handle the "normal" (no error) case first.
          */
diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c
new file mode 100644
index 000000000000..c0a3301203bd
--- /dev/null
+++ b/arch/arm64/mm/fixmap.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Fixmap manipulation code
+ */
+
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
+
+#include <asm/fixmap.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define NR_BM_PTE_TABLES \
+        SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
+#define NR_BM_PMD_TABLES \
+        SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)
+
+static_assert(NR_BM_PMD_TABLES == 1);
+
+#define __BM_TABLE_IDX(addr, shift) \
+        (((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))
+
+#define BM_PTE_TABLE_IDX(addr)  __BM_TABLE_IDX(addr, PMD_SHIFT)
+
+static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
+static inline pte_t *fixmap_pte(unsigned long addr)
+{
+        return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
+}
+
+static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
+{
+        pmd_t pmd = READ_ONCE(*pmdp);
+        pte_t *ptep;
+
+        if (pmd_none(pmd)) {
+                ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
+                __pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE);
+        }
+}
+
+static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
+                                         unsigned long end)
+{
+        unsigned long next;
+        pud_t pud = READ_ONCE(*pudp);
+        pmd_t *pmdp;
+
+        if (pud_none(pud))
+                __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
+
+        pmdp = pmd_offset_kimg(pudp, addr);
+        do {
+                next = pmd_addr_end(addr, end);
+                early_fixmap_init_pte(pmdp, addr);
+        } while (pmdp++, addr = next, addr != end);
+}
+
+
+static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
+                                         unsigned long end)
+{
+        p4d_t p4d = READ_ONCE(*p4dp);
+        pud_t *pudp;
+
+        if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
+            p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
+                /*
+                 * We only end up here if the kernel mapping and the fixmap
+                 * share the top level pgd entry, which should only happen on
+                 * 16k/4 levels configurations.
+                 */
+                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+        }
+
+        if (p4d_none(p4d))
+                __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
+
+        pudp = pud_offset_kimg(p4dp, addr);
+        early_fixmap_init_pmd(pudp, addr, end);
+}
+
+/*
+ * The p*d_populate functions call virt_to_phys implicitly so they can't be used
+ * directly on kernel symbols (bm_p*d). This function is called too early to use
+ * lm_alias so __p*d_populate functions must be used to populate with the
+ * physical address from __pa_symbol.
+ */
+void __init early_fixmap_init(void)
+{
+        unsigned long addr = FIXADDR_TOT_START;
+        unsigned long end = FIXADDR_TOP;
+
+        pgd_t *pgdp = pgd_offset_k(addr);
+        p4d_t *p4dp = p4d_offset(pgdp, addr);
+
+        early_fixmap_init_pud(p4dp, addr, end);
+}
+
+/*
+ * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
+ * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
+ */
+void __set_fixmap(enum fixed_addresses idx,
+                  phys_addr_t phys, pgprot_t flags)
+{
+        unsigned long addr = __fix_to_virt(idx);
+        pte_t *ptep;
+
+        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+
+        ptep = fixmap_pte(addr);
+
+        if (pgprot_val(flags)) {
+                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
+        } else {
+                pte_clear(&init_mm, addr, ptep);
+                flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+        }
+}
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+{
+        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+        phys_addr_t dt_phys_base;
+        int offset;
+        void *dt_virt;
+
+        /*
+         * Check whether the physical FDT address is set and meets the minimum
+         * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+         * at least 8 bytes so that we can always access the magic and size
+         * fields of the FDT header after mapping the first chunk, double check
+         * here if that is indeed the case.
+         */
+        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+                return NULL;
+
+        dt_phys_base = round_down(dt_phys, PAGE_SIZE);
+        offset = dt_phys % PAGE_SIZE;
+        dt_virt = (void *)dt_virt_base + offset;
+
+        /* map the first chunk so we can read the size from the header */
+        create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);
+
+        if (fdt_magic(dt_virt) != FDT_MAGIC)
+                return NULL;
+
+        *size = fdt_totalsize(dt_virt);
+        if (*size > MAX_FDT_SIZE)
+                return NULL;
+
+        if (offset + *size > PAGE_SIZE) {
+                create_mapping_noalloc(dt_phys_base, dt_virt_base,
+                                       offset + *size, prot);
+        }
+
+        return dt_virt;
+}
+
+/*
+ * Copy the fixmap region into a new pgdir.
+ */
+void __init fixmap_copy(pgd_t *pgdir)
+{
+        if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
+                /*
+                 * The fixmap falls in a separate pgd to the kernel, and doesn't
+                 * live in the carveout for the swapper_pg_dir. We can simply
+                 * re-use the existing dir for the fixmap.
+                 */
+                set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
+                        READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
+        } else if (CONFIG_PGTABLE_LEVELS > 3) {
+                pgd_t *bm_pgdp;
+                p4d_t *bm_p4dp;
+                pud_t *bm_pudp;
+                /*
+                 * The fixmap shares its top level pgd entry with the kernel
+                 * mapping. This can really only occur when we are running
+                 * with 16k/4 levels, so we can simply reuse the pud level
+                 * entry instead.
+                 */
+                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+                bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
+                bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
+                bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
+                pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
+                pud_clear_fixmap();
+        } else {
+                BUG();
+        }
+}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 58a0bb2c17f1..66e70ca47680 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -61,34 +61,8 @@ EXPORT_SYMBOL(memstart_addr);
  * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
  * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
  * otherwise it is empty.
- *
- * Memory reservation for crash kernel either done early or deferred
- * depending on DMA memory zones configs (ZONE_DMA) --
- *
- * In absence of ZONE_DMA configs arm64_dma_phys_limit initialized
- * here instead of max_zone_phys(). This lets early reservation of
- * crash kernel memory which has a dependency on arm64_dma_phys_limit.
- * Reserving memory early for crash kernel allows linear creation of block
- * mappings (greater than page-granularity) for all the memory bank rangs.
- * In this scheme a comparatively quicker boot is observed.
- *
- * If ZONE_DMA configs are defined, crash kernel memory reservation
- * is delayed until DMA zone memory range size initialization performed in
- * zone_sizes_init(). The defer is necessary to steer clear of DMA zone
- * memory range to avoid overlap allocation. So crash kernel memory boundaries
- * are not known when mapping all bank memory ranges, which otherwise means
- * not possible to exclude crash kernel range from creating block mappings
- * so page-granularity mappings are created for the entire memory range.
- * Hence a slightly slower boot is observed.
- *
- * Note: Page-granularity mappings are necessary for crash kernel memory
- * range for shrinking its size via /sys/kernel/kexec_crash_size interface.
  */
-#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
 phys_addr_t __ro_after_init arm64_dma_phys_limit;
-#else
-phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
-#endif

 /* Current arm64 boot protocol requires 2MB alignment */
 #define CRASH_ALIGN                     SZ_2M
@@ -248,6 +222,8 @@ static void __init zone_sizes_init(void)
         if (!arm64_dma_phys_limit)
                 arm64_dma_phys_limit = dma32_phys_limit;
 #endif
+        if (!arm64_dma_phys_limit)
+                arm64_dma_phys_limit = PHYS_MASK + 1;
         max_zone_pfns[ZONE_NORMAL] = max_pfn;

         free_area_init(max_zone_pfns);
@@ -408,9 +384,6 @@ void __init arm64_memblock_init(void)

         early_init_fdt_scan_reserved_mem();

-        if (!defer_reserve_crashkernel())
-                reserve_crashkernel();
-
         high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 }

@@ -457,8 +430,7 @@ void __init bootmem_init(void)
          * request_standard_resources() depends on crashkernel's memory being
          * reserved, so do it here.
          */
-        if (defer_reserve_crashkernel())
-                reserve_crashkernel();
+        reserve_crashkernel();

         memblock_dump_all();
 }
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6f9d8898a025..af6bc8403ee4 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -24,6 +24,7 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/set_memory.h>
+#include <linux/kfence.h>

 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -38,6 +39,7 @@
 #include <asm/ptdump.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
+#include <asm/kfence.h>

 #define NO_BLOCK_MAPPINGS       BIT(0)
 #define NO_CONT_MAPPINGS        BIT(1)
@@ -71,10 +73,6 @@ long __section(".mmuoff.data.write") __early_cpu_boot_status;
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);

-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
-
 static DEFINE_SPINLOCK(swapper_pgdir_lock);
 static DEFINE_MUTEX(fixmap_lock);

@@ -450,8 +448,8 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
  * without allocating new levels of table. Note that this permits the
  * creation of new section or page entries.
  */
-static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
-                                  phys_addr_t size, pgprot_t prot)
+void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+                                   phys_addr_t size, pgprot_t prot)
 {
         if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
                 pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
@@ -510,20 +508,59 @@ void __init mark_linear_text_alias_ro(void)
                             PAGE_KERNEL_RO);
 }

-static bool crash_mem_map __initdata;
+#ifdef CONFIG_KFENCE
+
+bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;

-static int __init enable_crash_mem_map(char *arg)
+/* early_param() will be parsed before map_mem() below. */
+static int __init parse_kfence_early_init(char *arg)
 {
-        /*
-         * Proper parameter parsing is done by reserve_crashkernel(). We only
-         * need to know if the linear map has to avoid block mappings so that
-         * the crashkernel reservations can be unmapped later.
-         */
-        crash_mem_map = true;
+        int val;

+        if (get_option(&arg, &val))
+                kfence_early_init = !!val;
         return 0;
 }
-early_param("crashkernel", enable_crash_mem_map);
+early_param("kfence.sample_interval", parse_kfence_early_init);
+
+static phys_addr_t __init arm64_kfence_alloc_pool(void)
+{
+        phys_addr_t kfence_pool;
+
+        if (!kfence_early_init)
+                return 0;
+
+        kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+        if (!kfence_pool) {
+                pr_err("failed to allocate kfence pool\n");
+                kfence_early_init = false;
+                return 0;
+        }
+
+        /* Temporarily mark as NOMAP. */
+        memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+
+        return kfence_pool;
+}
+
+static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
+{
+        if (!kfence_pool)
+                return;
+
+        /* KFENCE pool needs page-level mapping. */
+        __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
+                       pgprot_tagged(PAGE_KERNEL),
+                       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+        memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+        __kfence_pool = phys_to_virt(kfence_pool);
+}
+#else /* CONFIG_KFENCE */
+
+static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
+static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }
+
+#endif /* CONFIG_KFENCE */

 static void __init map_mem(pgd_t *pgdp)
 {
@@ -531,6 +568,7 @@ static void __init map_mem(pgd_t *pgdp)
         phys_addr_t kernel_start = __pa_symbol(_stext);
         phys_addr_t kernel_end = __pa_symbol(__init_begin);
         phys_addr_t start, end;
+        phys_addr_t early_kfence_pool;
         int flags = NO_EXEC_MAPPINGS;
         u64 i;

@@ -543,6 +581,8 @@ static void __init map_mem(pgd_t *pgdp)
          */
         BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));

+        early_kfence_pool = arm64_kfence_alloc_pool();
+
         if (can_set_direct_map())
                 flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

@@ -554,16 +594,6 @@ static void __init map_mem(pgd_t *pgdp)
          */
         memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

-#ifdef CONFIG_KEXEC_CORE
-        if (crash_mem_map) {
-                if (defer_reserve_crashkernel())
-                        flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
-                else if (crashk_res.end)
-                        memblock_mark_nomap(crashk_res.start,
-                                            resource_size(&crashk_res));
-        }
-#endif
-
         /* map all the memory banks */
         for_each_mem_range(i, &start, &end) {
                 if (start >= end)
@@ -590,24 +620,7 @@ static void __init map_mem(pgd_t *pgdp)
          */
         __map_memblock(pgdp, kernel_start, kernel_end,
                        PAGE_KERNEL, NO_CONT_MAPPINGS);
         memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
-
-        /*
-         * Use page-level mappings here so that we can shrink the region
-         * in page granularity and put back unused memory to buddy system
-         * through /sys/kernel/kexec_crash_size interface.
-         */
-#ifdef CONFIG_KEXEC_CORE
-        if (crash_mem_map && !defer_reserve_crashkernel()) {
-                if (crashk_res.end) {
-                        __map_memblock(pgdp, crashk_res.start,
-                                       crashk_res.end + 1,
-                                       PAGE_KERNEL,
-                                       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
-                        memblock_clear_nomap(crashk_res.start,
-                                             resource_size(&crashk_res));
-                }
-        }
-#endif
+        arm64_kfence_map_pool(early_kfence_pool, pgdp);
 }

 void mark_rodata_ro(void)
@@ -734,34 +747,7 @@ static void __init map_kernel(pgd_t *pgdp)
                            &vmlinux_initdata, 0, VM_NO_GUARD);
         map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

-        if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
-                /*
-                 * The fixmap falls in a separate pgd to the kernel, and doesn't
-                 * live in the carveout for the swapper_pg_dir. We can simply
-                 * re-use the existing dir for the fixmap.
-                 */
-                set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
-                        READ_ONCE(*pgd_offset_k(FIXADDR_START)));
-        } else if (CONFIG_PGTABLE_LEVELS > 3) {
-                pgd_t *bm_pgdp;
-                p4d_t *bm_p4dp;
-                pud_t *bm_pudp;
-                /*
-                 * The fixmap shares its top level pgd entry with the kernel
-                 * mapping. This can really only occur when we are running
-                 * with 16k/4 levels, so we can simply reuse the pud level
-                 * entry instead.
-                 */
-                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-                bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
-                bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
-                bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
-                pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
-                pud_clear_fixmap();
-        } else {
-                BUG();
-        }
-
+        fixmap_copy(pgdp);
         kasan_copy_shadow(pgdp);
 }

@@ -1176,166 +1162,6 @@ void vmemmap_free(unsigned long start, unsigned long end,
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */

-static inline pud_t *fixmap_pud(unsigned long addr)
-{
-        pgd_t *pgdp = pgd_offset_k(addr);
-        p4d_t *p4dp = p4d_offset(pgdp, addr);
-        p4d_t p4d = READ_ONCE(*p4dp);
-
-        BUG_ON(p4d_none(p4d) || p4d_bad(p4d));
-
-        return pud_offset_kimg(p4dp, addr);
-}
-
-static inline pmd_t *fixmap_pmd(unsigned long addr)
-{
-        pud_t *pudp = fixmap_pud(addr);
-        pud_t pud = READ_ONCE(*pudp);
-
-        BUG_ON(pud_none(pud) || pud_bad(pud));
-
-        return pmd_offset_kimg(pudp, addr);
-}
-
-static inline pte_t *fixmap_pte(unsigned long addr)
-{
-        return &bm_pte[pte_index(addr)];
-}
-
-/*
- * The p*d_populate functions call virt_to_phys implicitly so they can't be used
- * directly on kernel symbols (bm_p*d). This function is called too early to use
- * lm_alias so __p*d_populate functions must be used to populate with the
- * physical address from __pa_symbol.
- */
-void __init early_fixmap_init(void)
-{
-        pgd_t *pgdp;
-        p4d_t *p4dp, p4d;
-        pud_t *pudp;
-        pmd_t *pmdp;
-        unsigned long addr = FIXADDR_START;
-
-        pgdp = pgd_offset_k(addr);
-        p4dp = p4d_offset(pgdp, addr);
-        p4d = READ_ONCE(*p4dp);
-        if (CONFIG_PGTABLE_LEVELS > 3 &&
-            !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
-                /*
-                 * We only end up here if the kernel mapping and the fixmap
-                 * share the top level pgd entry, which should only happen on
-                 * 16k/4 levels configurations.
-                 */
-                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-                pudp = pud_offset_kimg(p4dp, addr);
-        } else {
-                if (p4d_none(p4d))
-                        __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
-                pudp = fixmap_pud(addr);
-        }
-        if (pud_none(READ_ONCE(*pudp)))
-                __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
-        pmdp = fixmap_pmd(addr);
-        __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
-
-        /*
-         * The boot-ioremap range spans multiple pmds, for which
-         * we are not prepared:
-         */
-        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
-                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
-
-        if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
-             || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
-                WARN_ON(1);
-                pr_warn("pmdp %p != %p, %p\n",
-                        pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
-                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
-                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
-                        fix_to_virt(FIX_BTMAP_BEGIN));
-                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
-                        fix_to_virt(FIX_BTMAP_END));
-
-                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
-                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
-        }
-}
-
-/*
- * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
- * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
- */
-void __set_fixmap(enum fixed_addresses idx,
-                  phys_addr_t phys, pgprot_t flags)
-{
-        unsigned long addr = __fix_to_virt(idx);
-        pte_t *ptep;
-
-        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
-
-        ptep = fixmap_pte(addr);
-
-        if (pgprot_val(flags)) {
-                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
-        } else {
-                pte_clear(&init_mm, addr, ptep);
-                flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
-        }
-}
-
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
-{
-        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
-        int offset;
-        void *dt_virt;
-
-        /*
-         * Check whether the physical FDT address is set and meets the minimum
-         * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
-         * at least 8 bytes so that we can always access the magic and size
-         * fields of the FDT header after mapping the first chunk, double check
-         * here if that is indeed the case.
-         */
-        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
-        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
-                return NULL;
-
-        /*
-         * Make sure that the FDT region can be mapped without the need to
-         * allocate additional translation table pages, so that it is safe
-         * to call create_mapping_noalloc() this early.
-         *
-         * On 64k pages, the FDT will be mapped using PTEs, so we need to
-         * be in the same PMD as the rest of the fixmap.
-         * On 4k pages, we'll use section mappings for the FDT so we only
-         * have to be in the same PUD.
-         */
-        BUILD_BUG_ON(dt_virt_base % SZ_2M);
-
-        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
-                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
-
-        offset = dt_phys % SWAPPER_BLOCK_SIZE;
-        dt_virt = (void *)dt_virt_base + offset;
-
-        /* map the first chunk so we can read the size from the header */
-        create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
-                        dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
-
-        if (fdt_magic(dt_virt) != FDT_MAGIC)
-                return NULL;
-
-        *size = fdt_totalsize(dt_virt);
-        if (*size > MAX_FDT_SIZE)
-                return NULL;
-
-        if (offset + *size > SWAPPER_BLOCK_SIZE)
-                create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
-                               round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);
-
-        return dt_virt;
-}
-
 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
         pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 79dd201c59d8..8e2017ba5f1b 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -11,6 +11,7 @@
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
+#include <asm/kfence.h>

 struct page_change_data {
         pgprot_t set_mask;
@@ -22,12 +23,14 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
 bool can_set_direct_map(void)
 {
         /*
-         * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+         * rodata_full and DEBUG_PAGEALLOC require linear map to be
          * mapped at page granularity, so that it is possible to
          * protect/unprotect single pages.
+         *
+         * KFENCE pool requires page-granular mapping if initialized late.
          */
         return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
-                IS_ENABLED(CONFIG_KFENCE);
+                arm64_kfence_can_set_direct_map();
 }

 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 91410f488090..c2cb437821ca 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -167,7 +167,7 @@ alternative_else_nop_endif
 SYM_FUNC_END(cpu_do_resume)
 #endif

-        .pushsection ".idmap.text", "awx"
+        .pushsection ".idmap.text", "a"

 .macro  __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
         adrp    \tmp1, reserved_pg_dir
@@ -201,7 +201,7 @@ SYM_FUNC_END(idmap_cpu_replace_ttbr1)

 #define KPTI_NG_PTE_FLAGS       (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)

-        .pushsection ".idmap.text", "awx"
+        .pushsection ".idmap.text", "a"

         .macro  kpti_mk_tbl_ng, type, num_entries
         add     end_\type\()p, cur_\type\()p, #\num_entries * 8
@@ -400,7 +400,7 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
  * Output:
  *      Return in x0 the value of the SCTLR_EL1 register.
  */
-        .pushsection ".idmap.text", "awx"
+        .pushsection ".idmap.text", "a"
 SYM_FUNC_START(__cpu_setup)
         tlbi    vmalle1                         // Invalidate local TLB
         dsb     nsh
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 9bc4066c5bf3..e305b6593c4e 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -45,7 +45,7 @@ static struct addr_marker address_markers[] = {
         { MODULES_END,                  "Modules end" },
         { VMALLOC_START,                "vmalloc() area" },
         { VMALLOC_END,                  "vmalloc() end" },
-        { FIXADDR_START,                "Fixmap start" },
+        { FIXADDR_TOT_START,            "Fixmap start" },
         { FIXADDR_TOP,                  "Fixmap end" },
         { PCI_IO_START,                 "PCI I/O start" },
         { PCI_IO_END,                   "PCI I/O end" },