Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--   arch/sparc/mm/Makefile      |   2
-rw-r--r--   arch/sparc/mm/execmem.c     |  21
-rw-r--r--   arch/sparc/mm/hugetlbpage.c | 125
-rw-r--r--   arch/sparc/mm/init_32.c     |  31
-rw-r--r--   arch/sparc/mm/init_64.c     |  24
-rw-r--r--   arch/sparc/mm/leon_mm.c     |   8
-rw-r--r--   arch/sparc/mm/srmmu.c       |  56
-rw-r--r--   arch/sparc/mm/tlb.c         |  12
8 files changed, 54 insertions(+), 225 deletions(-)
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 809d993f6d88..2d1752108d77 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -14,3 +14,5 @@ obj-$(CONFIG_SPARC32) += leon_mm.o
 
 # Only used by sparc64
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+
+obj-$(CONFIG_EXECMEM) += execmem.o
diff --git a/arch/sparc/mm/execmem.c b/arch/sparc/mm/execmem.c
new file mode 100644
index 000000000000..0fac97dd5728
--- /dev/null
+++ b/arch/sparc/mm/execmem.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/execmem.h>
+
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start = MODULES_VADDR,
+				.end = MODULES_END,
+				.pgprot = PAGE_KERNEL,
+				.alignment = 1,
+			},
+		},
+	};
+
+	return &execmem_info;
+}
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index b432500c13a5..80504148d8a5 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -19,117 +19,6 @@
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 
-/* Slightly simplified from the non-hugepage variant because by
- * definition we don't have to worry about any page coloring stuff
- */
-
-static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
-							unsigned long addr,
-							unsigned long len,
-							unsigned long pgoff,
-							unsigned long flags)
-{
-	struct hstate *h = hstate_file(filp);
-	unsigned long task_size = TASK_SIZE;
-	struct vm_unmapped_area_info info;
-
-	if (test_thread_flag(TIF_32BIT))
-		task_size = STACK_TOP32;
-
-	info.flags = 0;
-	info.length = len;
-	info.low_limit = TASK_UNMAPPED_BASE;
-	info.high_limit = min(task_size, VA_EXCLUDE_START);
-	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-	info.align_offset = 0;
-	addr = vm_unmapped_area(&info);
-
-	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
-		VM_BUG_ON(addr != -ENOMEM);
-		info.low_limit = VA_EXCLUDE_END;
-		info.high_limit = task_size;
-		addr = vm_unmapped_area(&info);
-	}
-
-	return addr;
-}
-
-static unsigned long
-hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-				  const unsigned long len,
-				  const unsigned long pgoff,
-				  const unsigned long flags)
-{
-	struct hstate *h = hstate_file(filp);
-	struct mm_struct *mm = current->mm;
-	unsigned long addr = addr0;
-	struct vm_unmapped_area_info info;
-
-	/* This should only ever run for 32-bit processes. */
-	BUG_ON(!test_thread_flag(TIF_32BIT));
-
-	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-	info.length = len;
-	info.low_limit = PAGE_SIZE;
-	info.high_limit = mm->mmap_base;
-	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-	info.align_offset = 0;
-	addr = vm_unmapped_area(&info);
-
-	/*
-	 * A failed mmap() very likely causes application failure,
-	 * so fall back to the bottom-up function here. This scenario
-	 * can happen with large stack limits and large mmap()
-	 * allocations.
-	 */
-	if (addr & ~PAGE_MASK) {
-		VM_BUG_ON(addr != -ENOMEM);
-		info.flags = 0;
-		info.low_limit = TASK_UNMAPPED_BASE;
-		info.high_limit = STACK_TOP32;
-		addr = vm_unmapped_area(&info);
-	}
-
-	return addr;
-}
-
-unsigned long
-hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-		unsigned long len, unsigned long pgoff, unsigned long flags)
-{
-	struct hstate *h = hstate_file(file);
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long task_size = TASK_SIZE;
-
-	if (test_thread_flag(TIF_32BIT))
-		task_size = STACK_TOP32;
-
-	if (len & ~huge_page_mask(h))
-		return -EINVAL;
-	if (len > task_size)
-		return -ENOMEM;
-
-	if (flags & MAP_FIXED) {
-		if (prepare_hugepage_range(file, addr, len))
-			return -EINVAL;
-		return addr;
-	}
-
-	if (addr) {
-		addr = ALIGN(addr, huge_page_size(h));
-		vma = find_vma(mm, addr);
-		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vm_start_gap(vma)))
-			return addr;
-	}
-	if (mm->get_unmapped_area == arch_get_unmapped_area)
-		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
-				pgoff, flags);
-	else
-		return hugetlb_get_unmapped_area_topdown(file, addr, len,
-				pgoff, flags);
-}
 
 static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
 {
@@ -371,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep)
+			      pte_t *ptep, unsigned long sz)
 {
 	unsigned int i, nptes, orig_shift, shift;
 	unsigned long size;
@@ -407,18 +296,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return entry;
 }
 
-int pmd_huge(pmd_t pmd)
-{
-	return !pmd_none(pmd) &&
-		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
-}
-
-int pud_huge(pud_t pud)
-{
-	return !pud_none(pud) &&
-		(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
-}
-
 static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 			   unsigned long addr)
 {
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index d96a14ffceeb..fdc93dd12c3e 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -232,19 +232,7 @@ static void __init taint_real_pages(void)
 	}
 }
 
-static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
-{
-	unsigned long tmp;
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-	printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
-#endif
-
-	for (tmp = start_pfn; tmp < end_pfn; tmp++)
-		free_highmem_page(pfn_to_page(tmp));
-}
-
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
 {
 	int i;
 
@@ -274,23 +262,6 @@ void __init mem_init(void)
 	memset(sparc_valid_addr_bitmap, 0, i << 2);
 
 	taint_real_pages();
-
-	max_mapnr = last_valid_pfn - pfn_base;
-	high_memory = __va(max_low_pfn << PAGE_SHIFT);
-	memblock_free_all();
-
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-		unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
-		unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
-
-		if (end_pfn <= highstart_pfn)
-			continue;
-
-		if (start_pfn < highstart_pfn)
-			start_pfn = highstart_pfn;
-
-		map_high_region(start_pfn, end_pfn);
-	}
 }
 
 void sparc_flush_page_to_ram(struct page *page)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1ca9054d9b97..760818950464 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -490,7 +490,7 @@ void flush_dcache_folio(struct folio *folio)
 		}
 		set_dcache_dirty(folio, this_cpu);
 	} else {
-		/* We could delay the flush for the !page_mapping
+		/* We could delay the flush for the !folio_mapping
 		 * case too. But that case is for exec env/arg
 		 * pages and those are %99 certainly going to get
 		 * faulted into the tlb (and thus flushed) anyways.
@@ -1075,14 +1075,9 @@ static void __init allocate_node_data(int nid)
 {
 	struct pglist_data *p;
 	unsigned long start_pfn, end_pfn;
-#ifdef CONFIG_NUMA
 
-	NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
-					     SMP_CACHE_BYTES, nid);
-	if (!NODE_DATA(nid)) {
-		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
-		prom_halt();
-	}
+#ifdef CONFIG_NUMA
+	alloc_node_data(nid);
 
 	NODE_DATA(nid)->node_id = nid;
 #endif
@@ -1115,11 +1110,9 @@ static void init_node_masks_nonnuma(void)
 }
 
 #ifdef CONFIG_NUMA
-struct pglist_data *node_data[MAX_NUMNODES];
 
 EXPORT_SYMBOL(numa_cpu_lookup_table);
 EXPORT_SYMBOL(numa_cpumask_lookup_table);
-EXPORT_SYMBOL(node_data);
 
 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
 				   u32 cfg_handle)
@@ -2512,10 +2505,6 @@ static void __init register_page_bootmem_info(void)
 }
 void __init mem_init(void)
 {
-	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
-
-	memblock_free_all();
-
 	/*
 	 * Must be done after boot memory is put on freelist, because here we
 	 * might set fields in deferred struct pages that have not yet been
@@ -2640,11 +2629,6 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
 
 	return 0;
 }
-
-void vmemmap_free(unsigned long start, unsigned long end,
-		struct vmem_altmap *altmap)
-{
-}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
@@ -2927,7 +2911,7 @@ static void __pte_free(pgtable_t pte)
 {
 	struct ptdesc *ptdesc = virt_to_ptdesc(pte);
 
-	pagetable_pte_dtor(ptdesc);
+	pagetable_dtor(ptdesc);
 	pagetable_free(ptdesc);
 }
 
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index ec61ff1f96b7..1dc9b3d70eda 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -39,12 +39,10 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
 	unsigned int ctxtbl;
 	unsigned int pgd, pmd, ped;
 	unsigned int ptr;
-	unsigned int lvl, pte, paddrbase;
+	unsigned int lvl, pte;
 	unsigned int ctx;
 	unsigned int paddr_calc;
 
-	paddrbase = 0;
-
 	if (srmmu_swprobe_trace)
 		printk(KERN_INFO "swprobe: trace on\n");
 
@@ -73,7 +71,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
 			printk(KERN_INFO "swprobe: pgd is entry level 3\n");
 		lvl = 3;
 		pte = pgd;
-		paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
 		goto ready;
 	}
 	if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -96,7 +93,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
 			printk(KERN_INFO "swprobe: pmd is entry level 2\n");
 		lvl = 2;
 		pte = pmd;
-		paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
 		goto ready;
 	}
 	if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -124,7 +120,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
 			printk(KERN_INFO "swprobe: ped is entry level 1\n");
 		lvl = 1;
 		pte = ped;
-		paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
 		goto ready;
 	}
 	if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
@@ -147,7 +142,6 @@ unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
 			printk(KERN_INFO "swprobe: ptr is entry level 0\n");
 		lvl = 0;
 		pte = ptr;
-		paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
 		goto ready;
 	}
 	if (srmmu_swprobe_trace)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 852085ada368..dd32711022f5 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -277,19 +277,13 @@ static void __init srmmu_nocache_init(void)
 
 	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
 
-	srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
+	srmmu_nocache_pool = memblock_alloc_or_panic(srmmu_nocache_size,
 					    SRMMU_NOCACHE_ALIGN_MAX);
-	if (!srmmu_nocache_pool)
-		panic("%s: Failed to allocate %lu bytes align=0x%x\n",
-		      __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
 	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 
 	srmmu_nocache_bitmap =
-		memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+		memblock_alloc_or_panic(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
 			       SMP_CACHE_BYTES);
-	if (!srmmu_nocache_bitmap)
-		panic("%s: Failed to allocate %zu bytes\n", __func__,
-		      BITS_TO_LONGS(bitmap_bits) * sizeof(long));
 	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
 
 	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -372,7 +366,7 @@ void pte_free(struct mm_struct *mm, pgtable_t ptep)
 	page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
 	spin_lock(&mm->page_table_lock);
 	if (page_ref_dec_return(page) == 1)
-		pagetable_pte_dtor(page_ptdesc(page));
+		pagetable_dtor(page_ptdesc(page));
 	spin_unlock(&mm->page_table_lock);
 
 	srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
@@ -452,9 +446,7 @@ static void __init sparc_context_init(int numctx)
 	unsigned long size;
 
 	size = numctx * sizeof(struct ctx_list);
-	ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
-	if (!ctx_list_pool)
-		panic("%s: Failed to allocate %lu bytes\n", __func__, size);
+	ctx_list_pool = memblock_alloc_or_panic(size, SMP_CACHE_BYTES);
 
 	for (ctx = 0; ctx < numctx; ctx++) {
 		struct ctx_list *clist;
@@ -1653,13 +1645,15 @@ static void smp_flush_tlb_all(void)
 	local_ops->tlb_all();
 }
 
+static bool any_other_mm_cpus(struct mm_struct *mm)
+{
+	return cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids;
+}
+
 static void smp_flush_cache_mm(struct mm_struct *mm)
 {
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask))
+		if (any_other_mm_cpus(mm))
 			xc1(local_ops->cache_mm, (unsigned long)mm);
 		local_ops->cache_mm(mm);
 	}
@@ -1668,10 +1662,7 @@ static void smp_flush_cache_mm(struct mm_struct *mm)
 static void smp_flush_tlb_mm(struct mm_struct *mm)
 {
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask)) {
+		if (any_other_mm_cpus(mm)) {
 			xc1(local_ops->tlb_mm, (unsigned long)mm);
 			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
 				cpumask_copy(mm_cpumask(mm),
@@ -1688,10 +1679,7 @@ static void smp_flush_cache_range(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask))
+		if (any_other_mm_cpus(mm))
 			xc3(local_ops->cache_range, (unsigned long)vma, start,
 			    end);
 		local_ops->cache_range(vma, start, end);
@@ -1705,10 +1693,7 @@ static void smp_flush_tlb_range(struct vm_area_struct *vma,
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask))
+		if (any_other_mm_cpus(mm))
 			xc3(local_ops->tlb_range, (unsigned long)vma, start,
 			    end);
 		local_ops->tlb_range(vma, start, end);
@@ -1720,10 +1705,7 @@ static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask))
+		if (any_other_mm_cpus(mm))
 			xc2(local_ops->cache_page, (unsigned long)vma, page);
 		local_ops->cache_page(vma, page);
 	}
@@ -1734,10 +1716,7 @@ static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (mm->context != NO_CONTEXT) {
-		cpumask_t cpu_mask;
-		cpumask_copy(&cpu_mask, mm_cpumask(mm));
-		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-		if (!cpumask_empty(&cpu_mask))
+		if (any_other_mm_cpus(mm))
 			xc2(local_ops->tlb_page, (unsigned long)vma, page);
 		local_ops->tlb_page(vma, page);
 	}
@@ -1759,10 +1738,7 @@ static void smp_flush_page_to_ram(unsigned long page)
 
 static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
 {
-	cpumask_t cpu_mask;
-	cpumask_copy(&cpu_mask, mm_cpumask(mm));
-	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
-	if (!cpumask_empty(&cpu_mask))
+	if (any_other_mm_cpus(mm))
 		xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr);
 	local_ops->sig_insns(mm, insn_addr);
 }
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b44d79d778c7..a35ddcca5e76 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -52,8 +52,10 @@ out:
 
 void arch_enter_lazy_mmu_mode(void)
 {
-	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
+	struct tlb_batch *tb;
 
+	preempt_disable();
+	tb = this_cpu_ptr(&tlb_batch);
 	tb->active = 1;
 }
 
@@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void)
 	if (tb->tlb_nr)
 		flush_tlb_pending();
 	tb->active = 0;
+	preempt_enable();
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
@@ -183,12 +186,12 @@ static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
 	 * hugetlb_pte_count.
 	 */
 	if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
-		if (is_huge_zero_page(pmd_page(pmd)))
+		if (is_huge_zero_pmd(pmd))
 			mm->context.hugetlb_pte_count++;
 		else
 			mm->context.thp_pte_count++;
 	} else {
-		if (is_huge_zero_page(pmd_page(orig)))
+		if (is_huge_zero_pmd(orig))
 			mm->context.hugetlb_pte_count--;
 		else
 			mm->context.thp_pte_count--;
@@ -249,6 +252,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 {
 	pmd_t old, entry;
 
+	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
 	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
 	old = pmdp_establish(vma, address, pmdp, entry);
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
@@ -259,7 +263,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 	 * Sanity check pmd before doing the actual decrement.
 	 */
 	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
-	    !is_huge_zero_page(pmd_page(entry)))
+	    !is_huge_zero_pmd(entry))
 		(vma->vm_mm)->context.thp_pte_count--;
 
 	return old;