author    Linus Torvalds <torvalds@linux-foundation.org>  2019-07-08 09:54:55 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-07-08 09:54:55 -0700
commit    dfd437a257924484b144ee750e60affc95562c6d (patch)
tree      d692781730ed72743b8abbc42a04a176cb2f1d6d /arch/arm64/mm
parent    0ecfebd2b52404ae0c54a878c872bb93363ada36 (diff)
parent    0c61efd322b75ed3143e3d130ebecbebf561adf5 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:

 - arm64 support for syscall emulation via PTRACE_SYSEMU{,_SINGLESTEP}

 - Wire up VM_FLUSH_RESET_PERMS for arm64, allowing the core code to
   manage the permissions of executable vmalloc regions more strictly

 - Slight performance improvement by keeping softirqs enabled while
   touching the FPSIMD/SVE state (kernel_neon_begin/end)

 - Expose a couple of ARMv8.5 features to user (HWCAP): CondM (new
   XAFLAG and AXFLAG instructions for floating point comparison flags
   manipulation) and FRINT (rounding floating point numbers to integers)

 - Re-instate ARM64_PSEUDO_NMI support which was previously marked as
   BROKEN due to some bugs (now fixed)

 - Improve parking of stopped CPUs and implement an arm64-specific
   panic_smp_self_stop() to avoid warning on not being able to stop
   secondary CPUs during panic

 - perf: enable the ARM Statistical Profiling Extensions (SPE) on ACPI
   platforms

 - perf: DDR performance monitor support for iMX8QXP

 - cache_line_size() can now be set from DT or ACPI/PPTT if provided to
   cope with a system cache info not exposed via the CPUID registers

 - Avoid warning on hardware cache line size greater than
   ARCH_DMA_MINALIGN if the system is fully coherent

 - arm64 do_page_fault() and hugetlb cleanups

 - Refactor set_pte_at() to avoid redundant READ_ONCE(*ptep)

 - Ignore ACPI 5.1 FADTs reported as 5.0 (infer from the
   'arm_boot_flags' introduced in 5.1)

 - CONFIG_RANDOMIZE_BASE now enabled in defconfig

 - Allow the selection of ARM64_MODULE_PLTS, currently only done via
   RANDOMIZE_BASE (and an erratum workaround), allowing modules to
   spill over into the vmalloc area

 - Make ZONE_DMA32 configurable

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (54 commits)
  perf: arm_spe: Enable ACPI/Platform automatic module loading
  arm_pmu: acpi: spe: Add initial MADT/SPE probing
  ACPI/PPTT: Add function to return ACPI 6.3 Identical tokens
  ACPI/PPTT: Modify node flag detection to find last IDENTICAL
  x86/entry: Simplify _TIF_SYSCALL_EMU handling
  arm64: rename dump_instr as dump_kernel_instr
  arm64/mm: Drop [PTE|PMD]_TYPE_FAULT
  arm64: Implement panic_smp_self_stop()
  arm64: Improve parking of stopped CPUs
  arm64: Expose FRINT capabilities to userspace
  arm64: Expose ARMv8.5 CondM capability to userspace
  arm64: defconfig: enable CONFIG_RANDOMIZE_BASE
  arm64: ARM64_MODULES_PLTS must depend on MODULES
  arm64: bpf: do not allocate executable memory
  arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
  arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
  arm64: module: create module allocations without exec permissions
  arm64: Allow user selection of ARM64_MODULE_PLTS
  acpi/arm64: ignore 5.1 FADTs that are reported as 5.0
  arm64: Allow selecting Pseudo-NMI again
  ...
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 12
-rw-r--r--  arch/arm64/mm/fault.c       | 61
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 12
-rw-r--r--  arch/arm64/mm/init.c        |  5
-rw-r--r--  arch/arm64/mm/mmu.c         | 14
-rw-r--r--  arch/arm64/mm/pageattr.c    | 48
6 files changed, 92 insertions(+), 60 deletions(-)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 5992eb9a9a08..1d17dbeafe76 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -80,10 +80,6 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
static int __init arm64_dma_init(void)
{
- WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
- TAINT_CPU_OUT_OF_SPEC,
- "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
- ARCH_DMA_MINALIGN, cache_line_size());
return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
}
arch_initcall(arm64_dma_init);
@@ -461,6 +457,14 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
+ int cls = cache_line_size_of_cpu();
+
+ WARN_TAINT(!coherent && cls > ARCH_DMA_MINALIGN,
+ TAINT_CPU_OUT_OF_SPEC,
+ "%s %s: ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
+ dev_driver_string(dev), dev_name(dev),
+ ARCH_DMA_MINALIGN, cls);
+
dev->dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
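
The hunk above moves the cache-line check from a boot-time WARN_TAINT into per-device DMA setup, so it only fires for a non-coherent device whose CPU writeback granule (CTR_EL0.CWG) exceeds ARCH_DMA_MINALIGN. As background, a minimal sketch of how such a per-CPU line size can be derived from CTR_EL0 (the helper and accessor names here are illustrative, not the exact code behind cache_line_size_of_cpu()):

/* Sketch only: turn CTR_EL0.CWG (bits [27:24]) into a byte count. */
static int sketch_cache_line_size_of_cpu(void)
{
	u32 ctr = read_cpuid_cachetype();	/* assumed accessor for CTR_EL0 */
	u32 cwg = (ctr >> 24) & 0xf;		/* log2(number of words) of the CWG */

	/* CWG == 0 means "not reported"; fall back to the build-time minimum. */
	return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
}
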
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2d115016feb4..c8c61b1eb479 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -384,40 +384,31 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
#define VM_FAULT_BADACCESS 0x020000
static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
- unsigned int mm_flags, unsigned long vm_flags,
- struct task_struct *tsk)
+ unsigned int mm_flags, unsigned long vm_flags)
{
- struct vm_area_struct *vma;
- vm_fault_t fault;
+ struct vm_area_struct *vma = find_vma(mm, addr);
- vma = find_vma(mm, addr);
- fault = VM_FAULT_BADMAP;
if (unlikely(!vma))
- goto out;
- if (unlikely(vma->vm_start > addr))
- goto check_stack;
+ return VM_FAULT_BADMAP;
/*
* Ok, we have a good vm_area for this memory access, so we can handle
* it.
*/
-good_area:
+ if (unlikely(vma->vm_start > addr)) {
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ return VM_FAULT_BADMAP;
+ if (expand_stack(vma, addr))
+ return VM_FAULT_BADMAP;
+ }
+
/*
* Check that the permissions on the VMA allow for the fault which
* occurred.
*/
- if (!(vma->vm_flags & vm_flags)) {
- fault = VM_FAULT_BADACCESS;
- goto out;
- }
-
+ if (!(vma->vm_flags & vm_flags))
+ return VM_FAULT_BADACCESS;
return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
-
-check_stack:
- if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
- goto good_area;
-out:
- return fault;
}
static bool is_el0_instruction_abort(unsigned int esr)
@@ -425,12 +416,20 @@ static bool is_el0_instruction_abort(unsigned int esr)
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}
+/*
+ * Note: not valid for EL1 DC IVAC, but we never use that such that it
+ * should fault. EL0 cannot issue DC IVAC (undef).
+ */
+static bool is_write_abort(unsigned int esr)
+{
+ return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
+}
+
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
const struct fault_info *inf;
- struct task_struct *tsk;
- struct mm_struct *mm;
+ struct mm_struct *mm = current->mm;
vm_fault_t fault, major = 0;
unsigned long vm_flags = VM_READ | VM_WRITE;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -438,9 +437,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (notify_page_fault(regs, esr))
return 0;
- tsk = current;
- mm = tsk->mm;
-
/*
* If we're in an interrupt or have no user context, we must not take
* the fault.
@@ -453,7 +449,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (is_el0_instruction_abort(esr)) {
vm_flags = VM_EXEC;
- } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
+ mm_flags |= FAULT_FLAG_INSTRUCTION;
+ } else if (is_write_abort(esr)) {
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
}
@@ -492,12 +489,14 @@ retry:
*/
might_sleep();
#ifdef CONFIG_DEBUG_VM
- if (!user_mode(regs) && !search_exception_tables(regs->pc))
+ if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
+ up_read(&mm->mmap_sem);
goto no_context;
+ }
#endif
}
- fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
+ fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
major |= fault & VM_FAULT_MAJOR;
if (fault & VM_FAULT_RETRY) {
@@ -537,11 +536,11 @@ retry:
* that point.
*/
if (major) {
- tsk->maj_flt++;
+ current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
addr);
} else {
- tsk->min_flt++;
+ current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
addr);
}
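
Two things change in fault.c: __do_page_fault() loses its goto-based control flow (and the now-unused tsk argument) in favour of early returns, and the write/cache-maintenance distinction moves into the named helper is_write_abort(), with EL0 instruction aborts additionally tagged FAULT_FLAG_INSTRUCTION. For readers unfamiliar with the syndrome layout, a minimal sketch of the two ESR bits the helper consults (mask names here are illustrative; the kernel's definitions live in arch/arm64/include/asm/esr.h):

/* Sketch of the ESR_ELx ISS bits behind is_write_abort(). */
#define SKETCH_ESR_WNR	(1UL << 6)	/* Write-not-Read: set for a write access */
#define SKETCH_ESR_CM	(1UL << 8)	/* set for cache-maintenance instructions */

static bool sketch_is_write_abort(unsigned long esr)
{
	/*
	 * A DC instruction reports WnR=1 even though nothing is written,
	 * so it must not be treated as a write fault.
	 */
	return (esr & SKETCH_ESR_WNR) && !(esr & SKETCH_ESR_CM);
}
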
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index f475e54fbc43..bbeb6a5a6ba6 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -228,7 +228,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
if (sz == PUD_SIZE) {
ptep = (pte_t *)pudp;
- } else if (sz == (PAGE_SIZE * CONT_PTES)) {
+ } else if (sz == (CONT_PTE_SIZE)) {
pmdp = pmd_alloc(mm, pudp, addr);
WARN_ON(addr & (sz - 1));
@@ -246,7 +246,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
ptep = huge_pmd_share(mm, addr, pudp);
else
ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
- } else if (sz == (PMD_SIZE * CONT_PMDS)) {
+ } else if (sz == (CONT_PMD_SIZE)) {
pmdp = pmd_alloc(mm, pudp, addr);
WARN_ON(addr & (sz - 1));
return (pte_t *)pmdp;
@@ -454,9 +454,9 @@ static int __init hugetlbpage_init(void)
#ifdef CONFIG_ARM64_4K_PAGES
add_huge_page_size(PUD_SIZE);
#endif
- add_huge_page_size(PMD_SIZE * CONT_PMDS);
+ add_huge_page_size(CONT_PMD_SIZE);
add_huge_page_size(PMD_SIZE);
- add_huge_page_size(PAGE_SIZE * CONT_PTES);
+ add_huge_page_size(CONT_PTE_SIZE);
return 0;
}
@@ -470,9 +470,9 @@ static __init int setup_hugepagesz(char *opt)
#ifdef CONFIG_ARM64_4K_PAGES
case PUD_SIZE:
#endif
- case PMD_SIZE * CONT_PMDS:
+ case CONT_PMD_SIZE:
case PMD_SIZE:
- case PAGE_SIZE * CONT_PTES:
+ case CONT_PTE_SIZE:
add_huge_page_size(ps);
return 1;
}
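
CONT_PTE_SIZE and CONT_PMD_SIZE are the pre-multiplied forms of the products they replace, so this is purely a readability change. For a 4K-page kernel the arithmetic works out as below (a sketch of the values, not a copy of asm/pgtable-hwdef.h):

/* Sketch: contiguous hugepage sizes with a 4 KiB base page. */
#define SKETCH_PAGE_SIZE	(4UL << 10)				/*  4 KiB */
#define SKETCH_CONT_PTES	16					/* 16 contiguous PTEs */
#define SKETCH_CONT_PTE_SIZE	(SKETCH_CONT_PTES * SKETCH_PAGE_SIZE)	/* 64 KiB */

#define SKETCH_PMD_SIZE		(2UL << 20)				/*  2 MiB */
#define SKETCH_CONT_PMDS	16					/* 16 contiguous PMDs */
#define SKETCH_CONT_PMD_SIZE	(SKETCH_CONT_PMDS * SKETCH_PMD_SIZE)	/* 32 MiB */
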
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 749c9b269f08..f3c795278def 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -180,8 +180,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
- if (IS_ENABLED(CONFIG_ZONE_DMA32))
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+#endif
max_zone_pfns[ZONE_NORMAL] = max;
free_area_init_nodes(max_zone_pfns);
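
The switch from IS_ENABLED() to a real preprocessor guard is needed because ZONE_DMA32 is now a user-selectable option: when it is disabled the ZONE_DMA32 enumerator does not exist at all, and an IS_ENABLED() branch, although dead, still has to compile. A self-contained sketch of the pattern, with made-up names:

/* Sketch (illustrative names only): why #ifdef is required here. */
enum sketch_zone {
	SKETCH_ZONE_NORMAL,
#ifdef CONFIG_SKETCH_DMA32
	SKETCH_ZONE_DMA32,	/* enumerator only exists when the option is on */
#endif
	SKETCH_MAX_ZONES,
};

static void sketch_zone_sizes_init(unsigned long *max_pfns)
{
#ifdef CONFIG_SKETCH_DMA32
	max_pfns[SKETCH_ZONE_DMA32] = 0x100000;	/* compiled out, not just optimised out */
#endif
	/*
	 * if (IS_ENABLED(CONFIG_SKETCH_DMA32)) max_pfns[SKETCH_ZONE_DMA32] = ...;
	 * would fail to build with the option off, since the enumerator is gone.
	 */
}
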
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e5ae8663f230..3645f29bd814 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -765,7 +765,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return 0;
}
-#endif /* CONFIG_ARM64_64K_PAGES */
+#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap)
{
@@ -960,32 +960,28 @@ int __init arch_ioremap_pmd_supported(void)
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
- pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
- pgprot_val(mk_sect_prot(prot)));
- pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);
+ pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
/* Only allow permission changes for now */
if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
pud_val(new_pud)))
return 0;
- BUG_ON(phys & ~PUD_MASK);
+ VM_BUG_ON(phys & ~PUD_MASK);
set_pud(pudp, new_pud);
return 1;
}
int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
- pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
- pgprot_val(mk_sect_prot(prot)));
- pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);
+ pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
/* Only allow permission changes for now */
if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
pmd_val(new_pmd)))
return 0;
- BUG_ON(phys & ~PMD_MASK);
+ VM_BUG_ON(phys & ~PMD_MASK);
set_pmd(pmdp, new_pmd);
return 1;
}
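
Two small cleanups here: the open-coded section protection is folded into mk_pud_sect_prot()/mk_pmd_sect_prot(), introduced elsewhere in this series, and the alignment checks are demoted from BUG_ON() to VM_BUG_ON(), so they only fire on CONFIG_DEBUG_VM kernels. A sketch of what such a helper amounts to (the exact definition sits in asm/pgtable.h and may differ in detail):

/* Sketch: derive a section (block) mapping protection from a page protection. */
static inline pgprot_t sketch_mk_pmd_sect_prot(pgprot_t prot)
{
	/* clear the table-type bit and mark the entry as a section/block */
	return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
}
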
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 47b057bfa803..fcdcf6cd7677 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -151,17 +151,48 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
__pgprot(PTE_VALID));
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ struct page_change_data data = {
+ .set_mask = __pgprot(0),
+ .clear_mask = __pgprot(PTE_VALID),
+ };
+
+ if (!rodata_full)
+ return 0;
+
+ return apply_to_page_range(&init_mm,
+ (unsigned long)page_address(page),
+ PAGE_SIZE, change_page_range, &data);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ struct page_change_data data = {
+ .set_mask = __pgprot(PTE_VALID | PTE_WRITE),
+ .clear_mask = __pgprot(PTE_RDONLY),
+ };
+
+ if (!rodata_full)
+ return 0;
+
+ return apply_to_page_range(&init_mm,
+ (unsigned long)page_address(page),
+ PAGE_SIZE, change_page_range, &data);
+}
+
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
+ if (!debug_pagealloc_enabled() && !rodata_full)
+ return;
+
set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
-#ifdef CONFIG_HIBERNATION
+
/*
- * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
- * is used to determine if a linear map page has been marked as not-valid by
- * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
- * This is based on kern_addr_valid(), which almost does what we need.
+ * This function is used to determine if a linear map page has been marked as
+ * not-valid. Walk the page table and check the PTE_VALID bit. This is based
+ * on kern_addr_valid(), which almost does what we need.
*
* Because this is only called on the kernel linear map, p?d_sect() implies
* p?d_present(). When debug_pagealloc is enabled, sections mappings are
@@ -175,6 +206,9 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
+ if (!debug_pagealloc_enabled() && !rodata_full)
+ return true;
+
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
@@ -196,5 +230,3 @@ bool kernel_page_present(struct page *page)
ptep = pte_offset_kernel(pmdp, addr);
return pte_valid(READ_ONCE(*ptep));
}
-#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
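
The two new set_direct_map_*() functions are what CONFIG_ARCH_HAS_SET_DIRECT_MAP (wired up by this series) exposes to the core mm: with rodata=full they let the vmalloc free path for VM_FLUSH_RESET_PERMS regions (modules, BPF, kprobe slots) drop and later restore the linear-map alias of a page; without rodata_full the linear map may use block mappings, so they deliberately do nothing. A per-page sketch of the consumer side, condensed from what mm/vmalloc.c does in batched form:

/* Sketch: how a freed executable page's direct-map alias is typically reset. */
static void sketch_reset_direct_map(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);

	set_direct_map_invalid_noflush(page);		/* make the alias not-present   */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);	/* drop any stale TLB entry     */
	set_direct_map_default_noflush(page);		/* restore a default RW mapping */
}

__kernel_map_pages() and kernel_page_present() gain matching guards so they are no-ops (and report pages as present) when neither debug_pagealloc nor rodata=full has forced the linear map down to page granularity.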