author    | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-22 08:36:16 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-22 08:36:16 -0700
commit    | 54dee406374ce8adb352c48e175176247cb8db7c (patch)
tree      | 5a935f0e3bdbd28d869f5cd319891023e1152a3a
parent    | 651bae980e3f3e6acf0d297ced08f9d7af71a8c9 (diff)
parent    | 7a0a93c51799edc45ee57c6cc1679aa94f1e03d5 (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
- Fix SPE probe failure when backing auxbuf with high-order pages
- Fix handling of DMA allocations from outside of the vmalloc area
- Fix generation of build-id ELF section for vDSO object
- Disable huge I/O mappings if kernel page table dumping is enabled
- A few other minor fixes (comments, Kconfig, etc.)
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: vdso: Explicitly add build-id option
arm64/mm: Inhibit huge-vmap with ptdump
arm64: Print physical address of page table base in show_pte()
arm64: don't trash config with compat symbol if COMPAT is disabled
arm64: assembler: Update comment above cond_yield_neon() macro
drivers/perf: arm_spe: Don't error on high-order pages for aux buf
arm64/iommu: handle non-remapped addresses in ->mmap and ->get_sgtable
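
The vDSO fix adds an explicit `--build-id` link flag (see the `arch/arm64/kernel/vdso/Makefile` hunk below) so the vDSO object carries an `NT_GNU_BUILD_ID` note that debug and profiling tools can match against. As a rough userspace illustration, not part of this series, the note can be read back from the mapped vDSO by walking its `PT_NOTE` segment; this sketch assumes the usual vDSO layout where the link base is zero, so `p_vaddr` offsets apply directly to the `AT_SYSINFO_EHDR` base:

```c
/* Hypothetical sketch: dump the vDSO's GNU build-id note from userspace. */
#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>

int main(void)
{
	uintptr_t base = getauxval(AT_SYSINFO_EHDR);	/* start of the mapped vDSO */
	if (!base)
		return 1;

	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)base;
	Elf64_Phdr *phdr = (Elf64_Phdr *)(base + ehdr->e_phoff);

	for (int i = 0; i < ehdr->e_phnum; i++) {
		if (phdr[i].p_type != PT_NOTE)
			continue;

		/* Assumes link base 0: the segment lives at base + p_vaddr. */
		uint8_t *p = (uint8_t *)(base + phdr[i].p_vaddr);
		uint8_t *end = p + phdr[i].p_filesz;

		while (p + sizeof(Elf64_Nhdr) <= end) {
			Elf64_Nhdr *nhdr = (Elf64_Nhdr *)p;
			char *name = (char *)(nhdr + 1);
			uint8_t *desc = (uint8_t *)name + ((nhdr->n_namesz + 3) & ~3u);

			if (nhdr->n_type == NT_GNU_BUILD_ID &&
			    nhdr->n_namesz == 4 && !memcmp(name, "GNU", 4)) {
				printf("vDSO build-id: ");
				for (uint32_t j = 0; j < nhdr->n_descsz; j++)
					printf("%02x", desc[j]);
				printf("\n");
			}
			p = desc + ((nhdr->n_descsz + 3) & ~3u);
		}
	}
	return 0;
}
```

On a kernel whose vDSO was linked without `--build-id`, the linker emits no build-id note and the loop above simply finds nothing, which is the symptom the Makefile change addresses.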
-rw-r--r-- | arch/arm64/Kconfig                 |  2
-rw-r--r-- | arch/arm64/include/asm/assembler.h | 11
-rw-r--r-- | arch/arm64/kernel/vdso/Makefile    |  4
-rw-r--r-- | arch/arm64/mm/dma-mapping.c        | 10
-rw-r--r-- | arch/arm64/mm/fault.c              |  5
-rw-r--r-- | arch/arm64/mm/mmu.c                | 11
-rw-r--r-- | drivers/perf/arm_spe_pmu.c         | 10
7 files changed, 30 insertions, 23 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 76f6e4765f49..ca9c175fb949 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -69,7 +69,7 @@ config ARM64
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
 	select ARCH_SUPPORTS_NUMA_BALANCING
-	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARM_AMBA
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 039fbd822ec6..92b6b7cf67dd 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -718,12 +718,11 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
  * the output section, any use of such directives is undefined.
  *
  * The yield itself consists of the following:
- * - Check whether the preempt count is exactly 1, in which case disabling
- *   preemption once will make the task preemptible. If this is not the case,
- *   yielding is pointless.
- * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
- *   kernel mode NEON (which will trigger a reschedule), and branch to the
- *   yield fixup code.
+ * - Check whether the preempt count is exactly 1 and a reschedule is also
+ *   needed. If so, calling of preempt_enable() in kernel_neon_end() will
+ *   trigger a reschedule. If it is not the case, yielding is pointless.
+ * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
+ *   code.
  *
  * This macro sequence may clobber all CPU state that is not guaranteed by the
  * AAPCS to be preserved across an ordinary function call.
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 744b9dbaba03..fa230ff09aa1 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -12,8 +12,8 @@ obj-vdso := gettimeofday.o note.o sigreturn.o
 targets := $(obj-vdso) vdso.so vdso.so.dbg
 obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
 
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \
-		$(call ld-option, --hash-style=sysv) -n -T
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
+		--build-id -n -T
 
 # Disable gcov profiling for VDSO code
 GCOV_PROFILE := n
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 78c0a72f822c..674860e3e478 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -249,6 +249,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
+	if (!is_vmalloc_addr(cpu_addr)) {
+		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+		return __swiotlb_mmap_pfn(vma, pfn, size);
+	}
+
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
 		/*
 		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
@@ -272,6 +277,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
+	if (!is_vmalloc_addr(cpu_addr)) {
+		struct page *page = virt_to_page(cpu_addr);
+		return __swiotlb_get_sgtable_page(sgt, page, size);
+	}
+
 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
 		/*
 		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0cb0e09995e1..dda234bcc020 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -171,9 +171,10 @@ static void show_pte(unsigned long addr)
 		return;
 	}
 
-	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
+	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp=%016lx\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-		 mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd);
+		 mm == &init_mm ? VA_BITS : (int)vabits_user,
+		 (unsigned long)virt_to_phys(mm->pgd));
 	pgdp = pgd_offset(mm, addr);
 	pgd = READ_ONCE(*pgdp);
 	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a170c6369a68..a1bfc4413982 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -955,13 +955,18 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 
 int __init arch_ioremap_pud_supported(void)
 {
-	/* only 4k granule supports level 1 block mappings */
-	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+	/*
+	 * Only 4k granule supports level 1 block mappings.
+	 * SW table walks can't handle removal of intermediate entries.
+	 */
+	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+	       !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
 }
 
 int __init arch_ioremap_pmd_supported(void)
 {
-	return 1;
+	/* See arch_ioremap_pud_supported() */
+	return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
 }
 
 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 7cb766dafe85..e120f933412a 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -855,16 +855,8 @@ static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
 	if (!pglist)
 		goto out_free_buf;
 
-	for (i = 0; i < nr_pages; ++i) {
-		struct page *page = virt_to_page(pages[i]);
-
-		if (PagePrivate(page)) {
-			pr_warn("unexpected high-order page for auxbuf!");
-			goto out_free_pglist;
-		}
-
+	for (i = 0; i < nr_pages; ++i)
 		pglist[i] = virt_to_page(pages[i]);
-	}
 
 	buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
 	if (!buf->base)
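
The two dma-mapping.c hunks above hinge on one distinction: buffers that were remapped live in the vmalloc area, while non-remapped allocations are plain linear-map addresses for which `virt_to_page()` is the right translation. A minimal kernel-context sketch of that split, using a hypothetical helper name and making no claim to match the driver code above:

```c
/*
 * Hypothetical kernel-context helper illustrating the check used by the
 * ->mmap/->get_sgtable fixes: vmalloc-area addresses must be translated
 * page by page, while linear-map addresses can go through virt_to_page().
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct page *cpu_addr_to_page(void *cpu_addr)
{
	if (is_vmalloc_addr(cpu_addr))
		return vmalloc_to_page(cpu_addr);	/* remapped buffer */

	return virt_to_page(cpu_addr);			/* linear-map (non-remapped) */
}
```

In the patch itself the non-vmalloc case is handed straight to the existing swiotlb helpers (`__swiotlb_mmap_pfn()`, `__swiotlb_get_sgtable_page()`), so the remap-aware paths only ever see vmalloc addresses.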