From f4693c2716b35d0846fd45a4ad7db78bfb25efc8 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Thu, 8 Oct 2020 17:36:00 +0200
Subject: arm64: mm: extend linear region for 52-bit VA configurations

For historical reasons, the arm64 kernel VA space is configured as two
equally sized halves, i.e., on a 48-bit VA build, the VA space is split
into a 47-bit vmalloc region and a 47-bit linear region.

When support for 52-bit virtual addressing was added, this equal split
was kept, resulting in a substantial waste of virtual address space in
the linear region:

                         48-bit VA                     52-bit VA
0xffff_ffff_ffff_ffff +-------------+               +-------------+
                      |   vmalloc   |               |   vmalloc   |
0xffff_8000_0000_0000 +-------------+ _PAGE_END(48) +-------------+
                      |   linear    |               :             :
0xffff_0000_0000_0000 +-------------+               :             :
                      :             :               :             :
                      :             :               :             :
                      :             :               :             :
                      :             :               :  currently  :
                      :  unusable   :               :             :
                      :             :               :   unused    :
                      :     by      :               :             :
                      :             :               :             :
                      :  hardware   :               :             :
                      :             :               :             :
0xfff8_0000_0000_0000 :             : _PAGE_END(52) +-------------+
                      :             :               |             |
                      :             :               |             |
                      :             :               |             |
                      :             :               |             |
                      :             :               |             |
                      :  unusable   :               |             |
                      :             :               |   linear    |
                      :     by      :               |             |
                      :             :               |   region    |
                      :  hardware   :               |             |
                      :             :               |             |
                      :             :               |             |
                      :             :               |             |
                      :             :               |             |
                      :             :               |             |
                      :             :               |             |
0xfff0_0000_0000_0000 +-------------+  PAGE_OFFSET  +-------------+

As illustrated above, the 52-bit VA kernel uses 47 bits for the vmalloc
space (as before), to ensure that a single 64k granule kernel image can
support any 64k granule capable system, regardless of whether it
supports the 52-bit virtual addressing extension. However, due to the
fact that the VA space is still split in equal halves, the linear
region is only 2^51 bytes in size, wasting almost half of the 52-bit VA
space.

Let's fix this, by abandoning the equal split, and simply assigning all
VA space outside of the vmalloc region to the linear region.

The KASAN shadow region is reconfigured so that it ends at the start of
the vmalloc region, and grows downwards. That way, the arrangement of
the vmalloc space (which contains kernel mappings, modules, BPF region,
the vmemmap array etc) is identical between non-KASAN and KASAN builds,
which aids debugging.
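As a standalone cross-check of the arithmetic above (plain C, not part
of the patch; the only assumption beyond the text is the existing
definition _PAGE_END(va) == -(1 << (va - 1)) from asm/memory.h):

#include <stdio.h>

int main(void)
{
	/* 52-bit VA kernel built with VA_BITS_MIN == 48 */
	unsigned long long page_offset = -(1ULL << 52);	/* 0xfff0_0000_0000_0000 */
	unsigned long long page_end_52 = -(1ULL << 51);	/* 0xfff8_0000_0000_0000, old top of linear */
	unsigned long long page_end_48 = -(1ULL << 47);	/* 0xffff_8000_0000_0000, start of vmalloc */

	printf("linear region, old: %llu TiB\n", (page_end_52 - page_offset) >> 40);	/* 2048 */
	printf("linear region, new: %llu TiB\n", (page_end_48 - page_offset) >> 40);	/* 3968 */
	return 0;
}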
Signed-off-by: Ard Biesheuvel Reviewed-by: Steve Capper Link: https://lore.kernel.org/r/20201008153602.9467-3-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 20 ++++++++++---------- arch/arm64/include/asm/memory.h | 12 +++++------- arch/arm64/mm/init.c | 2 +- 3 files changed, 16 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1515f6f153a0..c6092cbb39af 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -331,16 +331,16 @@ config BROKEN_GAS_INST config KASAN_SHADOW_OFFSET hex depends on KASAN - default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS - default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS - default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS - default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS - default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS - default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS - default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS - default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS - default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS - default 0xeffffff900000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS + default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS + default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS + default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS + default 0xdfffffc000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS + default 0xdffffff800000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS + default 0xefff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS + default 0xefffc00000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS + default 0xeffffe0000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS + default 0xefffffc000000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS + default 0xeffffff800000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS default 0xffffffffffffffff source "arch/arm64/Kconfig.platforms" diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index cd61239bae8c..8e89f9b9091e 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -44,7 +44,7 @@ #define _PAGE_OFFSET(va) (-(UL(1) << (va))) #define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS)) #define KIMAGE_VADDR (MODULES_END) -#define BPF_JIT_REGION_START (KASAN_SHADOW_END) +#define BPF_JIT_REGION_START (_PAGE_END(VA_BITS_MIN)) #define BPF_JIT_REGION_SIZE (SZ_128M) #define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) @@ -76,10 +76,11 @@ #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) #define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \ + KASAN_SHADOW_OFFSET) +#define PAGE_END (KASAN_SHADOW_END - (1UL << (vabits_actual - KASAN_SHADOW_SCALE_SHIFT))) #define KASAN_THREAD_SHIFT 1 #else #define KASAN_THREAD_SHIFT 0 -#define KASAN_SHADOW_END (_PAGE_END(VA_BITS_MIN)) +#define PAGE_END (_PAGE_END(VA_BITS_MIN)) #endif /* CONFIG_KASAN */ #define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) @@ -167,7 +168,6 @@ #include extern u64 vabits_actual; -#define PAGE_END (_PAGE_END(vabits_actual)) extern s64 memstart_addr; /* PHYS_OFFSET - the physical address of the start of memory. */ @@ -238,11 +238,9 @@ static inline const void *__tag_set(const void *addr, u8 tag) /* - * The linear kernel range starts at the bottom of the virtual address - * space. 
Testing the top bit for the start of the region is a - * sufficient check and avoids having to worry about the tag. + * The linear kernel range starts at the bottom of the virtual address space. */ -#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1))) +#define __is_lm_address(addr) (((u64)(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET)) #define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET) #define __kimg_to_phys(addr) ((addr) - kimage_voffset) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 095540667f0f..7e15d92836d8 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -269,7 +269,7 @@ static void __init fdt_enforce_memory_region(void) void __init arm64_memblock_init(void) { - const s64 linear_region_size = BIT(vabits_actual - 1); + const s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual); /* Handle linux,usable-memory-range property */ fdt_enforce_memory_region(); -- cgit v1.2.3 From 8c96400d6a39be763130a5c493647c57726f7013 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 8 Oct 2020 17:36:01 +0200 Subject: arm64: mm: make vmemmap region a projection of the linear region Now that we have reverted the introduction of the vmemmap struct page pointer and the separate physvirt_offset, we can simplify things further, and place the vmemmap region in the VA space in such a way that virtual to page translations and vice versa can be implemented using a single arithmetic shift. One happy coincidence resulting from this is that the 48-bit/4k and 52-bit/64k configurations (which are assumed to be the two most prevalent) end up with the same placement of the vmemmap region. In a subsequent patch, we will take advantage of this, and unify the memory maps even more. Signed-off-by: Ard Biesheuvel Reviewed-by: Steve Capper Link: https://lore.kernel.org/r/20201008153602.9467-4-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/memory.h | 14 ++++++-------- arch/arm64/mm/init.c | 2 ++ 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 8e89f9b9091e..ecd6342e27ca 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -30,8 +30,8 @@ * keep a constant PAGE_OFFSET and "fallback" to using the higher end * of the VMEMMAP where 52-bit support is not available in hardware. 
*/ -#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \ - >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)) +#define VMEMMAP_SHIFT (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT) +#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) >> VMEMMAP_SHIFT) /* * PAGE_OFFSET - the virtual address of the start of the linear map, at the @@ -50,7 +50,7 @@ #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) #define MODULES_VADDR (BPF_JIT_REGION_END) #define MODULES_VSIZE (SZ_128M) -#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M) +#define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT))) #define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE) #define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) @@ -308,15 +308,13 @@ static inline void *phys_to_virt(phys_addr_t x) #else #define page_to_virt(x) ({ \ __typeof__(x) __page = x; \ - u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\ - u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE); \ + u64 __addr = (u64)__page << VMEMMAP_SHIFT; \ (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\ }) #define virt_to_page(x) ({ \ - u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE; \ - u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \ - (struct page *)__addr; \ + u64 __addr = __tag_reset((u64)(x)) & PAGE_MASK; \ + (struct page *)((s64)__addr >> VMEMMAP_SHIFT); \ }) #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */ diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 7e15d92836d8..3a5e9f9298e9 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -502,6 +502,8 @@ static void __init free_unused_memmap(void) */ void __init mem_init(void) { + BUILD_BUG_ON(!is_power_of_2(sizeof(struct page))); + if (swiotlb_force == SWIOTLB_FORCE || max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit)) swiotlb_init(1); -- cgit v1.2.3 From 9ad7c6d5e75b160c9ce5775db610d964af45b83f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 8 Oct 2020 17:36:02 +0200 Subject: arm64: mm: tidy up top of kernel VA space Tidy up the way the top of the kernel VA space is organized, by mirroring the 256 MB region we have below the vmalloc space, and populating it top down with the PCI I/O space, some guard regions, and the fixmap region. The latter region is itself populated top down, and today only covers about 4 MB, and so 224 MB is ample, and no guard region is therefore required. The resulting layout is identical between 48-bit/4k and 52-bit/64k configurations. 
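Measured as offsets below VMEMMAP_START, the new arrangement can be
cross-checked with a standalone sketch (plain C, not part of the patch;
the SZ_* values come from the hunk below, while PCI_IO_SIZE == 16M is an
assumption carried over from the existing arm64 headers):

#include <stdio.h>

#define SZ_8M	(8UL << 20)
#define SZ_16M	(16UL << 20)	/* assumed PCI_IO_SIZE */
#define SZ_32M	(32UL << 20)
#define SZ_256M	(256UL << 20)

int main(void)
{
	unsigned long pci_io_end   = SZ_8M;		/* 8M guard directly below vmemmap */
	unsigned long pci_io_start = pci_io_end + SZ_16M;
	unsigned long fixaddr_top  = SZ_32M;		/* 8M guard below the PCI I/O space */
	unsigned long vmalloc_end  = SZ_256M;

	printf("PCI I/O window: [-%luM, -%luM)\n", pci_io_start >> 20, pci_io_end >> 20);
	printf("fixmap top at -%luM, %luM left down to VMALLOC_END at -%luM\n",
	       fixaddr_top >> 20, (vmalloc_end - fixaddr_top) >> 20, vmalloc_end >> 20);
	return 0;
}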
Signed-off-by: Ard Biesheuvel Reviewed-by: Steve Capper Link: https://lore.kernel.org/r/20201008153602.9467-5-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/memory.h | 4 ++-- arch/arm64/include/asm/pgtable.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index ecd6342e27ca..03e9b112bd94 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -52,9 +52,9 @@ #define MODULES_VSIZE (SZ_128M) #define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT))) #define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE) -#define PCI_IO_END (VMEMMAP_START - SZ_2M) +#define PCI_IO_END (VMEMMAP_START - SZ_8M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) -#define FIXADDR_TOP (PCI_IO_START - SZ_2M) +#define FIXADDR_TOP (VMEMMAP_START - SZ_32M) #if VA_BITS > 48 #define VA_BITS_MIN (48) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 4ff12a7adcfd..ec307b8bcb15 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -22,7 +22,7 @@ * and fixed mappings */ #define VMALLOC_START (MODULES_END) -#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) +#define VMALLOC_END (VMEMMAP_START - SZ_256M) #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) -- cgit v1.2.3 From e2a2190a80ca0ebddd52c766caf08908d71fb949 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 26 Oct 2020 13:31:47 +0000 Subject: arm64: uaccess: move uao_* alternatives to asm-uaccess.h The uao_* alternative asm macros are only used by the uaccess assembly routines in arch/arm64/lib/, where they are included indirectly via asm-uaccess.h. Since they're specific to the uaccess assembly (and will lose the alternatives in subsequent patches), let's move them into asm-uaccess.h. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon [will: update #include in mte.S to pull in uao asm macros] Signed-off-by: Will Deacon --- arch/arm64/include/asm/alternative.h | 59 ------------------------------------ arch/arm64/include/asm/asm-uaccess.h | 59 ++++++++++++++++++++++++++++++++++++ arch/arm64/lib/mte.S | 2 +- 3 files changed, 60 insertions(+), 60 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 619db9b4c9d5..5d6b89d26de4 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -224,65 +224,6 @@ alternative_endif _asm_extable 9999b, \label .endm -/* - * Generate the assembly for UAO alternatives with exception table entries. - * This is complicated as there is no post-increment or pair versions of the - * unprivileged instructions, and USER() only works for single instructions. 
- */ -#ifdef CONFIG_ARM64_UAO - .macro uao_ldp l, reg1, reg2, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: ldp \reg1, \reg2, [\addr], \post_inc; -8889: nop; - nop; - alternative_else - ldtr \reg1, [\addr]; - ldtr \reg2, [\addr, #8]; - add \addr, \addr, \post_inc; - alternative_endif - - _asm_extable 8888b,\l; - _asm_extable 8889b,\l; - .endm - - .macro uao_stp l, reg1, reg2, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: stp \reg1, \reg2, [\addr], \post_inc; -8889: nop; - nop; - alternative_else - sttr \reg1, [\addr]; - sttr \reg2, [\addr, #8]; - add \addr, \addr, \post_inc; - alternative_endif - - _asm_extable 8888b,\l; - _asm_extable 8889b,\l; - .endm - - .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: \inst \reg, [\addr], \post_inc; - nop; - alternative_else - \alt_inst \reg, [\addr]; - add \addr, \addr, \post_inc; - alternative_endif - - _asm_extable 8888b,\l; - .endm -#else - .macro uao_ldp l, reg1, reg2, addr, post_inc - USER(\l, ldp \reg1, \reg2, [\addr], \post_inc) - .endm - .macro uao_stp l, reg1, reg2, addr, post_inc - USER(\l, stp \reg1, \reg2, [\addr], \post_inc) - .endm - .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc - USER(\l, \inst \reg, [\addr], \post_inc) - .endm -#endif - #endif /* __ASSEMBLY__ */ /* diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index f68a0e64482a..479222ab82d4 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -58,4 +58,63 @@ alternative_else_nop_endif .endm #endif +/* + * Generate the assembly for UAO alternatives with exception table entries. + * This is complicated as there is no post-increment or pair versions of the + * unprivileged instructions, and USER() only works for single instructions. 
+ */ +#ifdef CONFIG_ARM64_UAO + .macro uao_ldp l, reg1, reg2, addr, post_inc + alternative_if_not ARM64_HAS_UAO +8888: ldp \reg1, \reg2, [\addr], \post_inc; +8889: nop; + nop; + alternative_else + ldtr \reg1, [\addr]; + ldtr \reg2, [\addr, #8]; + add \addr, \addr, \post_inc; + alternative_endif + + _asm_extable 8888b,\l; + _asm_extable 8889b,\l; + .endm + + .macro uao_stp l, reg1, reg2, addr, post_inc + alternative_if_not ARM64_HAS_UAO +8888: stp \reg1, \reg2, [\addr], \post_inc; +8889: nop; + nop; + alternative_else + sttr \reg1, [\addr]; + sttr \reg2, [\addr, #8]; + add \addr, \addr, \post_inc; + alternative_endif + + _asm_extable 8888b,\l; + _asm_extable 8889b,\l; + .endm + + .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc + alternative_if_not ARM64_HAS_UAO +8888: \inst \reg, [\addr], \post_inc; + nop; + alternative_else + \alt_inst \reg, [\addr]; + add \addr, \addr, \post_inc; + alternative_endif + + _asm_extable 8888b,\l; + .endm +#else + .macro uao_ldp l, reg1, reg2, addr, post_inc + USER(\l, ldp \reg1, \reg2, [\addr], \post_inc) + .endm + .macro uao_stp l, reg1, reg2, addr, post_inc + USER(\l, stp \reg1, \reg2, [\addr], \post_inc) + .endm + .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc + USER(\l, \inst \reg, [\addr], \post_inc) + .endm +#endif + #endif diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S index 03ca6d8b8670..cceed41bba15 100644 --- a/arch/arm64/lib/mte.S +++ b/arch/arm64/lib/mte.S @@ -4,7 +4,7 @@ */ #include -#include +#include #include #include #include -- cgit v1.2.3 From 7cda23da52ad793a578d290e7fcc9cdc1698bba8 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 30 Jun 2020 13:55:59 +0100 Subject: arm64: alternatives: Split up alternative.h asm/alternative.h contains both the macros needed to use alternatives, as well the type definitions and function prototypes for applying them. Split the header in two, so that alternatives can be used from core header files such as linux/compiler.h without the risk of circular includes Acked-by: Peter Zijlstra (Intel) Acked-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/alternative-macros.h | 217 ++++++++++++++++++++++++++++ arch/arm64/include/asm/alternative.h | 208 +------------------------- arch/arm64/include/asm/asm-uaccess.h | 2 +- arch/arm64/include/asm/insn.h | 3 +- arch/arm64/kernel/proton-pack.c | 1 + 5 files changed, 222 insertions(+), 209 deletions(-) create mode 100644 arch/arm64/include/asm/alternative-macros.h (limited to 'arch') diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h new file mode 100644 index 000000000000..5df500dcc627 --- /dev/null +++ b/arch/arm64/include/asm/alternative-macros.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ALTERNATIVE_MACROS_H +#define __ASM_ALTERNATIVE_MACROS_H + +#include + +#define ARM64_CB_PATCH ARM64_NCAPS + +/* A64 instructions are always 32 bits. 
*/ +#define AARCH64_INSN_SIZE 4 + +#ifndef __ASSEMBLY__ + +#include + +#define ALTINSTR_ENTRY(feature) \ + " .word 661b - .\n" /* label */ \ + " .word 663f - .\n" /* new instruction */ \ + " .hword " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* source len */ \ + " .byte 664f-663f\n" /* replacement len */ + +#define ALTINSTR_ENTRY_CB(feature, cb) \ + " .word 661b - .\n" /* label */ \ + " .word " __stringify(cb) "- .\n" /* callback */ \ + " .hword " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* source len */ \ + " .byte 664f-663f\n" /* replacement len */ + +/* + * alternative assembly primitive: + * + * If any of these .org directive fail, it means that insn1 and insn2 + * don't have the same length. This used to be written as + * + * .if ((664b-663b) != (662b-661b)) + * .error "Alternatives instruction length mismatch" + * .endif + * + * but most assemblers die if insn1 or insn2 have a .inst. This should + * be fixed in a binutils release posterior to 2.25.51.0.2 (anything + * containing commit 4e4d08cf7399b606 or c1baaddf8861). + * + * Alternatives with callbacks do not generate replacement instructions. + */ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ + ".if "__stringify(cfg_enabled)" == 1\n" \ + "661:\n\t" \ + oldinstr "\n" \ + "662:\n" \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feature) \ + ".popsection\n" \ + ".subsection 1\n" \ + "663:\n\t" \ + newinstr "\n" \ + "664:\n\t" \ + ".org . - (664b-663b) + (662b-661b)\n\t" \ + ".org . - (662b-661b) + (664b-663b)\n\t" \ + ".previous\n" \ + ".endif\n" + +#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ + ".if "__stringify(cfg_enabled)" == 1\n" \ + "661:\n\t" \ + oldinstr "\n" \ + "662:\n" \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY_CB(feature, cb) \ + ".popsection\n" \ + "663:\n\t" \ + "664:\n\t" \ + ".endif\n" + +#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) + +#define ALTERNATIVE_CB(oldinstr, cb) \ + __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb) +#else + +#include + +.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len + .word \orig_offset - . + .word \alt_offset - . + .hword \feature + .byte \orig_len + .byte \alt_len +.endm + +.macro alternative_insn insn1, insn2, cap, enable = 1 + .if \enable +661: \insn1 +662: .pushsection .altinstructions, "a" + altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f + .popsection + .subsection 1 +663: \insn2 +664: .previous + .org . - (664b-663b) + (662b-661b) + .org . - (662b-661b) + (664b-663b) + .endif +.endm + +/* + * Alternative sequences + * + * The code for the case where the capability is not present will be + * assembled and linked as normal. There are no restrictions on this + * code. + * + * The code for the case where the capability is present will be + * assembled into a special section to be used for dynamic patching. + * Code for that case must: + * + * 1. Be exactly the same length (in bytes) as the default code + * sequence. + * + * 2. Not contain a branch target that is used outside of the + * alternative sequence it is defined in (branches into an + * alternative sequence are not fixed up). + */ + +/* + * Begin an alternative code sequence. 
+ */ +.macro alternative_if_not cap + .set .Lasm_alt_mode, 0 + .pushsection .altinstructions, "a" + altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f + .popsection +661: +.endm + +.macro alternative_if cap + .set .Lasm_alt_mode, 1 + .pushsection .altinstructions, "a" + altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f + .popsection + .subsection 1 + .align 2 /* So GAS knows label 661 is suitably aligned */ +661: +.endm + +.macro alternative_cb cb + .set .Lasm_alt_mode, 0 + .pushsection .altinstructions, "a" + altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0 + .popsection +661: +.endm + +/* + * Provide the other half of the alternative code sequence. + */ +.macro alternative_else +662: + .if .Lasm_alt_mode==0 + .subsection 1 + .else + .previous + .endif +663: +.endm + +/* + * Complete an alternative code sequence. + */ +.macro alternative_endif +664: + .if .Lasm_alt_mode==0 + .previous + .endif + .org . - (664b-663b) + (662b-661b) + .org . - (662b-661b) + (664b-663b) +.endm + +/* + * Callback-based alternative epilogue + */ +.macro alternative_cb_end +662: +.endm + +/* + * Provides a trivial alternative or default sequence consisting solely + * of NOPs. The number of NOPs is chosen automatically to match the + * previous case. + */ +.macro alternative_else_nop_endif +alternative_else + nops (662b-661b) / AARCH64_INSN_SIZE +alternative_endif +.endm + +#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ + alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) + +.macro user_alt, label, oldinstr, newinstr, cond +9999: alternative_insn "\oldinstr", "\newinstr", \cond + _asm_extable 9999b, \label +.endm + +#endif /* __ASSEMBLY__ */ + +/* + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature)); + * + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); + * N.B. If CONFIG_FOO is specified, but not selected, the whole block + * will be omitted, including oldinstr. + */ +#define ALTERNATIVE(oldinstr, newinstr, ...) \ + _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1) + +#endif /* __ASM_ALTERNATIVE_MACROS_H */ diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 5d6b89d26de4..a38b92e11811 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -2,17 +2,13 @@ #ifndef __ASM_ALTERNATIVE_H #define __ASM_ALTERNATIVE_H -#include -#include - -#define ARM64_CB_PATCH ARM64_NCAPS +#include #ifndef __ASSEMBLY__ #include #include #include -#include struct alt_instr { s32 orig_offset; /* offset to original instruction */ @@ -35,205 +31,5 @@ void apply_alternatives_module(void *start, size_t length); static inline void apply_alternatives_module(void *start, size_t length) { } #endif -#define ALTINSTR_ENTRY(feature) \ - " .word 661b - .\n" /* label */ \ - " .word 663f - .\n" /* new instruction */ \ - " .hword " __stringify(feature) "\n" /* feature bit */ \ - " .byte 662b-661b\n" /* source len */ \ - " .byte 664f-663f\n" /* replacement len */ - -#define ALTINSTR_ENTRY_CB(feature, cb) \ - " .word 661b - .\n" /* label */ \ - " .word " __stringify(cb) "- .\n" /* callback */ \ - " .hword " __stringify(feature) "\n" /* feature bit */ \ - " .byte 662b-661b\n" /* source len */ \ - " .byte 664f-663f\n" /* replacement len */ - -/* - * alternative assembly primitive: - * - * If any of these .org directive fail, it means that insn1 and insn2 - * don't have the same length. 
This used to be written as - * - * .if ((664b-663b) != (662b-661b)) - * .error "Alternatives instruction length mismatch" - * .endif - * - * but most assemblers die if insn1 or insn2 have a .inst. This should - * be fixed in a binutils release posterior to 2.25.51.0.2 (anything - * containing commit 4e4d08cf7399b606 or c1baaddf8861). - * - * Alternatives with callbacks do not generate replacement instructions. - */ -#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ - ".if "__stringify(cfg_enabled)" == 1\n" \ - "661:\n\t" \ - oldinstr "\n" \ - "662:\n" \ - ".pushsection .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY(feature) \ - ".popsection\n" \ - ".subsection 1\n" \ - "663:\n\t" \ - newinstr "\n" \ - "664:\n\t" \ - ".org . - (664b-663b) + (662b-661b)\n\t" \ - ".org . - (662b-661b) + (664b-663b)\n\t" \ - ".previous\n" \ - ".endif\n" - -#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ - ".if "__stringify(cfg_enabled)" == 1\n" \ - "661:\n\t" \ - oldinstr "\n" \ - "662:\n" \ - ".pushsection .altinstructions,\"a\"\n" \ - ALTINSTR_ENTRY_CB(feature, cb) \ - ".popsection\n" \ - "663:\n\t" \ - "664:\n\t" \ - ".endif\n" - -#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ - __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) - -#define ALTERNATIVE_CB(oldinstr, cb) \ - __ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb) -#else - -#include - -.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len - .word \orig_offset - . - .word \alt_offset - . - .hword \feature - .byte \orig_len - .byte \alt_len -.endm - -.macro alternative_insn insn1, insn2, cap, enable = 1 - .if \enable -661: \insn1 -662: .pushsection .altinstructions, "a" - altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f - .popsection - .subsection 1 -663: \insn2 -664: .previous - .org . - (664b-663b) + (662b-661b) - .org . - (662b-661b) + (664b-663b) - .endif -.endm - -/* - * Alternative sequences - * - * The code for the case where the capability is not present will be - * assembled and linked as normal. There are no restrictions on this - * code. - * - * The code for the case where the capability is present will be - * assembled into a special section to be used for dynamic patching. - * Code for that case must: - * - * 1. Be exactly the same length (in bytes) as the default code - * sequence. - * - * 2. Not contain a branch target that is used outside of the - * alternative sequence it is defined in (branches into an - * alternative sequence are not fixed up). - */ - -/* - * Begin an alternative code sequence. - */ -.macro alternative_if_not cap - .set .Lasm_alt_mode, 0 - .pushsection .altinstructions, "a" - altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f - .popsection -661: -.endm - -.macro alternative_if cap - .set .Lasm_alt_mode, 1 - .pushsection .altinstructions, "a" - altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f - .popsection - .subsection 1 - .align 2 /* So GAS knows label 661 is suitably aligned */ -661: -.endm - -.macro alternative_cb cb - .set .Lasm_alt_mode, 0 - .pushsection .altinstructions, "a" - altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0 - .popsection -661: -.endm - -/* - * Provide the other half of the alternative code sequence. - */ -.macro alternative_else -662: - .if .Lasm_alt_mode==0 - .subsection 1 - .else - .previous - .endif -663: -.endm - -/* - * Complete an alternative code sequence. - */ -.macro alternative_endif -664: - .if .Lasm_alt_mode==0 - .previous - .endif - .org . 
- (664b-663b) + (662b-661b) - .org . - (662b-661b) + (664b-663b) -.endm - -/* - * Callback-based alternative epilogue - */ -.macro alternative_cb_end -662: -.endm - -/* - * Provides a trivial alternative or default sequence consisting solely - * of NOPs. The number of NOPs is chosen automatically to match the - * previous case. - */ -.macro alternative_else_nop_endif -alternative_else - nops (662b-661b) / AARCH64_INSN_SIZE -alternative_endif -.endm - -#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ - alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) - -.macro user_alt, label, oldinstr, newinstr, cond -9999: alternative_insn "\oldinstr", "\newinstr", \cond - _asm_extable 9999b, \label -.endm - -#endif /* __ASSEMBLY__ */ - -/* - * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature)); - * - * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); - * N.B. If CONFIG_FOO is specified, but not selected, the whole block - * will be omitted, including oldinstr. - */ -#define ALTERNATIVE(oldinstr, newinstr, ...) \ - _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1) - +#endif /* __ASSEMBLY__ */ #endif /* __ASM_ALTERNATIVE_H */ diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 479222ab82d4..2c26ca5b7bb0 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -2,7 +2,7 @@ #ifndef __ASM_ASM_UACCESS_H #define __ASM_ASM_UACCESS_H -#include +#include #include #include #include diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 4b39293d0f72..4ebb9c054ccc 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -10,8 +10,7 @@ #include #include -/* A64 instructions are always 32 bits. */ -#define AARCH64_INSN_SIZE 4 +#include #ifndef __ASSEMBLY__ /* diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index c18eb7d41274..4b202e460e6d 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -24,6 +24,7 @@ #include #include +#include #include #include -- cgit v1.2.3 From 364a5a8ae8dc2dd457e2fefb4da3f3fd2c0ba8b1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 30 Jun 2020 14:02:22 +0100 Subject: arm64: cpufeatures: Add capability for LDAPR instruction Armv8.3 introduced the LDAPR instruction, which provides weaker memory ordering semantics than LDARi (RCpc vs RCsc). Generally, we provide an RCsc implementation when implementing the Linux memory model, but LDAPR can be used as a useful alternative to dependency ordering, particularly when the compiler is capable of breaking the dependencies. Since LDAPR is not available on all CPUs, add a cpufeature to detect it at runtime and allow the instruction to be used with alternative code patching. Acked-by: Peter Zijlstra (Intel) Acked-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 3 +++ arch/arm64/include/asm/cpucaps.h | 3 ++- arch/arm64/kernel/cpufeature.c | 10 ++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1515f6f153a0..0f8b2e35ba99 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1388,6 +1388,9 @@ config ARM64_PAN The feature is detected at runtime, and will remain as a 'nop' instruction if the cpu does not implement the feature. 
+config AS_HAS_LDAPR + def_bool $(as-instr,.arch_extension rcpc) + config ARM64_LSE_ATOMICS bool default ARM64_USE_LSE_ATOMICS diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index e7d98997c09c..64ea0bb9f420 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -66,7 +66,8 @@ #define ARM64_HAS_TLB_RANGE 56 #define ARM64_MTE 57 #define ARM64_WORKAROUND_1508412 58 +#define ARM64_HAS_LDAPR 59 -#define ARM64_NCAPS 59 +#define ARM64_NCAPS 60 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index dcc165b3fc04..b7b6804cb931 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2136,6 +2136,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_enable_mte, }, #endif /* CONFIG_ARM64_MTE */ + { + .desc = "RCpc load-acquire (LDAPR)", + .capability = ARM64_HAS_LDAPR, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .sys_reg = SYS_ID_AA64ISAR1_EL1, + .sign = FTR_UNSIGNED, + .field_pos = ID_AA64ISAR1_LRCPC_SHIFT, + .matches = has_cpuid_feature, + .min_field_value = 1, + }, {}, }; -- cgit v1.2.3 From 5af76fb4228701bd5377880b09b0216a5fd800ef Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 30 Jun 2020 14:06:04 +0100 Subject: arm64: alternatives: Remove READ_ONCE() usage during patch operation In preparation for patching the internals of READ_ONCE() itself, replace its usage on the alternatives patching patch with a volatile variable instead. Acked-by: Peter Zijlstra (Intel) Acked-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kernel/alternative.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 73039949b5ce..a57cffb752e8 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -21,7 +21,8 @@ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) -static int all_alternatives_applied; +/* Volatile, as we may be patching the guts of READ_ONCE() */ +static volatile int all_alternatives_applied; static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS); @@ -205,7 +206,7 @@ static int __apply_alternatives_multi_stop(void *unused) /* We always have a CPU 0 at this point (__init) */ if (smp_processor_id()) { - while (!READ_ONCE(all_alternatives_applied)) + while (!all_alternatives_applied) cpu_relax(); isb(); } else { @@ -217,7 +218,7 @@ static int __apply_alternatives_multi_stop(void *unused) BUG_ON(all_alternatives_applied); __apply_alternatives(®ion, false, remaining_capabilities); /* Barriers provided by the cache flushing */ - WRITE_ONCE(all_alternatives_applied, 1); + all_alternatives_applied = 1; } return 0; -- cgit v1.2.3 From e35123d83ee35c31f64ecfbdfabbe5142d3025b8 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 30 Jun 2020 14:02:48 +0100 Subject: arm64: lto: Strengthen READ_ONCE() to acquire when CONFIG_LTO=y When building with LTO, there is an increased risk of the compiler converting an address dependency headed by a READ_ONCE() invocation into a control dependency and consequently allowing for harmful reordering by the CPU. Ensure that such transformations are harmless by overriding the generic READ_ONCE() definition with one that provides acquire semantics when building with LTO. 
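For context, the pattern being protected looks roughly like the sketch
below (illustrative only, not taken from the kernel; the READ_ONCE()
stand-in is simplified so the snippet builds on its own):

/* Simplified stand-in for the kernel's READ_ONCE(), for illustration. */
#define READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))

struct foo { int data; };
struct foo *published;

int reader(void)
{
	struct foo *p = READ_ONCE(published);	/* head of an address dependency */

	if (!p)
		return -1;
	/*
	 * The load of p->data is ordered after the load of 'published'
	 * only while the compiler preserves the address dependency; LTO
	 * may rewrite it into a control dependency, which does not order
	 * the dependent load on the CPU. Turning READ_ONCE() into an
	 * RCpc acquire (LDAPR, or LDAR as a fallback) keeps the ordering
	 * either way.
	 */
	return p->data;
}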
Acked-by: Peter Zijlstra (Intel) Acked-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/rwonce.h | 73 +++++++++++++++++++++++++++++++++++++++ arch/arm64/kernel/vdso/Makefile | 2 +- arch/arm64/kernel/vdso32/Makefile | 2 +- arch/arm64/kernel/vmlinux.lds.S | 2 +- 4 files changed, 76 insertions(+), 3 deletions(-) create mode 100644 arch/arm64/include/asm/rwonce.h (limited to 'arch') diff --git a/arch/arm64/include/asm/rwonce.h b/arch/arm64/include/asm/rwonce.h new file mode 100644 index 000000000000..1bce62fa908a --- /dev/null +++ b/arch/arm64/include/asm/rwonce.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Google LLC. + */ +#ifndef __ASM_RWONCE_H +#define __ASM_RWONCE_H + +#ifdef CONFIG_LTO + +#include +#include + +#ifndef BUILD_VDSO + +#ifdef CONFIG_AS_HAS_LDAPR +#define __LOAD_RCPC(sfx, regs...) \ + ALTERNATIVE( \ + "ldar" #sfx "\t" #regs, \ + ".arch_extension rcpc\n" \ + "ldapr" #sfx "\t" #regs, \ + ARM64_HAS_LDAPR) +#else +#define __LOAD_RCPC(sfx, regs...) "ldar" #sfx "\t" #regs +#endif /* CONFIG_AS_HAS_LDAPR */ + +/* + * When building with LTO, there is an increased risk of the compiler + * converting an address dependency headed by a READ_ONCE() invocation + * into a control dependency and consequently allowing for harmful + * reordering by the CPU. + * + * Ensure that such transformations are harmless by overriding the generic + * READ_ONCE() definition with one that provides RCpc acquire semantics + * when building with LTO. + */ +#define __READ_ONCE(x) \ +({ \ + typeof(&(x)) __x = &(x); \ + int atomic = 1; \ + union { __unqual_scalar_typeof(*__x) __val; char __c[1]; } __u; \ + switch (sizeof(x)) { \ + case 1: \ + asm volatile(__LOAD_RCPC(b, %w0, %1) \ + : "=r" (*(__u8 *)__u.__c) \ + : "Q" (*__x) : "memory"); \ + break; \ + case 2: \ + asm volatile(__LOAD_RCPC(h, %w0, %1) \ + : "=r" (*(__u16 *)__u.__c) \ + : "Q" (*__x) : "memory"); \ + break; \ + case 4: \ + asm volatile(__LOAD_RCPC(, %w0, %1) \ + : "=r" (*(__u32 *)__u.__c) \ + : "Q" (*__x) : "memory"); \ + break; \ + case 8: \ + asm volatile(__LOAD_RCPC(, %0, %1) \ + : "=r" (*(__u64 *)__u.__c) \ + : "Q" (*__x) : "memory"); \ + break; \ + default: \ + atomic = 0; \ + } \ + atomic ? (typeof(*__x))__u.__val : (*(volatile typeof(__x))__x);\ +}) + +#endif /* !BUILD_VDSO */ +#endif /* CONFIG_LTO */ + +#include + +#endif /* __ASM_RWONCE_H */ diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index d65f52264aba..a8f8e409e2bf 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -28,7 +28,7 @@ ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ $(btildflags-y) -T ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18 -ccflags-y += -DDISABLE_BRANCH_PROFILING +ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) KASAN_SANITIZE := n diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 79280c53b9a6..a1e0f91e6cea 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -48,7 +48,7 @@ cc32-as-instr = $(call try-run,\ # As a result we set our own flags here. 
# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile -VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include) +VDSO_CPPFLAGS := -DBUILD_VDSO -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include) VDSO_CPPFLAGS += $(LINUXINCLUDE) # Common C and assembly flags diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 1bda604f4c70..d6cdcf4aa6a5 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -201,7 +201,7 @@ SECTIONS INIT_CALLS CON_INITCALL INIT_RAM_FS - *(.init.rodata.* .init.bss) /* from the EFI stub */ + *(.init.altinstructions .init.rodata.* .init.bss) /* from the EFI stub */ } .exit.data : { EXIT_DATA -- cgit v1.2.3 From ba090f9cafd53dbabe0f0a8c4ccae44203d3731b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 3 Nov 2020 14:49:04 +0100 Subject: arm64: kprobes: Remove redundant kprobe_step_ctx The kprobe_step_ctx (kcb->ss_ctx) has ss_pending and match_addr, but those are redundant because those can be replaced by KPROBE_HIT_SS and &cur_kprobe->ainsn.api.insn[1] respectively. To simplify the code, remove the kprobe_step_ctx. Signed-off-by: Masami Hiramatsu Reviewed-by: Jean-Philippe Brucker Acked-by: Will Deacon Link: https://lore.kernel.org/r/20201103134900.337243-2-jean-philippe@linaro.org Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/kprobes.h | 7 ----- arch/arm64/kernel/probes/kprobes.c | 53 +++++++++----------------------------- 2 files changed, 12 insertions(+), 48 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h index 8699ce30f587..5d38ff4a4806 100644 --- a/arch/arm64/include/asm/kprobes.h +++ b/arch/arm64/include/asm/kprobes.h @@ -28,18 +28,11 @@ struct prev_kprobe { unsigned int status; }; -/* Single step context for kprobe */ -struct kprobe_step_ctx { - unsigned long ss_pending; - unsigned long match_addr; -}; - /* per-cpu kprobe control block */ struct kprobe_ctlblk { unsigned int kprobe_status; unsigned long saved_irqflag; struct prev_kprobe prev_kprobe; - struct kprobe_step_ctx ss_ctx; }; void arch_remove_kprobe(struct kprobe *); diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index f11a1a1f7026..89c64ada8732 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static void __kprobes -post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); +post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *); static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { @@ -68,7 +68,7 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs); /* single step simulated, now go for post processing */ - post_kprobe_handler(kcb, regs); + post_kprobe_handler(p, kcb, regs); } int __kprobes arch_prepare_kprobe(struct kprobe *p) @@ -177,19 +177,6 @@ static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, regs->pstate |= kcb->saved_irqflag; } -static void __kprobes -set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr) -{ - kcb->ss_ctx.ss_pending = true; - kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t); -} - -static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb) -{ - kcb->ss_ctx.ss_pending = false; - 
kcb->ss_ctx.match_addr = 0; -} - static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) @@ -209,7 +196,6 @@ static void __kprobes setup_singlestep(struct kprobe *p, /* prepare for single stepping */ slot = (unsigned long)p->ainsn.api.insn; - set_ss_context(kcb, slot); /* mark pending ss */ kprobes_save_local_irqflag(kcb, regs); instruction_pointer_set(regs, slot); } else { @@ -243,13 +229,8 @@ static int __kprobes reenter_kprobe(struct kprobe *p, } static void __kprobes -post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs) +post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs) { - struct kprobe *cur = kprobe_running(); - - if (!cur) - return; - /* return addr restore if non-branching insn */ if (cur->ainsn.api.restore != 0) instruction_pointer_set(regs, cur->ainsn.api.restore); @@ -364,33 +345,23 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) */ } -static int __kprobes -kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr) -{ - if ((kcb->ss_ctx.ss_pending) - && (kcb->ss_ctx.match_addr == addr)) { - clear_ss_context(kcb); /* clear pending ss */ - return DBG_HOOK_HANDLED; - } - /* not ours, kprobes should ignore it */ - return DBG_HOOK_ERROR; -} - static int __kprobes kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - int retval; - - /* return error if this is not our step */ - retval = kprobe_ss_hit(kcb, instruction_pointer(regs)); + unsigned long addr = instruction_pointer(regs); + struct kprobe *cur = kprobe_running(); - if (retval == DBG_HOOK_HANDLED) { + if (cur && (kcb->kprobe_status == KPROBE_HIT_SS) + && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) { kprobes_restore_local_irqflag(kcb, regs); - post_kprobe_handler(kcb, regs); + post_kprobe_handler(cur, kcb, regs); + + return DBG_HOOK_HANDLED; } - return retval; + /* not ours, kprobes should ignore it */ + return DBG_HOOK_ERROR; } static struct break_hook kprobes_break_ss_hook = { -- cgit v1.2.3 From 833be850f1cabd0e3b5337c0fcab20a6e936dd48 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 3 Nov 2020 10:22:29 +0000 Subject: arm64: consistently use reserved_pg_dir Depending on configuration options and specific code paths, we either use the empty_zero_page or the configuration-dependent reserved_ttbr0 as a reserved value for TTBR{0,1}_EL1. To simplify this code, let's always allocate and use the same reserved_pg_dir, replacing reserved_ttbr0. Note that this is allocated (and hence pre-zeroed), and is also marked as read-only in the kernel Image mapping. Keeping this separate from the empty_zero_page potentially helps with robustness as the empty_zero_page is used in a number of cases where a failure to map it read-only could allow it to become corrupted. The (presently unused) swapper_pg_end symbol is also removed, and comments are added wherever we rely on the offsets between the pre-allocated pg_dirs to keep these cases easily identifiable. 
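The offsets those comments call out all follow from the fixed link order
of the pre-allocated page tables: tramp_pg_dir, reserved_pg_dir and
swapper_pg_dir are laid out back to back, one page each. A C-level
sketch of the resulting arithmetic, purely illustrative (the real
helpers do this in assembly, and the 4k PAGE_SIZE is just an example
value):

#define PAGE_SIZE	0x1000UL	/* example: 4k granule */

/* reserved_pg_dir is linked immediately before swapper_pg_dir */
static unsigned long reserved_ttbr0(unsigned long swapper_ttbr1)
{
	return swapper_ttbr1 - PAGE_SIZE;
}

/* tramp_pg_dir sits two pages below swapper_pg_dir */
static unsigned long tramp_ttbr1(unsigned long swapper_ttbr1)
{
	return swapper_ttbr1 - 2 * PAGE_SIZE;
}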
Signed-off-by: Mark Rutland Cc: Will Deacon Link: https://lore.kernel.org/r/20201103102229.8542-1-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/asm-uaccess.h | 4 ++-- arch/arm64/include/asm/kernel-pgtable.h | 6 ------ arch/arm64/include/asm/mmu_context.h | 6 +++--- arch/arm64/include/asm/pgtable.h | 1 + arch/arm64/include/asm/uaccess.h | 4 ++-- arch/arm64/kernel/entry.S | 6 ++++-- arch/arm64/kernel/setup.c | 2 +- arch/arm64/kernel/vmlinux.lds.S | 8 +++----- arch/arm64/mm/proc.S | 2 +- 9 files changed, 17 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index f68a0e64482a..5ef624fef44a 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -15,10 +15,10 @@ .macro __uaccess_ttbr0_disable, tmp1 mrs \tmp1, ttbr1_el1 // swapper_pg_dir bic \tmp1, \tmp1, #TTBR_ASID_MASK - sub \tmp1, \tmp1, #RESERVED_TTBR0_SIZE // reserved_ttbr0 just before swapper_pg_dir + sub \tmp1, \tmp1, #PAGE_SIZE // reserved_pg_dir just before swapper_pg_dir msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 isb - add \tmp1, \tmp1, #RESERVED_TTBR0_SIZE + add \tmp1, \tmp1, #PAGE_SIZE msr ttbr1_el1, \tmp1 // set reserved ASID isb .endm diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 19ca76ea60d9..587c504a4c8b 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -89,12 +89,6 @@ #define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end)) #define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE) -#ifdef CONFIG_ARM64_SW_TTBR0_PAN -#define RESERVED_TTBR0_SIZE (PAGE_SIZE) -#else -#define RESERVED_TTBR0_SIZE (0) -#endif - /* Initial memory map size */ #if ARM64_SWAPPER_USES_SECTION_MAPS #define SWAPPER_BLOCK_SHIFT SECTION_SHIFT diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 0672236e1aea..5c72c20bd300 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -36,11 +36,11 @@ static inline void contextidr_thread_switch(struct task_struct *next) } /* - * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. + * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0. 
*/ static inline void cpu_set_reserved_ttbr0(void) { - unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page)); + unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir)); write_sysreg(ttbr, ttbr0_el1); isb(); @@ -195,7 +195,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk, return; if (mm == &init_mm) - ttbr = __pa_symbol(empty_zero_page); + ttbr = __pa_symbol(reserved_pg_dir); else ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48; diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 4ff12a7adcfd..74694e129f9a 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -519,6 +519,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; extern pgd_t idmap_pg_end[]; extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; +extern pgd_t reserved_pg_dir[PTRS_PER_PGD]; extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd); diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 991dd5f031e4..385a189f7d39 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -113,8 +113,8 @@ static inline void __uaccess_ttbr0_disable(void) local_irq_save(flags); ttbr = read_sysreg(ttbr1_el1); ttbr &= ~TTBR_ASID_MASK; - /* reserved_ttbr0 placed before swapper_pg_dir */ - write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1); + /* reserved_pg_dir placed before swapper_pg_dir */ + write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1); isb(); /* Set reserved ASID */ write_sysreg(ttbr, ttbr1_el1); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index b295fb912b12..6f31c2c06788 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -807,9 +807,10 @@ SYM_CODE_END(ret_to_user) */ .pushsection ".entry.tramp.text", "ax" + // Move from tramp_pg_dir to swapper_pg_dir .macro tramp_map_kernel, tmp mrs \tmp, ttbr1_el1 - add \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE) + add \tmp, \tmp, #(2 * PAGE_SIZE) bic \tmp, \tmp, #USER_ASID_FLAG msr ttbr1_el1, \tmp #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 @@ -826,9 +827,10 @@ alternative_else_nop_endif #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */ .endm + // Move from swapper_pg_dir to tramp_pg_dir .macro tramp_unmap_kernel, tmp mrs \tmp, ttbr1_el1 - sub \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE) + sub \tmp, \tmp, #(2 * PAGE_SIZE) orr \tmp, \tmp, #USER_ASID_FLAG msr ttbr1_el1, \tmp /* diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 133257ffd859..c28a9ec76b11 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -366,7 +366,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) * faults in case uaccess_enable() is inadvertently called by the init * thread. */ - init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page); + init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir); #endif if (boot_args[1] || boot_args[2] || boot_args[3]) { diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 1bda604f4c70..30c102978942 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -164,13 +164,11 @@ SECTIONS . += PAGE_SIZE; #endif -#ifdef CONFIG_ARM64_SW_TTBR0_PAN - reserved_ttbr0 = .; - . += RESERVED_TTBR0_SIZE; -#endif + reserved_pg_dir = .; + . += PAGE_SIZE; + swapper_pg_dir = .; . += PAGE_SIZE; - swapper_pg_end = .; . 
= ALIGN(SEGMENT_ALIGN); __init_begin = .; diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 23c326a06b2d..0eaf16b0442a 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -168,7 +168,7 @@ SYM_FUNC_END(cpu_do_resume) .pushsection ".idmap.text", "awx" .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 - adrp \tmp1, empty_zero_page + adrp \tmp1, reserved_pg_dir phys_to_ttbr \tmp2, \tmp1 offset_ttbr1 \tmp2, \tmp1 msr ttbr1_el1, \tmp2 -- cgit v1.2.3 From 0edaee42eb8c0f3b767ec3c51bee4a3855aa2555 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 9 Nov 2020 17:08:36 +0530 Subject: arm64/smp: Drop the macro S(x,s) Mapping between IPI type index and its string is direct without requiring an additional offset. Hence the existing macro S(x, s) is now redundant and can just be dropped. This also makes the code clean and simple. Signed-off-by: Anshuman Khandual Acked-by: Mark Rutland Cc: Will Deacon Cc: Mark Rutland Cc: Marc Zyngier Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Link: https://lore.kernel.org/r/1604921916-23368-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/smp.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 09c96f57818c..65d18a618abe 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -786,14 +786,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } static const char *ipi_types[NR_IPI] __tracepoint_string = { -#define S(x,s) [x] = s - S(IPI_RESCHEDULE, "Rescheduling interrupts"), - S(IPI_CALL_FUNC, "Function call interrupts"), - S(IPI_CPU_STOP, "CPU stop interrupts"), - S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"), - S(IPI_TIMER, "Timer broadcast interrupts"), - S(IPI_IRQ_WORK, "IRQ work interrupts"), - S(IPI_WAKEUP, "CPU wake-up interrupts"), + [IPI_RESCHEDULE] = "Rescheduling interrupts", + [IPI_CALL_FUNC] = "Function call interrupts", + [IPI_CPU_STOP] = "CPU stop interrupts", + [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", + [IPI_TIMER] = "Timer broadcast interrupts", + [IPI_IRQ_WORK] = "IRQ work interrupts", + [IPI_WAKEUP] = "CPU wake-up interrupts", }; static void smp_cross_call(const struct cpumask *target, unsigned int ipinr); -- cgit v1.2.3 From 97d6786e0669daa5c2f2d07a057f574e849dfd3e Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 14 Oct 2020 10:18:57 +0200 Subject: arm64: mm: account for hotplug memory when randomizing the linear region As a hardening measure, we currently randomize the placement of physical memory inside the linear region when KASLR is in effect. Since the random offset at which to place the available physical memory inside the linear region is chosen early at boot, it is based on the memblock description of memory, which does not cover hotplug memory. The consequence of this is that the randomization offset may be chosen such that any hotplugged memory located above memblock_end_of_DRAM() that appears later is pushed off the end of the linear region, where it cannot be accessed. So let's limit this randomization of the linear region to ensure that this can no longer happen, by using the CPU's addressable PA range instead. As it is guaranteed that no hotpluggable memory will appear that falls outside of that range, we can safely put this PA range sized window anywhere in the linear region. 
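In numbers, the slide computed from the CPU's addressable PA range works
out as in the sketch below (standalone C; the 48-bit VA linear region,
the 44-bit PA range, the 1 GiB ARM64_MEMSTART_ALIGN and the seed are
example assumptions, not taken from any particular system):

#include <stdio.h>

int main(void)
{
	unsigned long long linear_region_size = 1ULL << 47;	/* 48-bit VA kernel */
	unsigned long long pa_span            = 1ULL << 44;	/* CPU reports 44-bit PA */
	unsigned long long align              = 1ULL << 30;	/* example ARM64_MEMSTART_ALIGN */
	unsigned long long seed               = 0xabcd;		/* 16-bit memstart_offset_seed */

	unsigned long long range = linear_region_size - pa_span;
	unsigned long long slide = align * (((range / align) * seed) >> 16);

	printf("memstart_addr lowered by %llu GiB\n", slide >> 30);
	return 0;
}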
Signed-off-by: Ard Biesheuvel Cc: Anshuman Khandual Cc: Will Deacon Cc: Steven Price Cc: Robin Murphy Link: https://lore.kernel.org/r/20201014081857.3288-1-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/mm/init.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 095540667f0f..93458820bc53 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -348,15 +348,18 @@ void __init arm64_memblock_init(void) if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { extern u16 memstart_offset_seed; - u64 range = linear_region_size - - (memblock_end_of_DRAM() - memblock_start_of_DRAM()); + u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); + int parange = cpuid_feature_extract_unsigned_field( + mmfr0, ID_AA64MMFR0_PARANGE_SHIFT); + s64 range = linear_region_size - + BIT(id_aa64mmfr0_parange_to_phys_shift(parange)); /* * If the size of the linear region exceeds, by a sufficient - * margin, the size of the region that the available physical - * memory spans, randomize the linear region as well. + * margin, the size of the region that the physical memory can + * span, randomize the linear region as well. */ - if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) { + if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) { range /= ARM64_MEMSTART_ALIGN; memstart_addr -= ARM64_MEMSTART_ALIGN * ((range * memstart_offset_seed) >> 16); -- cgit v1.2.3 From cb45babe1b80090fd0272da76bf39e781439f1d6 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 9 Nov 2020 09:58:55 +0530 Subject: arm64/mm/hotplug: Register boot memory hot remove notifier earlier This moves memory notifier registration earlier in the boot process from device_initcall() to early_initcall() which will help in guarding against potential early boot memory offline requests. Even though there should not be any actual offlinig requests till memory block devices are initialized with memory_dev_init() but then generic init sequence might just change in future. Hence an early registration for the memory event notifier would be helpful. While here, just skip the registration if CONFIG_MEMORY_HOTREMOVE is not enabled and also call out when memory notifier registration fails. 
Signed-off-by: Anshuman Khandual Reviewed-by: Gavin Shan Reviewed-by: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: Marc Zyngier Cc: Steve Capper Cc: Mark Brown Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Link: https://lore.kernel.org/r/1604896137-16644-2-git-send-email-anshuman.khandual@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/mm/mmu.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1c0f3e02f731..71dd9d753b8b 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1510,7 +1510,16 @@ static struct notifier_block prevent_bootmem_remove_nb = { static int __init prevent_bootmem_remove_init(void) { - return register_memory_notifier(&prevent_bootmem_remove_nb); + int ret = 0; + + if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) + return ret; + + ret = register_memory_notifier(&prevent_bootmem_remove_nb); + if (ret) + pr_err("%s: Notifier registration failed %d\n", __func__, ret); + + return ret; } -device_initcall(prevent_bootmem_remove_init); +early_initcall(prevent_bootmem_remove_init); #endif -- cgit v1.2.3 From 9fb3d4a303380ea76ebf49d930a777dd9c9dbc25 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 9 Nov 2020 09:58:56 +0530 Subject: arm64/mm/hotplug: Enable MEM_OFFLINE event handling This enables MEM_OFFLINE memory event handling. It will help intercept any possible error condition such as if boot memory some how still got offlined even after an explicit notifier failure, potentially by a future change in generic hot plug framework. This would help detect such scenarios and help debug further. While here, also call out the first section being attempted for offline or got offlined. Signed-off-by: Anshuman Khandual Reviewed-by: Gavin Shan Cc: Will Deacon Cc: Mark Rutland Cc: Marc Zyngier Cc: Steve Capper Cc: Mark Brown Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Link: https://lore.kernel.org/r/1604896137-16644-3-git-send-email-anshuman.khandual@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/mm/mmu.c | 34 ++++++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 71dd9d753b8b..ca6d4952b733 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1493,13 +1493,43 @@ static int prevent_bootmem_remove_notifier(struct notifier_block *nb, unsigned long end_pfn = arg->start_pfn + arg->nr_pages; unsigned long pfn = arg->start_pfn; - if (action != MEM_GOING_OFFLINE) + if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE)) return NOTIFY_OK; for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { + unsigned long start = PFN_PHYS(pfn); + unsigned long end = start + (1UL << PA_SECTION_SHIFT); + ms = __pfn_to_section(pfn); - if (early_section(ms)) + if (!early_section(ms)) + continue; + + if (action == MEM_GOING_OFFLINE) { + /* + * Boot memory removal is not supported. Prevent + * it via blocking any attempted offline request + * for the boot memory and just report it. + */ + pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end); return NOTIFY_BAD; + } else if (action == MEM_OFFLINE) { + /* + * This should have never happened. Boot memory + * offlining should have been prevented by this + * very notifier. Probably some memory removal + * procedure might have changed which would then + * require further debug. 
+ */ + pr_err("Boot memory [%lx %lx] offlined\n", start, end); + + /* + * Core memory hotplug does not process a return + * code from the notifier for MEM_OFFLINE events. + * The error condition has been reported. Return + * from here as if ignored. + */ + return NOTIFY_DONE; + } } return NOTIFY_OK; } -- cgit v1.2.3 From fdd99a4103c91a2067c096a4931518cc05ca9206 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 9 Nov 2020 09:58:57 +0530 Subject: arm64/mm/hotplug: Ensure early memory sections are all online This adds a validation function that scans the entire boot memory and makes sure that all early memory sections are online. This check is essential for the memory notifier to work properly, as it cannot prevent any boot memory from offlining, if all sections are not online to begin with. Although the boot section scanning is selectively enabled with DEBUG_VM. Signed-off-by: Anshuman Khandual Cc: Will Deacon Cc: Mark Rutland Cc: Marc Zyngier Cc: Steve Capper Cc: Mark Brown Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Link: https://lore.kernel.org/r/1604896137-16644-4-git-send-email-anshuman.khandual@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/mm/mmu.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) (limited to 'arch') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index ca6d4952b733..f293f2222f50 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1538,6 +1538,53 @@ static struct notifier_block prevent_bootmem_remove_nb = { .notifier_call = prevent_bootmem_remove_notifier, }; +/* + * This ensures that boot memory sections on the platform are online + * from early boot. Memory sections could not be prevented from being + * offlined, unless for some reason they are not online to begin with. + * This helps validate the basic assumption on which the above memory + * event notifier works to prevent boot memory section offlining and + * its possible removal. + */ +static void validate_bootmem_online(void) +{ + phys_addr_t start, end, addr; + struct mem_section *ms; + u64 i; + + /* + * Scanning across all memblock might be expensive + * on some big memory systems. Hence enable this + * validation only with DEBUG_VM. + */ + if (!IS_ENABLED(CONFIG_DEBUG_VM)) + return; + + for_each_mem_range(i, &start, &end) { + for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) { + ms = __pfn_to_section(PHYS_PFN(addr)); + + /* + * All memory ranges in the system at this point + * should have been marked as early sections. + */ + WARN_ON(!early_section(ms)); + + /* + * Memory notifier mechanism here to prevent boot + * memory offlining depends on the fact that each + * early section memory on the system is initially + * online. Otherwise a given memory section which + * is already offline will be overlooked and can + * be removed completely. Call out such sections. 
+ */ + if (!online_section(ms)) + pr_err("Boot memory [%llx %llx] is offline, can be removed\n", + addr, addr + (1UL << PA_SECTION_SHIFT)); + } + } +} + static int __init prevent_bootmem_remove_init(void) { int ret = 0; @@ -1545,6 +1592,7 @@ static int __init prevent_bootmem_remove_init(void) if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) return ret; + validate_bootmem_online(); ret = register_memory_notifier(&prevent_bootmem_remove_nb); if (ret) pr_err("%s: Notifier registration failed %d\n", __func__, ret); -- cgit v1.2.3 From c1090bb10d5e15906d296936e64317e35c43f21d Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 10 Nov 2020 19:05:11 +0100 Subject: arm64: mm: don't assume struct page is always 64 bytes Commit 8c96400d6a39be7 simplified the page-to-virt and virt-to-page conversions, based on the assumption that struct page is always 64 bytes in size, in which case we can use a single signed shift to perform the conversion (provided that the vmemmap array is placed appropriately in the kernel VA space) Unfortunately, this assumption turns out not to hold, and so we need to revert part of this commit, and go back to an affine transformation. Given that all the quantities involved are compile time constants, this should not make any practical difference. Fixes: 8c96400d6a39 ("arm64: mm: make vmemmap region a projection of the linear region") Reported-by: Geert Uytterhoeven Signed-off-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20201110180511.29083-1-ardb@kernel.org Tested-by: Geert Uytterhoeven Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/memory.h | 8 +++++--- arch/arm64/mm/init.c | 2 -- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 03e9b112bd94..556cb2d62b5b 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -308,13 +308,15 @@ static inline void *phys_to_virt(phys_addr_t x) #else #define page_to_virt(x) ({ \ __typeof__(x) __page = x; \ - u64 __addr = (u64)__page << VMEMMAP_SHIFT; \ + u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\ + u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE); \ (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\ }) #define virt_to_page(x) ({ \ - u64 __addr = __tag_reset((u64)(x)) & PAGE_MASK; \ - (struct page *)((s64)__addr >> VMEMMAP_SHIFT); \ + u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE; \ + u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \ + (struct page *)__addr; \ }) #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */ diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 3a5e9f9298e9..7e15d92836d8 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -502,8 +502,6 @@ static void __init free_unused_memmap(void) */ void __init mem_init(void) { - BUILD_BUG_ON(!is_power_of_2(sizeof(struct page))); - if (swiotlb_force == SWIOTLB_FORCE || max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit)) swiotlb_init(1); -- cgit v1.2.3 From 4b9cf23c179a27659a95c094ace658bb92f146e5 Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Fri, 6 Nov 2020 12:53:32 +0000 Subject: arm64: wrap and generalise counter read functions In preparation for other uses of Activity Monitors (AMU) cycle counters, place counter read functionality in generic functions that can reused: read_corecnt() and read_constcnt(). 
As a result, implement update_freq_counters_refs() to replace init_cpu_freq_invariance_counters() and both initialise and update the per-cpu reference variables. Signed-off-by: Ionela Voinescu Reviewed-by: Sudeep Holla Cc: Will Deacon Link: https://lore.kernel.org/r/20201106125334.21570-2-ionela.voinescu@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 5 +++++ arch/arm64/include/asm/topology.h | 4 +++- arch/arm64/kernel/cpufeature.c | 5 +---- arch/arm64/kernel/topology.c | 23 ++++++++++++++--------- 4 files changed, 23 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 97244d4feca9..751bd9d3376b 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -765,6 +765,11 @@ static inline bool cpu_has_hw_af(void) #ifdef CONFIG_ARM64_AMU_EXTN /* Check whether the cpu supports the Activity Monitors Unit (AMU) */ extern bool cpu_has_amu_feat(int cpu); +#else +static inline bool cpu_has_amu_feat(int cpu) +{ + return false; +} #endif static inline unsigned int get_vmid_bits(u64 mmfr1) diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index 11a465243f66..3b8dca4eb08d 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -16,12 +16,14 @@ int pcibus_to_node(struct pci_bus *bus); #include +void update_freq_counters_refs(void); +void topology_scale_freq_tick(void); + #ifdef CONFIG_ARM64_AMU_EXTN /* * Replace task scheduler's default counter-based * frequency-invariance scale factor setting. */ -void topology_scale_freq_tick(void); #define arch_scale_freq_tick topology_scale_freq_tick #endif /* CONFIG_ARM64_AMU_EXTN */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index dcc165b3fc04..1142970e985b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1526,16 +1526,13 @@ bool cpu_has_amu_feat(int cpu) return cpumask_test_cpu(cpu, &amu_cpus); } -/* Initialize the use of AMU counters for frequency invariance */ -extern void init_cpu_freq_invariance_counters(void); - static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) { if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) { pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n", smp_processor_id()); cpumask_set_cpu(smp_processor_id(), &amu_cpus); - init_cpu_freq_invariance_counters(); + update_freq_counters_refs(); } } diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 543c67cae02f..03f4882362ce 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -124,6 +124,12 @@ int __init parse_acpi_topology(void) #endif #ifdef CONFIG_ARM64_AMU_EXTN +#define read_corecnt() read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0) +#define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0) +#else +#define read_corecnt() (0UL) +#define read_constcnt() (0UL) +#endif #undef pr_fmt #define pr_fmt(fmt) "AMU: " fmt @@ -133,13 +139,10 @@ static DEFINE_PER_CPU(u64, arch_const_cycles_prev); static DEFINE_PER_CPU(u64, arch_core_cycles_prev); static cpumask_var_t amu_fie_cpus; -/* Initialize counter reference per-cpu variables for the current CPU */ -void init_cpu_freq_invariance_counters(void) +void update_freq_counters_refs(void) { - this_cpu_write(arch_core_cycles_prev, - read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)); - this_cpu_write(arch_const_cycles_prev, - read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)); + this_cpu_write(arch_core_cycles_prev, 
read_corecnt()); + this_cpu_write(arch_const_cycles_prev, read_constcnt()); } static int validate_cpu_freq_invariance_counters(int cpu) @@ -280,11 +283,14 @@ void topology_scale_freq_tick(void) if (!cpumask_test_cpu(cpu, amu_fie_cpus)) return; - const_cnt = read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0); - core_cnt = read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0); prev_const_cnt = this_cpu_read(arch_const_cycles_prev); prev_core_cnt = this_cpu_read(arch_core_cycles_prev); + update_freq_counters_refs(); + + const_cnt = this_cpu_read(arch_const_cycles_prev); + core_cnt = this_cpu_read(arch_core_cycles_prev); + if (unlikely(core_cnt <= prev_core_cnt || const_cnt <= prev_const_cnt)) goto store_and_exit; @@ -309,4 +315,3 @@ store_and_exit: this_cpu_write(arch_core_cycles_prev, core_cnt); this_cpu_write(arch_const_cycles_prev, const_cnt); } -#endif /* CONFIG_ARM64_AMU_EXTN */ -- cgit v1.2.3 From bc3b6562a1acc1f43ae84c7d69428cdd8ae1390e Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Fri, 6 Nov 2020 12:53:33 +0000 Subject: arm64: split counter validation function In order for the counter validation function to be reused, split validate_cpu_freq_invariance_counters() into: - freq_counters_valid(cpu) - check cpu for valid cycle counters - freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate) - generic function that sets the normalization ratio used by topology_scale_freq_tick() Signed-off-by: Ionela Voinescu Reviewed-by: Sudeep Holla Cc: Will Deacon Link: https://lore.kernel.org/r/20201106125334.21570-3-ionela.voinescu@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/topology.c | 44 ++++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 03f4882362ce..b8cb16e3a2cc 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -145,45 +145,49 @@ void update_freq_counters_refs(void) this_cpu_write(arch_const_cycles_prev, read_constcnt()); } -static int validate_cpu_freq_invariance_counters(int cpu) +static inline bool freq_counters_valid(int cpu) { - u64 max_freq_hz, ratio; - if (!cpu_has_amu_feat(cpu)) { pr_debug("CPU%d: counters are not supported.\n", cpu); - return -EINVAL; + return false; } if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) || !per_cpu(arch_core_cycles_prev, cpu))) { pr_debug("CPU%d: cycle counters are not enabled.\n", cpu); - return -EINVAL; + return false; } - /* Convert maximum frequency from KHz to Hz and validate */ - max_freq_hz = cpufreq_get_hw_max_freq(cpu) * 1000; - if (unlikely(!max_freq_hz)) { - pr_debug("CPU%d: invalid maximum frequency.\n", cpu); + return true; +} + +static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate) +{ + u64 ratio; + + if (unlikely(!max_rate || !ref_rate)) { + pr_debug("CPU%d: invalid maximum or reference frequency.\n", + cpu); return -EINVAL; } /* * Pre-compute the fixed ratio between the frequency of the constant - * counter and the maximum frequency of the CPU. + * reference counter and the maximum frequency of the CPU. 
* - * const_freq - * arch_max_freq_scale = ---------------- * SCHED_CAPACITY_SCALE² - * cpuinfo_max_freq + * ref_rate + * arch_max_freq_scale = ---------- * SCHED_CAPACITY_SCALE² + * max_rate * * We use a factor of 2 * SCHED_CAPACITY_SHIFT -> SCHED_CAPACITY_SCALE² * in order to ensure a good resolution for arch_max_freq_scale for - * very low arch timer frequencies (down to the KHz range which should + * very low reference frequencies (down to the KHz range which should * be unlikely). */ - ratio = (u64)arch_timer_get_rate() << (2 * SCHED_CAPACITY_SHIFT); - ratio = div64_u64(ratio, max_freq_hz); + ratio = ref_rate << (2 * SCHED_CAPACITY_SHIFT); + ratio = div64_u64(ratio, max_rate); if (!ratio) { - WARN_ONCE(1, "System timer frequency too low.\n"); + WARN_ONCE(1, "Reference frequency too low.\n"); return -EINVAL; } @@ -230,8 +234,12 @@ static int __init init_amu_fie(void) } for_each_present_cpu(cpu) { - if (validate_cpu_freq_invariance_counters(cpu)) + if (!freq_counters_valid(cpu) || + freq_inv_set_max_ratio(cpu, + cpufreq_get_hw_max_freq(cpu) * 1000, + arch_timer_get_rate())) continue; + cpumask_set_cpu(cpu, valid_cpus); have_policy |= enable_policy_freq_counters(cpu, valid_cpus); } -- cgit v1.2.3 From 68c5debcc06d6d24f15dbf978780fc5efc147d5e Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Fri, 6 Nov 2020 12:53:34 +0000 Subject: arm64: implement CPPC FFH support using AMUs If Activity Monitors (AMUs) are present, two of the counters can be used to implement support for CPPC's (Collaborative Processor Performance Control) delivered and reference performance monitoring functionality using FFH (Functional Fixed Hardware). Given that counters for a certain CPU can only be read from that CPU, while FFH operations can be called from any CPU for any of the CPUs, use smp_call_function_single() to provide the requested values. Therefore, depending on the register addresses, the following values are returned: - 0x0 (DeliveredPerformanceCounterRegister): AMU core counter - 0x1 (ReferencePerformanceCounterRegister): AMU constant counter The use of Activity Monitors is hidden behind the generic cpu_read_{corecnt,constcnt}() functions. Read functionality for these two registers represents the only current FFH support for CPPC. Read operations for other register values or write operation for all registers are unsupported. Therefore, keep CPPC's FFH unsupported if no CPUs have valid AMU frequency counters. For this purpose, the get_cpu_with_amu_feat() is introduced. 
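A condensed sketch of the cross-CPU read pattern described above, since AMU counters can only be read on the CPU that owns them (helper names here are illustrative; the accessors assumed are the read_corecnt() and cpu_has_amu_feat() helpers from this series):

#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Runs on the target CPU via the IPI issued by smp_call_function_single(). */
static void demo_read_corecnt(void *val)
{
	*(u64 *)val = read_corecnt();	/* AMU core cycle counter */
}

static int demo_counter_read_on_cpu(int cpu, u64 *val)
{
	/* Don't bother sending an IPI to a CPU without valid AMU counters. */
	if (!cpu_has_amu_feat(cpu))
		return -EOPNOTSUPP;

	/* wait == 1: return only once the remote read has completed. */
	smp_call_function_single(cpu, demo_read_corecnt, val, 1);
	return 0;
}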
Signed-off-by: Ionela Voinescu Reviewed-by: Sudeep Holla Cc: Will Deacon Link: https://lore.kernel.org/r/20201106125334.21570-4-ionela.voinescu@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 3 ++ arch/arm64/kernel/cpufeature.c | 10 ++++++ arch/arm64/kernel/topology.c | 64 +++++++++++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 751bd9d3376b..f5b44ac354dc 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -772,6 +772,9 @@ static inline bool cpu_has_amu_feat(int cpu) } #endif +/* Get a cpu that supports the Activity Monitors Unit (AMU) */ +extern int get_cpu_with_amu_feat(void); + static inline unsigned int get_vmid_bits(u64 mmfr1) { int vmid_bits; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 1142970e985b..6b08ae74ad0a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1526,6 +1526,11 @@ bool cpu_has_amu_feat(int cpu) return cpumask_test_cpu(cpu, &amu_cpus); } +int get_cpu_with_amu_feat(void) +{ + return cpumask_any(&amu_cpus); +} + static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap) { if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) { @@ -1554,6 +1559,11 @@ static bool has_amu(const struct arm64_cpu_capabilities *cap, return true; } +#else +int get_cpu_with_amu_feat(void) +{ + return nr_cpu_ids; +} #endif #ifdef CONFIG_ARM64_VHE diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index b8cb16e3a2cc..7c9b6a0ecd6a 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -147,6 +147,9 @@ void update_freq_counters_refs(void) static inline bool freq_counters_valid(int cpu) { + if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask)) + return false; + if (!cpu_has_amu_feat(cpu)) { pr_debug("CPU%d: counters are not supported.\n", cpu); return false; @@ -323,3 +326,64 @@ store_and_exit: this_cpu_write(arch_core_cycles_prev, core_cnt); this_cpu_write(arch_const_cycles_prev, const_cnt); } + +#ifdef CONFIG_ACPI_CPPC_LIB +#include + +static void cpu_read_corecnt(void *val) +{ + *(u64 *)val = read_corecnt(); +} + +static void cpu_read_constcnt(void *val) +{ + *(u64 *)val = read_constcnt(); +} + +static inline +int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val) +{ + if (!cpu_has_amu_feat(cpu)) + return -EOPNOTSUPP; + + smp_call_function_single(cpu, func, val, 1); + + return 0; +} + +/* + * Refer to drivers/acpi/cppc_acpi.c for the description of the functions + * below. 
+ */ +bool cpc_ffh_supported(void) +{ + return freq_counters_valid(get_cpu_with_amu_feat()); +} + +int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val) +{ + int ret = -EOPNOTSUPP; + + switch ((u64)reg->address) { + case 0x0: + ret = counters_read_on_cpu(cpu, cpu_read_corecnt, val); + break; + case 0x1: + ret = counters_read_on_cpu(cpu, cpu_read_constcnt, val); + break; + } + + if (!ret) { + *val &= GENMASK_ULL(reg->bit_offset + reg->bit_width - 1, + reg->bit_offset); + *val >>= reg->bit_offset; + } + + return ret; +} + +int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_ACPI_CPPC_LIB */ -- cgit v1.2.3 From 74490422522d125451cac400fce5d4e6bb9e1fea Mon Sep 17 00:00:00 2001 From: Ionela Voinescu Date: Fri, 13 Nov 2020 15:53:28 +0000 Subject: arm64: abort counter_read_on_cpu() when irqs_disabled() Given that smp_call_function_single() can deadlock when interrupts are disabled, abort the SMP call if irqs_disabled(). This scenario is currently not possible given the function's uses, but safeguard this for potential future uses. Signed-off-by: Ionela Voinescu Cc: Will Deacon Acked-by: Mark Rutland Link: https://lore.kernel.org/r/20201113155328.4194-1-ionela.voinescu@arm.com [catalin.marinas@arm.com: modified following Mark's comment] Signed-off-by: Catalin Marinas --- arch/arm64/kernel/topology.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 7c9b6a0ecd6a..b8026ec684ba 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -343,9 +343,16 @@ static void cpu_read_constcnt(void *val) static inline int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val) { + /* + * Abort call on counterless CPU or when interrupts are + * disabled - can lead to deadlock in smp sync call. + */ if (!cpu_has_amu_feat(cpu)) return -EOPNOTSUPP; + if (WARN_ON_ONCE(irqs_disabled())) + return -EPERM; + smp_call_function_single(cpu, func, val, 1); return 0; -- cgit v1.2.3 From e2a073dde9218cdef3c5431bddabf4549dd65fea Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 17 Nov 2020 13:47:27 +0100 Subject: arm64: omit [_text, _stext) from permanent kernel mapping In a previous patch, we increased the size of the EFI PE/COFF header to 64 KB, which resulted in the _stext symbol to appear at a fixed offset of 64 KB into the image. Since 64 KB is also the largest page size we support, this completely removes the need to map the first 64 KB of the kernel image, given that it only contains the arm64 Image header and the EFI header, neither of which we ever access again after booting the kernel. More importantly, we should avoid an executable mapping of non-executable and not entirely predictable data, to deal with the unlikely event that we inadvertently emitted something that looks like an opcode that could be used as a gadget for speculative execution. So let's limit the kernel mapping of .text to the [_stext, _etext) region, which matches the view of generic code (such as kallsyms) when it reasons about the boundaries of the kernel's .text section. 
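For illustration, this is roughly how generic code bounds the kernel text, which is why [_stext, _etext) is the natural interval to map (a simplified sketch in the spirit of core_kernel_text(), not code taken from this patch):

#include <linux/types.h>
#include <asm/sections.h>	/* _stext, _etext */

static bool demo_in_kernel_text(unsigned long addr)
{
	return addr >= (unsigned long)_stext &&
	       addr <  (unsigned long)_etext;
}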
Signed-off-by: Ard Biesheuvel Acked-by: Will Deacon Link: https://lore.kernel.org/r/20201117124729.12642-2-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/efi-header.S | 7 ------- arch/arm64/kernel/setup.c | 4 ++-- arch/arm64/kernel/vmlinux.lds.S | 2 +- arch/arm64/mm/init.c | 2 +- arch/arm64/mm/mmu.c | 10 +++++----- 5 files changed, 9 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S index a71844fb923e..3ad4aecff033 100644 --- a/arch/arm64/kernel/efi-header.S +++ b/arch/arm64/kernel/efi-header.S @@ -140,13 +140,6 @@ efi_debug_entry: .set efi_debug_entry_size, . - efi_debug_entry #endif - /* - * EFI will load .text onwards at the 4k section alignment - * described in the PE/COFF header. To ensure that instruction - * sequences using an adrp and a :lo12: immediate will function - * correctly at this alignment, we must ensure that .text is - * placed at a 4k boundary in the Image to begin with. - */ .balign SEGMENT_ALIGN efi_header_end: .endm diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 133257ffd859..fe1cf52f5f80 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -206,7 +206,7 @@ static void __init request_standard_resources(void) unsigned long i = 0; size_t res_size; - kernel_code.start = __pa_symbol(_text); + kernel_code.start = __pa_symbol(_stext); kernel_code.end = __pa_symbol(__init_begin - 1); kernel_data.start = __pa_symbol(_sdata); kernel_data.end = __pa_symbol(_end - 1); @@ -283,7 +283,7 @@ u64 cpu_logical_map(int cpu) void __init __no_sanitize_address setup_arch(char **cmdline_p) { - init_mm.start_code = (unsigned long) _text; + init_mm.start_code = (unsigned long) _stext; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) _end; diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 1bda604f4c70..48b222f1c700 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -121,7 +121,7 @@ SECTIONS _text = .; HEAD_TEXT } - .text : { /* Real text segment */ + .text : ALIGN(SEGMENT_ALIGN) { /* Real text segment */ _stext = .; /* Text and read-only data */ IRQENTRY_TEXT SOFTIRQENTRY_TEXT diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 095540667f0f..aa438b9d7f40 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -367,7 +367,7 @@ void __init arm64_memblock_init(void) * Register the kernel text, kernel data, initrd, and initial * pagetables with memblock. 
*/ - memblock_reserve(__pa_symbol(_text), _end - _text); + memblock_reserve(__pa_symbol(_stext), _end - _stext); if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) { /* the generic initrd code expects virtual addresses */ initrd_start = __phys_to_virt(phys_initrd_start); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1c0f3e02f731..e6f2accaeade 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -464,14 +464,14 @@ void __init mark_linear_text_alias_ro(void) /* * Remove the write permissions from the linear alias of .text/.rodata */ - update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text), - (unsigned long)__init_begin - (unsigned long)_text, + update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext), + (unsigned long)__init_begin - (unsigned long)_stext, PAGE_KERNEL_RO); } static void __init map_mem(pgd_t *pgdp) { - phys_addr_t kernel_start = __pa_symbol(_text); + phys_addr_t kernel_start = __pa_symbol(_stext); phys_addr_t kernel_end = __pa_symbol(__init_begin); phys_addr_t start, end; int flags = 0; @@ -506,7 +506,7 @@ static void __init map_mem(pgd_t *pgdp) } /* - * Map the linear alias of the [_text, __init_begin) interval + * Map the linear alias of the [_stext, __init_begin) interval * as non-executable now, and remove the write permission in * mark_linear_text_alias_ro() below (which will be called after * alternative patching has completed). This makes the contents @@ -665,7 +665,7 @@ static void __init map_kernel(pgd_t *pgdp) * Only rodata will be remapped with different permissions later on, * all other segments are allowed to use contiguous mappings. */ - map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0, + map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0, VM_NO_GUARD); map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL, &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD); -- cgit v1.2.3 From b50a3225cdffef43b76b294fa7fb3cd1f32f50d0 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 17 Nov 2020 13:47:28 +0100 Subject: arm64/head: avoid symbol names pointing into first 64 KB of kernel image We no longer map the first 64 KB of the kernel image, as there is nothing there that we ever need to refer back to once the kernel has booted. Even though facilities like kallsyms are very careful to only refer to the region that starts at _stext when mapping virtual addresses to symbol names, let's avoid any confusion by switching to local .L prefixed symbol names for the EFI header, as none of them have any significance to the rest of the kernel. 
Signed-off-by: Ard Biesheuvel Acked-by: Will Deacon Link: https://lore.kernel.org/r/20201117124729.12642-3-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/efi-header.S | 46 ++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S index 3ad4aecff033..ddaf57d825b5 100644 --- a/arch/arm64/kernel/efi-header.S +++ b/arch/arm64/kernel/efi-header.S @@ -9,28 +9,26 @@ .macro __EFI_PE_HEADER .long PE_MAGIC -coff_header: .short IMAGE_FILE_MACHINE_ARM64 // Machine - .short section_count // NumberOfSections + .short .Lsection_count // NumberOfSections .long 0 // TimeDateStamp .long 0 // PointerToSymbolTable .long 0 // NumberOfSymbols - .short section_table - optional_header // SizeOfOptionalHeader + .short .Lsection_table - .Loptional_header // SizeOfOptionalHeader .short IMAGE_FILE_DEBUG_STRIPPED | \ IMAGE_FILE_EXECUTABLE_IMAGE | \ IMAGE_FILE_LINE_NUMS_STRIPPED // Characteristics -optional_header: +.Loptional_header: .short PE_OPT_MAGIC_PE32PLUS // PE32+ format .byte 0x02 // MajorLinkerVersion .byte 0x14 // MinorLinkerVersion - .long __initdata_begin - efi_header_end // SizeOfCode + .long __initdata_begin - .Lefi_header_end // SizeOfCode .long __pecoff_data_size // SizeOfInitializedData .long 0 // SizeOfUninitializedData .long __efistub_efi_pe_entry - _head // AddressOfEntryPoint - .long efi_header_end - _head // BaseOfCode + .long .Lefi_header_end - _head // BaseOfCode -extra_header_fields: .quad 0 // ImageBase .long SEGMENT_ALIGN // SectionAlignment .long PECOFF_FILE_ALIGNMENT // FileAlignment @@ -45,7 +43,7 @@ extra_header_fields: .long _end - _head // SizeOfImage // Everything before the kernel image is considered part of the header - .long efi_header_end - _head // SizeOfHeaders + .long .Lefi_header_end - _head // SizeOfHeaders .long 0 // CheckSum .short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem .short 0 // DllCharacteristics @@ -54,7 +52,7 @@ extra_header_fields: .quad 0 // SizeOfHeapReserve .quad 0 // SizeOfHeapCommit .long 0 // LoaderFlags - .long (section_table - .) / 8 // NumberOfRvaAndSizes + .long (.Lsection_table - .) / 8 // NumberOfRvaAndSizes .quad 0 // ExportTable .quad 0 // ImportTable @@ -64,17 +62,17 @@ extra_header_fields: .quad 0 // BaseRelocationTable #ifdef CONFIG_DEBUG_EFI - .long efi_debug_table - _head // DebugTable - .long efi_debug_table_size + .long .Lefi_debug_table - _head // DebugTable + .long .Lefi_debug_table_size #endif // Section table -section_table: +.Lsection_table: .ascii ".text\0\0\0" - .long __initdata_begin - efi_header_end // VirtualSize - .long efi_header_end - _head // VirtualAddress - .long __initdata_begin - efi_header_end // SizeOfRawData - .long efi_header_end - _head // PointerToRawData + .long __initdata_begin - .Lefi_header_end // VirtualSize + .long .Lefi_header_end - _head // VirtualAddress + .long __initdata_begin - .Lefi_header_end // SizeOfRawData + .long .Lefi_header_end - _head // PointerToRawData .long 0 // PointerToRelocations .long 0 // PointerToLineNumbers @@ -98,7 +96,7 @@ section_table: IMAGE_SCN_MEM_READ | \ IMAGE_SCN_MEM_WRITE // Characteristics - .set section_count, (. - section_table) / 40 + .set .Lsection_count, (. 
- .Lsection_table) / 40 #ifdef CONFIG_DEBUG_EFI /* @@ -114,21 +112,21 @@ section_table: __INITRODATA .align 2 -efi_debug_table: +.Lefi_debug_table: // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY .long 0 // Characteristics .long 0 // TimeDateStamp .short 0 // MajorVersion .short 0 // MinorVersion .long IMAGE_DEBUG_TYPE_CODEVIEW // Type - .long efi_debug_entry_size // SizeOfData + .long .Lefi_debug_entry_size // SizeOfData .long 0 // RVA - .long efi_debug_entry - _head // FileOffset + .long .Lefi_debug_entry - _head // FileOffset - .set efi_debug_table_size, . - efi_debug_table + .set .Lefi_debug_table_size, . - .Lefi_debug_table .previous -efi_debug_entry: +.Lefi_debug_entry: // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY .ascii "NB10" // Signature .long 0 // Unknown @@ -137,9 +135,9 @@ efi_debug_entry: .asciz VMLINUX_PATH - .set efi_debug_entry_size, . - efi_debug_entry + .set .Lefi_debug_entry_size, . - .Lefi_debug_entry #endif .balign SEGMENT_ALIGN -efi_header_end: +.Lefi_header_end: .endm -- cgit v1.2.3 From 7919385b9fb3cefe495310e5c44ca8a6d9c446e8 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 17 Nov 2020 13:47:29 +0100 Subject: arm64: head: tidy up the Image header definition Even though support for EFI boot remains entirely optional for arm64, it is unlikely that we will ever be able to repurpose the image header fields that the EFI loader relies on, i.e., the magic NOP at offset 0x0 and the PE header address at offset 0x3c. So let's factor out the differences into a 'efi_signature_nop' macro and a local symbol representing the PE header address, and move the conditional definitions into efi-header.S, taking into account whether CONFIG_EFI is enabled or not. While at it, switch to a signature NOP that behaves more like a NOP, i.e., one that only clobbers the flags. Signed-off-by: Ard Biesheuvel Acked-by: Will Deacon Link: https://lore.kernel.org/r/20201117124729.12642-4-ardb@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/efi-header.S | 43 ++++++++++++++++++++++++++++++++---------- arch/arm64/kernel/head.S | 19 ++----------------- 2 files changed, 35 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S index ddaf57d825b5..28d8a5dca5f1 100644 --- a/arch/arm64/kernel/efi-header.S +++ b/arch/arm64/kernel/efi-header.S @@ -7,7 +7,27 @@ #include #include + .macro efi_signature_nop +#ifdef CONFIG_EFI +.L_head: + /* + * This ccmp instruction has no meaningful effect except that + * its opcode forms the magic "MZ" signature required by UEFI. + */ + ccmp x18, #0, #0xd, pl +#else + /* + * Bootloaders may inspect the opcode at the start of the kernel + * image to decide if the kernel is capable of booting via UEFI. + * So put an ordinary NOP here, not the "MZ.." pseudo-nop above. + */ + nop +#endif + .endm + .macro __EFI_PE_HEADER +#ifdef CONFIG_EFI + .set .Lpe_header_offset, . 
- .L_head .long PE_MAGIC .short IMAGE_FILE_MACHINE_ARM64 // Machine .short .Lsection_count // NumberOfSections @@ -26,8 +46,8 @@ .long __initdata_begin - .Lefi_header_end // SizeOfCode .long __pecoff_data_size // SizeOfInitializedData .long 0 // SizeOfUninitializedData - .long __efistub_efi_pe_entry - _head // AddressOfEntryPoint - .long .Lefi_header_end - _head // BaseOfCode + .long __efistub_efi_pe_entry - .L_head // AddressOfEntryPoint + .long .Lefi_header_end - .L_head // BaseOfCode .quad 0 // ImageBase .long SEGMENT_ALIGN // SectionAlignment @@ -40,10 +60,10 @@ .short 0 // MinorSubsystemVersion .long 0 // Win32VersionValue - .long _end - _head // SizeOfImage + .long _end - .L_head // SizeOfImage // Everything before the kernel image is considered part of the header - .long .Lefi_header_end - _head // SizeOfHeaders + .long .Lefi_header_end - .L_head // SizeOfHeaders .long 0 // CheckSum .short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem .short 0 // DllCharacteristics @@ -62,7 +82,7 @@ .quad 0 // BaseRelocationTable #ifdef CONFIG_DEBUG_EFI - .long .Lefi_debug_table - _head // DebugTable + .long .Lefi_debug_table - .L_head // DebugTable .long .Lefi_debug_table_size #endif @@ -70,9 +90,9 @@ .Lsection_table: .ascii ".text\0\0\0" .long __initdata_begin - .Lefi_header_end // VirtualSize - .long .Lefi_header_end - _head // VirtualAddress + .long .Lefi_header_end - .L_head // VirtualAddress .long __initdata_begin - .Lefi_header_end // SizeOfRawData - .long .Lefi_header_end - _head // PointerToRawData + .long .Lefi_header_end - .L_head // PointerToRawData .long 0 // PointerToRelocations .long 0 // PointerToLineNumbers @@ -84,9 +104,9 @@ .ascii ".data\0\0\0" .long __pecoff_data_size // VirtualSize - .long __initdata_begin - _head // VirtualAddress + .long __initdata_begin - .L_head // VirtualAddress .long __pecoff_data_rawsize // SizeOfRawData - .long __initdata_begin - _head // PointerToRawData + .long __initdata_begin - .L_head // PointerToRawData .long 0 // PointerToRelocations .long 0 // PointerToLineNumbers @@ -121,7 +141,7 @@ .long IMAGE_DEBUG_TYPE_CODEVIEW // Type .long .Lefi_debug_entry_size // SizeOfData .long 0 // RVA - .long .Lefi_debug_entry - _head // FileOffset + .long .Lefi_debug_entry - .L_head // FileOffset .set .Lefi_debug_table_size, . - .Lefi_debug_table .previous @@ -140,4 +160,7 @@ .balign SEGMENT_ALIGN .Lefi_header_end: +#else + .set .Lpe_header_offset, 0x0 +#endif .endm diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index d8d9caf02834..c1f8f2c5be47 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -58,21 +58,11 @@ * in the entry routines. */ __HEAD -_head: /* * DO NOT MODIFY. Image header expected by Linux boot-loaders. */ -#ifdef CONFIG_EFI - /* - * This add instruction has no meaningful effect except that - * its opcode forms the magic "MZ" signature required by UEFI. - */ - add x13, x18, #0x16 - b primary_entry -#else + efi_signature_nop // special NOP to identity as PE/COFF executable b primary_entry // branch to kernel start, magic - .long 0 // reserved -#endif .quad 0 // Image load offset from start of RAM, little-endian le64sym _kernel_size_le // Effective size of kernel image, little-endian le64sym _kernel_flags_le // Informative flags, little-endian @@ -80,14 +70,9 @@ _head: .quad 0 // reserved .quad 0 // reserved .ascii ARM64_IMAGE_MAGIC // Magic number -#ifdef CONFIG_EFI - .long pe_header - _head // Offset to the PE header. + .long .Lpe_header_offset // Offset to the PE header. 
-pe_header: __EFI_PE_HEADER -#else - .long 0 // reserved -#endif __INIT -- cgit v1.2.3 From 9f84f39f5515fd412398a1019e3f50ac3ab51a80 Mon Sep 17 00:00:00 2001 From: Sudarshan Rajagopalan Date: Wed, 14 Oct 2020 17:51:23 -0700 Subject: arm64/mm: add fallback option to allocate virtually contiguous memory When section mappings are enabled, we allocate vmemmap pages from physically continuous memory of size PMD_SIZE using vmemmap_alloc_block_buf(). Section mappings are good to reduce TLB pressure. But when system is highly fragmented and memory blocks are being hot-added at runtime, its possible that such physically continuous memory allocations can fail. Rather than failing the memory hot-add procedure, add a fallback option to allocate vmemmap pages from discontinuous pages using vmemmap_populate_basepages(). Signed-off-by: Sudarshan Rajagopalan Reviewed-by: Gavin Shan Reviewed-by: Anshuman Khandual Acked-by: Will Deacon Cc: Will Deacon Cc: Anshuman Khandual Cc: Mark Rutland Cc: Logan Gunthorpe Cc: David Hildenbrand Cc: Andrew Morton Cc: Steven Price Link: https://lore.kernel.org/r/d6c06f2ef39bbe6c715b2f6db76eb16155fdcee6.1602722808.git.sudaraja@codeaurora.org Signed-off-by: Catalin Marinas --- arch/arm64/mm/mmu.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1c0f3e02f731..a773d2a1a2bf 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1132,8 +1132,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, void *p = NULL; p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap); - if (!p) - return -ENOMEM; + if (!p) { + if (vmemmap_populate_basepages(addr, next, node, altmap)) + return -ENOMEM; + continue; + } pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); } else -- cgit v1.2.3 From 739003c6428387432d42b9b80be185cde93978aa Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Tue, 17 Nov 2020 19:20:51 -0800 Subject: arm64: mte: optimize asynchronous tag check fault flag check We don't need to check for MTE support before checking the flag because it can only be set if the hardware supports MTE. As a result we can unconditionally check the flag bit which is expected to be in a register and therefore the check can be done in a single instruction instead of first needing to load the hwcaps. On a DragonBoard 845c with a kernel built with CONFIG_ARM64_MTE=y with the powersave governor this reduces the cost of a kernel entry/exit (invalid syscall) from 465.1ns to 463.8ns. Signed-off-by: Peter Collingbourne Link: https://linux-review.googlesource.com/id/If4dc3501fd4e4f287322f17805509613cfe47d24 Link: https://lore.kernel.org/r/20201118032051.1405907-1-pcc@google.com [catalin.marinas@arm.com: remove IS_ENABLED(CONFIG_ARM64_MTE)] Signed-off-by: Catalin Marinas --- arch/arm64/kernel/syscall.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index e4c0dadf0d92..fe3fa8fd13c8 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -123,7 +123,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, local_daif_restore(DAIF_PROCCTX); user_exit(); - if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) { + if (flags & _TIF_MTE_ASYNC_FAULT) { /* * Process the asynchronous tag check fault before the actual * syscall. 
do_notify_resume() will send a signal to userspace -- cgit v1.2.3 From 791ab8b2e3db0c6e4295467d10398800ec29144c Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 18 Nov 2020 18:58:09 +0000 Subject: arm64: Ignore any DMA offsets in the max_zone_phys() calculation Currently, the kernel assumes that if RAM starts above 32-bit (or zone_bits), there is still a ZONE_DMA/DMA32 at the bottom of the RAM and such constrained devices have a hardwired DMA offset. In practice, we haven't noticed any such hardware so let's assume that we can expand ZONE_DMA32 to the available memory if no RAM below 4GB. Similarly, ZONE_DMA is expanded to the 4GB limit if no RAM addressable by zone_bits. Signed-off-by: Catalin Marinas Tested-by: Nicolas Saenz Julienne Reviewed-by: Nicolas Saenz Julienne Cc: Nicolas Saenz Julienne Cc: Robin Murphy Link: https://lore.kernel.org/r/20201118185809.1078362-1-catalin.marinas@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/mm/init.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 095540667f0f..6be62c5bc0a3 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -175,14 +175,21 @@ static void __init reserve_elfcorehdr(void) #endif /* CONFIG_CRASH_DUMP */ /* - * Return the maximum physical address for a zone with a given address size - * limit. It currently assumes that for memory starting above 4G, 32-bit - * devices will use a DMA offset. + * Return the maximum physical address for a zone accessible by the given bits + * limit. If DRAM starts above 32-bit, expand the zone to the maximum + * available memory, otherwise cap it at 32-bit. */ static phys_addr_t __init max_zone_phys(unsigned int zone_bits) { - phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits); - return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM()); + phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits); + phys_addr_t phys_start = memblock_start_of_DRAM(); + + if (phys_start > U32_MAX) + zone_mask = PHYS_ADDR_MAX; + else if (phys_start > zone_mask) + zone_mask = U32_MAX; + + return min(zone_mask, memblock_end_of_DRAM() - 1) + 1; } static void __init zone_sizes_init(unsigned long min, unsigned long max) -- cgit v1.2.3 From 2687275a5843d1089687f08fc64eb3f3b026a169 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 19 Nov 2020 17:55:56 +0000 Subject: arm64: Force NO_BLOCK_MAPPINGS if crashkernel reservation is required mem_init() currently relies on knowing the boundaries of the crashkernel reservation to map such region with page granularity for later unmapping via set_memory_valid(..., 0). If the crashkernel reservation is deferred, such boundaries are not known when the linear mapping is created. Simply parse the command line for "crashkernel" and, if found, create the linear map with NO_BLOCK_MAPPINGS. 
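Condensed, the mechanism described above boils down to the sketch below (the complete diff follows; NO_BLOCK_MAPPINGS/NO_CONT_MAPPINGS are the arm64 mapping flags already consumed by map_mem()):

#include <linux/init.h>
#include <linux/types.h>

static bool crash_mem_map __initdata;

/* Only record that "crashkernel=" was passed; the actual parsing is
 * still done later by reserve_crashkernel(). */
static int __init enable_crash_mem_map(char *arg)
{
	crash_mem_map = true;
	return 0;
}
early_param("crashkernel", enable_crash_mem_map);

/* map_mem() then ORs crash_mem_map into its decision to drop block and
 * contiguous mappings, so the reservation can later be unmapped with
 * page granularity via set_memory_valid(..., 0). */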
Signed-off-by: Catalin Marinas Tested-by: Nicolas Saenz Julienne Reviewed-by: Nicolas Saenz Julienne Acked-by: James Morse Cc: James Morse Cc: Nicolas Saenz Julienne Link: https://lore.kernel.org/r/20201119175556.18681-1-catalin.marinas@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/mm/mmu.c | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1c0f3e02f731..7e9be6315067 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -469,6 +469,21 @@ void __init mark_linear_text_alias_ro(void) PAGE_KERNEL_RO); } +static bool crash_mem_map __initdata; + +static int __init enable_crash_mem_map(char *arg) +{ + /* + * Proper parameter parsing is done by reserve_crashkernel(). We only + * need to know if the linear map has to avoid block mappings so that + * the crashkernel reservations can be unmapped later. + */ + crash_mem_map = true; + + return 0; +} +early_param("crashkernel", enable_crash_mem_map); + static void __init map_mem(pgd_t *pgdp) { phys_addr_t kernel_start = __pa_symbol(_text); @@ -477,7 +492,7 @@ static void __init map_mem(pgd_t *pgdp) int flags = 0; u64 i; - if (rodata_full || debug_pagealloc_enabled()) + if (rodata_full || crash_mem_map || debug_pagealloc_enabled()) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; /* @@ -487,11 +502,6 @@ static void __init map_mem(pgd_t *pgdp) * the following for-loop */ memblock_mark_nomap(kernel_start, kernel_end - kernel_start); -#ifdef CONFIG_KEXEC_CORE - if (crashk_res.end) - memblock_mark_nomap(crashk_res.start, - resource_size(&crashk_res)); -#endif /* map all the memory banks */ for_each_mem_range(i, &start, &end) { @@ -518,21 +528,6 @@ static void __init map_mem(pgd_t *pgdp) __map_memblock(pgdp, kernel_start, kernel_end, PAGE_KERNEL, NO_CONT_MAPPINGS); memblock_clear_nomap(kernel_start, kernel_end - kernel_start); - -#ifdef CONFIG_KEXEC_CORE - /* - * Use page-level mappings here so that we can shrink the region - * in page granularity and put back unused memory to buddy system - * through /sys/kernel/kexec_crash_size interface. - */ - if (crashk_res.end) { - __map_memblock(pgdp, crashk_res.start, crashk_res.end + 1, - PAGE_KERNEL, - NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); - memblock_clear_nomap(crashk_res.start, - resource_size(&crashk_res)); - } -#endif } void mark_rodata_ro(void) -- cgit v1.2.3 From 0a30c53573b07d5561457e41fb0ab046cd857da5 Mon Sep 17 00:00:00 2001 From: Nicolas Saenz Julienne Date: Thu, 19 Nov 2020 18:53:53 +0100 Subject: arm64: mm: Move reserve_crashkernel() into mem_init() crashkernel might reserve memory located in ZONE_DMA. We plan to delay ZONE_DMA's initialization after unflattening the devicetree and ACPI's boot table initialization, so move it later in the boot process. Specifically into bootmem_init() since request_standard_resources() depends on it. 
Signed-off-by: Nicolas Saenz Julienne Tested-by: Jeremy Linton Link: https://lore.kernel.org/r/20201119175400.9995-2-nsaenzjulienne@suse.de Signed-off-by: Catalin Marinas --- arch/arm64/mm/init.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 6be62c5bc0a3..5cae22be54fa 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -393,8 +393,6 @@ void __init arm64_memblock_init(void) else arm64_dma32_phys_limit = PHYS_MASK + 1; - reserve_crashkernel(); - reserve_elfcorehdr(); high_memory = __va(memblock_end_of_DRAM() - 1) + 1; @@ -434,6 +432,12 @@ void __init bootmem_init(void) sparse_init(); zone_sizes_init(min, max); + /* + * request_standard_resources() depends on crashkernel's memory being + * reserved, so do it here. + */ + reserve_crashkernel(); + memblock_dump_all(); } -- cgit v1.2.3 From 9804f8c69b04a39d0ba41d19e6bdc6aa91c19725 Mon Sep 17 00:00:00 2001 From: Nicolas Saenz Julienne Date: Thu, 19 Nov 2020 18:53:54 +0100 Subject: arm64: mm: Move zone_dma_bits initialization into zone_sizes_init() zone_dma_bits's initialization happens earlier than it's actually needed, in arm64_memblock_init(). So move it into the more suitable zone_sizes_init(). Signed-off-by: Nicolas Saenz Julienne Tested-by: Jeremy Linton Link: https://lore.kernel.org/r/20201119175400.9995-3-nsaenzjulienne@suse.de Signed-off-by: Catalin Marinas --- arch/arm64/mm/init.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 5cae22be54fa..1609b61ef268 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -197,6 +197,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) unsigned long max_zone_pfns[MAX_NR_ZONES] = {0}; #ifdef CONFIG_ZONE_DMA + zone_dma_bits = ARM64_ZONE_DMA_BITS; + arm64_dma_phys_limit = max_zone_phys(zone_dma_bits); max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit); #endif #ifdef CONFIG_ZONE_DMA32 @@ -383,11 +385,6 @@ void __init arm64_memblock_init(void) early_init_fdt_scan_reserved_mem(); - if (IS_ENABLED(CONFIG_ZONE_DMA)) { - zone_dma_bits = ARM64_ZONE_DMA_BITS; - arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS); - } - if (IS_ENABLED(CONFIG_ZONE_DMA32)) arm64_dma32_phys_limit = max_zone_phys(32); else -- cgit v1.2.3 From 8424ecdde7df99d5426e1a1fd9f0fb36f4183032 Mon Sep 17 00:00:00 2001 From: Nicolas Saenz Julienne Date: Thu, 19 Nov 2020 18:53:57 +0100 Subject: arm64: mm: Set ZONE_DMA size based on devicetree's dma-ranges We recently introduced a 1 GB sized ZONE_DMA to cater for platforms incorporating masters that can address less than 32 bits of DMA, in particular the Raspberry Pi 4, which has 4 or 8 GB of DRAM, but has peripherals that can only address up to 1 GB (and its PCIe host bridge can only access the bottom 3 GB). The DMA layer also needs to be able to allocate memory that is guaranteed to meet those DMA constraints, for bounce buffering as well as allocating the backing for consistent mappings. This is why the 1 GB ZONE_DMA was introduced recently. Unfortunately, it turns out that having a 1 GB ZONE_DMA as well as a ZONE_DMA32 causes problems with kdump, and potentially in other places where allocations cannot cross zone boundaries. Therefore, we should avoid having two separate DMA zones when possible.
So, with the help of of_dma_get_max_cpu_address(), get the topmost physical address accessible to all DMA masters in the system and use that information to fine-tune ZONE_DMA's size. In the absence of addressing-limited masters, ZONE_DMA will span the whole 32-bit address space; otherwise, in the case of the Raspberry Pi 4, it'll only span the 30-bit address space, and have ZONE_DMA32 cover the rest of the 32-bit address space. Signed-off-by: Nicolas Saenz Julienne Link: https://lore.kernel.org/r/20201119175400.9995-6-nsaenzjulienne@suse.de Signed-off-by: Catalin Marinas --- arch/arm64/mm/init.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 1609b61ef268..b736890ad3d3 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -42,8 +42,6 @@ #include #include -#define ARM64_ZONE_DMA_BITS 30 - /* * We need to be able to catch inadvertent references to memstart_addr * that occur (potentially in generic code) before arm64_memblock_init() @@ -195,9 +193,11 @@ static phys_addr_t __init max_zone_phys(unsigned int zone_bits) static void __init zone_sizes_init(unsigned long min, unsigned long max) { unsigned long max_zone_pfns[MAX_NR_ZONES] = {0}; + unsigned int __maybe_unused dt_zone_dma_bits; #ifdef CONFIG_ZONE_DMA - zone_dma_bits = ARM64_ZONE_DMA_BITS; + dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL)); + zone_dma_bits = min(32U, dt_zone_dma_bits); arm64_dma_phys_limit = max_zone_phys(zone_dma_bits); max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit); #endif -- cgit v1.2.3 From 2b8652936f0ca9ca2e6c984ae76c7bfcda1b3f22 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 19 Nov 2020 18:53:58 +0100 Subject: arm64: mm: Set ZONE_DMA size based on early IORT scan We recently introduced a 1 GB sized ZONE_DMA to cater for platforms incorporating masters that can address less than 32 bits of DMA, in particular the Raspberry Pi 4, which has 4 or 8 GB of DRAM, but has peripherals that can only address up to 1 GB (and its PCIe host bridge can only access the bottom 3 GB). Instructing the DMA layer about these limitations is straightforward, even though we had to fix some issues regarding memory limits set in the IORT for named components, and regarding the handling of ACPI _DMA methods. However, the DMA layer also needs to be able to allocate memory that is guaranteed to meet those DMA constraints, for bounce buffering as well as allocating the backing for consistent mappings. This is why the 1 GB ZONE_DMA was introduced recently. Unfortunately, it turns out that having a 1 GB ZONE_DMA as well as a ZONE_DMA32 causes problems with kdump, and potentially in other places where allocations cannot cross zone boundaries. Therefore, we should avoid having two separate DMA zones when possible. So let's do an early scan of the IORT, and only create the ZONE_DMA if we encounter any devices that need it. This puts the burden on the firmware to describe such limitations in the IORT, which may be redundant (and less precise) if _DMA methods are also being provided. However, it should be noted that this situation is highly unusual for arm64 ACPI machines. Also, the DMA subsystem still gives precedence to the _DMA method if implemented, and so we will not lose the ability to perform streaming DMA outside the ZONE_DMA if the _DMA method permits it.
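As a rough worked example of the sizing logic described above (illustrative helper and values; fls64() and min3() are the helpers used in the diffs, and the exact values reported by the DT/IORT scans depend on the firmware description):

#include <linux/bitops.h>	/* fls64() */
#include <linux/kernel.h>	/* min3() */
#include <linux/types.h>

static unsigned int demo_zone_dma_bits(u64 dt_max_cpu_addr, u64 acpi_max_cpu_addr)
{
	unsigned int dt_bits = fls64(dt_max_cpu_addr);
	unsigned int acpi_bits = fls64(acpi_max_cpu_addr);

	/* With no addressing-limited masters described, both scans report an
	 * unconstrained address and the result is capped at 32 bits. */
	return min3(32U, dt_bits, acpi_bits);
}

/* Raspberry Pi 4, devicetree boot: the DT scan reports a maximum CPU address
 * of 0x3fffffff, fls64() gives 30, so ZONE_DMA covers the bottom 1 GB and
 * ZONE_DMA32 covers the rest of the 32-bit address space. */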
[nsaenz: unified implementation with DT's counterpart] Signed-off-by: Ard Biesheuvel Signed-off-by: Nicolas Saenz Julienne Tested-by: Jeremy Linton Acked-by: Lorenzo Pieralisi Acked-by: Hanjun Guo Cc: Jeremy Linton Cc: Lorenzo Pieralisi Cc: Nicolas Saenz Julienne Cc: Rob Herring Cc: Christoph Hellwig Cc: Robin Murphy Cc: Hanjun Guo Cc: Sudeep Holla Cc: Anshuman Khandual Link: https://lore.kernel.org/r/20201119175400.9995-7-nsaenzjulienne@suse.de Signed-off-by: Catalin Marinas --- arch/arm64/mm/init.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index b736890ad3d3..5e534d674c9c 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -193,11 +194,13 @@ static phys_addr_t __init max_zone_phys(unsigned int zone_bits) static void __init zone_sizes_init(unsigned long min, unsigned long max) { unsigned long max_zone_pfns[MAX_NR_ZONES] = {0}; + unsigned int __maybe_unused acpi_zone_dma_bits; unsigned int __maybe_unused dt_zone_dma_bits; #ifdef CONFIG_ZONE_DMA + acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address()); dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL)); - zone_dma_bits = min(32U, dt_zone_dma_bits); + zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits); arm64_dma_phys_limit = max_zone_phys(zone_dma_bits); max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit); #endif -- cgit v1.2.3 From 8663daeac7a1fd1b200e3365ccc9403f026f2fc8 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Thu, 12 Nov 2020 18:53:31 -0800 Subject: parisc: Drop parisc special case for __sighandler_t I believe we can and *should* drop this parisc-specific typedef for __sighandler_t when compiling a 64-bit kernel. The reasons: 1. We don't have a 64-bit userspace yet, so nothing (on userspace side) can break. 2. Inside the Linux kernel, this is only used in kernel/signal.c, in function kernel_sigaction() where the signal handler is compared against SIG_IGN. SIG_IGN is defined as (__sighandler_t)1), so only the pointers are compared. 3. Even when a 64-bit userspace gets added at some point, I think __sighandler_t should be defined what it is: a function pointer struct. I compiled kernel/signal.c with and without the patch, and the produced code is identical in both cases. Signed-off-by: Helge Deller Signed-off-by: Peter Collingbourne Acked-by: "Eric W. Biederman" Reviewed-by: Peter Collingbourne Link: https://linux-review.googlesource.com/id/I21c43f21b264f339e3aa395626af838646f62d97 Link: https://lkml.kernel.org/r/a75b8eb7bb9eac1cf73fb119eb53e5892d6e9656.1605235762.git.pcc@google.com Signed-off-by: Eric W. Biederman --- arch/parisc/include/uapi/asm/signal.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h index e605197b462c..d9c51769851a 100644 --- a/arch/parisc/include/uapi/asm/signal.h +++ b/arch/parisc/include/uapi/asm/signal.h @@ -85,16 +85,8 @@ struct siginfo; /* Type of a signal handler. */ -#if defined(__LP64__) -/* function pointers on 64-bit parisc are pointers to little structs and the - * compiler doesn't support code which changes or tests the address of - * the function in the little struct. 
This is really ugly -PB - */ -typedef char __user *__sighandler_t; -#else typedef void __signalfn_t(int); typedef __signalfn_t __user *__sighandler_t; -#endif typedef struct sigaltstack { void __user *ss_sp; -- cgit v1.2.3 From 161d36dfc7b56c357e5f291679c8e159527797a6 Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Thu, 12 Nov 2020 18:53:32 -0800 Subject: parisc: start using signal-defs.h We currently include signal-defs.h on all architectures except parisc. Make parisc fall in line. This will make maintenance easier once the flag bits are moved here. Signed-off-by: Peter Collingbourne Acked-by: Helge Deller Acked-by: "Eric W. Biederman" Link: https://linux-review.googlesource.com/id/If03a5135fb514fe96548fb74610e6c3586a04064 Link: https://lkml.kernel.org/r/be8f3680ef2d0a1a120994e3ae0b11d82f373279.1605235762.git.pcc@google.com Signed-off-by: Eric W. Biederman --- arch/parisc/include/uapi/asm/signal.h | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h index d9c51769851a..9e6f87bc8a73 100644 --- a/arch/parisc/include/uapi/asm/signal.h +++ b/arch/parisc/include/uapi/asm/signal.h @@ -68,14 +68,7 @@ #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 - -#define SIG_BLOCK 0 /* for blocking signals */ -#define SIG_UNBLOCK 1 /* for unblocking signals */ -#define SIG_SETMASK 2 /* for setting the signal mask */ - -#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ -#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ -#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ +#include # ifndef __ASSEMBLY__ @@ -84,10 +77,6 @@ /* Avoid too many header ordering problems. */ struct siginfo; -/* Type of a signal handler. */ -typedef void __signalfn_t(int); -typedef __signalfn_t __user *__sighandler_t; - typedef struct sigaltstack { void __user *ss_sp; int ss_flags; -- cgit v1.2.3 From 1d82b7898f2ad9cc414805aef23b99b742218f10 Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Thu, 12 Nov 2020 18:53:33 -0800 Subject: arch: move SA_* definitions to generic headers Most architectures with the exception of alpha, mips, parisc and sparc use the same values for these flags. Move their definitions into asm-generic/signal-defs.h and allow the architectures with non-standard values to override them. Also, document the non-standard flag values in order to make it easier to add new generic flags in the future. A consequence of this change is that on powerpc and x86, the constants' values aside from SA_RESETHAND change signedness from unsigned to signed. This is not expected to impact realistic use of these constants. In particular the typical use of the constants where they are or'ed together and assigned to sa_flags (or another int variable) would not be affected. Signed-off-by: Peter Collingbourne Acked-by: Geert Uytterhoeven Acked-by: "Eric W. Biederman" Reviewed-by: Dave Martin Link: https://linux-review.googlesource.com/id/Ia3849f18b8009bf41faca374e701cdca36974528 Link: https://lkml.kernel.org/r/b6d0d1ec34f9ee93e1105f14f288fba5f89d1f24.1605235762.git.pcc@google.com Signed-off-by: Eric W. 
Biederman --- arch/alpha/include/uapi/asm/signal.h | 14 -------------- arch/arm/include/uapi/asm/signal.h | 27 +++------------------------ arch/h8300/include/uapi/asm/signal.h | 24 ------------------------ arch/ia64/include/uapi/asm/signal.h | 24 ------------------------ arch/m68k/include/uapi/asm/signal.h | 24 ------------------------ arch/mips/include/uapi/asm/signal.h | 12 ------------ arch/parisc/include/uapi/asm/signal.h | 13 ------------- arch/powerpc/include/uapi/asm/signal.h | 24 ------------------------ arch/s390/include/uapi/asm/signal.h | 24 ------------------------ arch/sparc/include/uapi/asm/signal.h | 4 +--- arch/x86/include/uapi/asm/signal.h | 24 ------------------------ arch/xtensa/include/uapi/asm/signal.h | 24 ------------------------ 12 files changed, 4 insertions(+), 234 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/uapi/asm/signal.h b/arch/alpha/include/uapi/asm/signal.h index 74c750bf1c1a..a69dd8d080a8 100644 --- a/arch/alpha/include/uapi/asm/signal.h +++ b/arch/alpha/include/uapi/asm/signal.h @@ -60,20 +60,6 @@ typedef unsigned long sigset_t; #define SIGRTMIN 32 #define SIGRTMAX _NSIG -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ - #define SA_ONSTACK 0x00000001 #define SA_RESTART 0x00000002 #define SA_NOCLDSTOP 0x00000004 diff --git a/arch/arm/include/uapi/asm/signal.h b/arch/arm/include/uapi/asm/signal.h index 9b4185ba4f8a..c9a3ea1d8d41 100644 --- a/arch/arm/include/uapi/asm/signal.h +++ b/arch/arm/include/uapi/asm/signal.h @@ -60,33 +60,12 @@ typedef unsigned long sigset_t; #define SIGSWI 32 /* - * SA_FLAGS values: - * - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_SIGINFO deliver the signal with SIGINFO structs - * SA_THIRTYTWO delivers the signal in 32-bit mode, even if the task - * is running in 26-bit. - * SA_ONSTACK allows alternate signal stacks (see sigaltstack(2)). - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NODEFER prevents the current signal from being masked in the handler. - * SA_RESETHAND clears the handler when the signal is delivered. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. + * SA_THIRTYTWO historically meant deliver the signal in 32-bit mode, even if + * the task is running in 26-bit. But since the kernel no longer supports + * 26-bit mode, the flag has no effect. 
*/ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 -#define SA_SIGINFO 0x00000004 #define SA_THIRTYTWO 0x02000000 #define SA_RESTORER 0x04000000 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 diff --git a/arch/h8300/include/uapi/asm/signal.h b/arch/h8300/include/uapi/asm/signal.h index e15521037348..2cd0dce2b6a6 100644 --- a/arch/h8300/include/uapi/asm/signal.h +++ b/arch/h8300/include/uapi/asm/signal.h @@ -57,30 +57,6 @@ typedef unsigned long sigset_t; #define SIGRTMIN 32 #define SIGRTMAX _NSIG -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ -#define SA_SIGINFO 0x00000004 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - #define SA_RESTORER 0x04000000 #define MINSIGSTKSZ 2048 diff --git a/arch/ia64/include/uapi/asm/signal.h b/arch/ia64/include/uapi/asm/signal.h index aa98ff1b9e22..38166a88e4c9 100644 --- a/arch/ia64/include/uapi/asm/signal.h +++ b/arch/ia64/include/uapi/asm/signal.h @@ -53,30 +53,6 @@ #define SIGRTMIN 32 #define SIGRTMAX _NSIG -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 -#define SA_SIGINFO 0x00000004 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - #define SA_RESTORER 0x04000000 /* diff --git a/arch/m68k/include/uapi/asm/signal.h b/arch/m68k/include/uapi/asm/signal.h index 915cc755a184..4619291df601 100644 --- a/arch/m68k/include/uapi/asm/signal.h +++ b/arch/m68k/include/uapi/asm/signal.h @@ -57,30 +57,6 @@ typedef unsigned long sigset_t; #define SIGRTMIN 32 #define SIGRTMAX _NSIG -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. 
- * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 -#define SA_SIGINFO 0x00000004 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h index 53104b10aae2..e6c78a15cb2f 100644 --- a/arch/mips/include/uapi/asm/signal.h +++ b/arch/mips/include/uapi/asm/signal.h @@ -62,18 +62,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #define SIGRTMAX _NSIG /* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - * * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever * supported its use and no libc was using it, so the entire sa-restorer * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48 diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h index 9e6f87bc8a73..e5a2657477ac 100644 --- a/arch/parisc/include/uapi/asm/signal.h +++ b/arch/parisc/include/uapi/asm/signal.h @@ -41,19 +41,6 @@ #define SIGRTMIN 32 #define SIGRTMAX _NSIG -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ #define SA_ONSTACK 0x00000001 #define SA_RESETHAND 0x00000004 #define SA_NOCLDSTOP 0x00000008 diff --git a/arch/powerpc/include/uapi/asm/signal.h b/arch/powerpc/include/uapi/asm/signal.h index 85b0a7aa43e7..04873dd311c2 100644 --- a/arch/powerpc/include/uapi/asm/signal.h +++ b/arch/powerpc/include/uapi/asm/signal.h @@ -60,30 +60,6 @@ typedef struct { #define SIGRTMIN 32 #define SIGRTMAX _NSIG -/* - * SA_FLAGS values: - * - * SA_ONSTACK is not currently supported, but will allow sigaltstack(2). - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. 
- */ -#define SA_NOCLDSTOP 0x00000001U -#define SA_NOCLDWAIT 0x00000002U -#define SA_SIGINFO 0x00000004U -#define SA_ONSTACK 0x08000000U -#define SA_RESTART 0x10000000U -#define SA_NODEFER 0x40000000U -#define SA_RESETHAND 0x80000000U - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - #define SA_RESTORER 0x04000000U #define MINSIGSTKSZ 2048 diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h index 9a14a611ed82..0189f326aac5 100644 --- a/arch/s390/include/uapi/asm/signal.h +++ b/arch/s390/include/uapi/asm/signal.h @@ -65,30 +65,6 @@ typedef unsigned long sigset_t; #define SIGRTMIN 32 #define SIGRTMAX _NSIG -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 -#define SA_SIGINFO 0x00000004 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - #define SA_RESTORER 0x04000000 #define MINSIGSTKSZ 2048 diff --git a/arch/sparc/include/uapi/asm/signal.h b/arch/sparc/include/uapi/asm/signal.h index ff9505923b9a..53758d53ac0e 100644 --- a/arch/sparc/include/uapi/asm/signal.h +++ b/arch/sparc/include/uapi/asm/signal.h @@ -137,13 +137,11 @@ struct sigstack { #define SA_STACK _SV_SSTACK #define SA_ONSTACK _SV_SSTACK #define SA_RESTART _SV_INTR -#define SA_ONESHOT _SV_RESET +#define SA_RESETHAND _SV_RESET #define SA_NODEFER 0x20u #define SA_NOCLDWAIT 0x100u #define SA_SIGINFO 0x200u -#define SA_NOMASK SA_NODEFER - #define SIG_BLOCK 0x01 /* for blocking signals */ #define SIG_UNBLOCK 0x02 /* for unblocking signals */ #define SIG_SETMASK 0x04 /* for setting the signal mask */ diff --git a/arch/x86/include/uapi/asm/signal.h b/arch/x86/include/uapi/asm/signal.h index e5745d593dc7..164a22a72984 100644 --- a/arch/x86/include/uapi/asm/signal.h +++ b/arch/x86/include/uapi/asm/signal.h @@ -62,30 +62,6 @@ typedef unsigned long sigset_t; #define SIGRTMIN 32 #define SIGRTMAX _NSIG -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. 
- */ -#define SA_NOCLDSTOP 0x00000001u -#define SA_NOCLDWAIT 0x00000002u -#define SA_SIGINFO 0x00000004u -#define SA_ONSTACK 0x08000000u -#define SA_RESTART 0x10000000u -#define SA_NODEFER 0x40000000u -#define SA_RESETHAND 0x80000000u - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - #define SA_RESTORER 0x04000000 #define MINSIGSTKSZ 2048 diff --git a/arch/xtensa/include/uapi/asm/signal.h b/arch/xtensa/include/uapi/asm/signal.h index 005dec5bfde4..79ddabaa4e5d 100644 --- a/arch/xtensa/include/uapi/asm/signal.h +++ b/arch/xtensa/include/uapi/asm/signal.h @@ -72,30 +72,6 @@ typedef struct { #define SIGRTMIN 32 #define SIGRTMAX (_NSIG-1) -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ -#define SA_SIGINFO 0x00000004 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - #define SA_RESTORER 0x04000000 #define MINSIGSTKSZ 2048 -- cgit v1.2.3 From 23acdc76f1798b090bb9dcc90671cd29d929834e Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Thu, 12 Nov 2020 18:53:34 -0800 Subject: signal: clear non-uapi flag bits when passing/returning sa_flags Previously we were not clearing non-uapi flag bits in sigaction.sa_flags when storing the userspace-provided sa_flags or when returning them via oldact. Start doing so. This allows userspace to detect missing support for flag bits and allows the kernel to use non-uapi bits internally, as we are already doing in arch/x86 for two flag bits. Now that this change is in place, we no longer need the code in arch/x86 that was hiding these bits from userspace, so remove it. This is technically a userspace-visible behavior change for sigaction, as the unknown bits returned via oldact.sa_flags are no longer set. However, we are free to define the behavior for unknown bits exactly because their behavior is currently undefined, so for now we can define the meaning of each of them to be "clear the bit in oldact.sa_flags unless the bit becomes known in the future". Furthermore, this behavior is consistent with OpenBSD [1], illumos [2] and XNU [3] (FreeBSD [4] and NetBSD [5] fail the syscall if unknown bits are set). So there is some precedent for this behavior in other kernels, and in particular in XNU, which is probably the most popular kernel among those that I looked at, which means that this change is less likely to be a compatibility issue. 
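As an aside, the feature probing that this change enables might look roughly like the sketch below (not part of the patch): install an action with the flag of interest, read it back, and see whether the bit survived. The probe is only meaningful on kernels with this change applied, since older kernels echo unknown bits back unchanged; SA_RESTART is used purely as a stand-in for whichever new flag is being tested.

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch: probe whether the kernel knows a given SA_* flag. With this
 * change, unknown sa_flags bits are cleared when returned via oldact,
 * so an unsupported flag no longer survives the round trip.
 */
static int sa_flag_supported(int sig, int flag)
{
	struct sigaction probe, old;

	memset(&probe, 0, sizeof(probe));
	probe.sa_handler = SIG_DFL;
	probe.sa_flags = flag;
	if (sigaction(sig, &probe, &old))
		return -1;
	if (sigaction(sig, NULL, &probe))	/* read the stored action back */
		return -1;
	sigaction(sig, &old, NULL);		/* restore the original action */
	return (probe.sa_flags & flag) == flag;
}

int main(void)
{
	/* SA_RESTART stands in for whichever new flag is being probed. */
	printf("SA_RESTART supported: %d\n",
	       sa_flag_supported(SIGUSR1, SA_RESTART));
	return 0;
}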
Link: [1] https://github.com/openbsd/src/blob/f634a6a4b5bf832e9c1de77f7894ae2625e74484/sys/kern/kern_sig.c#L278 Link: [2] https://github.com/illumos/illumos-gate/blob/76f19f5fdc974fe5be5c82a556e43a4df93f1de1/usr/src/uts/common/syscall/sigaction.c#L86 Link: [3] https://github.com/apple/darwin-xnu/blob/a449c6a3b8014d9406c2ddbdc81795da24aa7443/bsd/kern/kern_sig.c#L480 Link: [4] https://github.com/freebsd/freebsd/blob/eded70c37057857c6e23fae51f86b8f8f43cd2d0/sys/kern/kern_sig.c#L699 Link: [5] https://github.com/NetBSD/src/blob/3365779becdcedfca206091a645a0e8e22b2946e/sys/kern/sys_sig.c#L473 Signed-off-by: Peter Collingbourne Reviewed-by: Dave Martin Acked-by: "Eric W. Biederman" Link: https://linux-review.googlesource.com/id/I35aab6f5be932505d90f3b3450c083b4db1eca86 Link: https://lkml.kernel.org/r/878dbcb5f47bc9b11881c81f745c0bef5c23f97f.1605235762.git.pcc@google.com Signed-off-by: Eric W. Biederman --- arch/arm/include/asm/signal.h | 2 ++ arch/parisc/include/asm/signal.h | 2 ++ arch/x86/kernel/signal_compat.c | 7 ------- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/signal.h b/arch/arm/include/asm/signal.h index 65530a042009..430be7774402 100644 --- a/arch/arm/include/asm/signal.h +++ b/arch/arm/include/asm/signal.h @@ -17,6 +17,8 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; +#define __ARCH_UAPI_SA_FLAGS (SA_THIRTYTWO | SA_RESTORER) + #define __ARCH_HAS_SA_RESTORER #include diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h index 715c96ba2ec8..30dd1e43ef88 100644 --- a/arch/parisc/include/asm/signal.h +++ b/arch/parisc/include/asm/signal.h @@ -21,6 +21,8 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; +#define __ARCH_UAPI_SA_FLAGS _SA_SIGGFAULT + #include #endif /* !__ASSEMBLY */ diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index a7f3e12cfbdb..ddfd919be46c 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -165,16 +165,9 @@ void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact) { signal_compat_build_tests(); - /* Don't leak in-kernel non-uapi flags to user-space */ - if (oact) - oact->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI); - if (!act) return; - /* Don't let flags to be set from userspace */ - act->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI); - if (in_ia32_syscall()) act->sa.sa_flags |= SA_IA32_ABI; if (in_x32_syscall()) -- cgit v1.2.3 From dceec3ff78076757311d92a388d50d0251fb7dbb Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Fri, 20 Nov 2020 12:33:46 -0800 Subject: arm64: expose FAR_EL1 tag bits in siginfo The kernel currently clears the tag bits (i.e. bits 56-63) in the fault address exposed via siginfo.si_addr and sigcontext.fault_address. However, the tag bits may be needed by tools in order to accurately diagnose memory errors, such as HWASan [1] or future tools based on the Memory Tagging Extension (MTE). Expose these bits via the arch_untagged_si_addr mechanism, so that they are only exposed to signal handlers with the SA_EXPOSE_TAGBITS flag set. 
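As a usage sketch (not part of the patch, and arm64-specific), a signal handler opts in to receiving the tag bits roughly as follows. SA_EXPOSE_TAGBITS comes from the uapi headers introduced in this series; the guarded fallback value below is an assumption and should be checked against the installed headers, and the faulting address is just an arbitrary, presumably unmapped example.

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#ifndef SA_EXPOSE_TAGBITS
#define SA_EXPOSE_TAGBITS 0x00000800	/* assumed value; verify against uapi signal-defs.h */
#endif

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig;
	(void)ucontext;
	/*
	 * With SA_EXPOSE_TAGBITS set, si_addr retains the tag bits (63:56)
	 * of the fault address where the architecture defines them; without
	 * the flag the kernel untags the address as before.
	 * (fprintf is not async-signal-safe; acceptable only for a demo.)
	 */
	fprintf(stderr, "fault at %p\n", info->si_addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;
	volatile char *p;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO | SA_EXPOSE_TAGBITS;
	sigaction(SIGSEGV, &sa, NULL);

	/* Fault on a tagged, presumably unmapped address: tag 0x2a in bits 63:56. */
	p = (volatile char *)((0x2aUL << 56) | 0x1000);
	*p = 0;
	return 0;
}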
[1] http://clang.llvm.org/docs/HardwareAssistedAddressSanitizerDesign.html Signed-off-by: Peter Collingbourne Reviewed-by: Catalin Marinas Link: https://linux-review.googlesource.com/id/Ia8876bad8c798e0a32df7c2ce1256c4771c81446 Link: https://lore.kernel.org/r/0010296597784267472fa13b39f8238d87a72cf8.1605904350.git.pcc@google.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/exception.h | 2 +- arch/arm64/include/asm/signal.h | 25 +++++++++++ arch/arm64/include/asm/system_misc.h | 2 +- arch/arm64/include/asm/traps.h | 6 +-- arch/arm64/kernel/debug-monitors.c | 5 +-- arch/arm64/kernel/entry-common.c | 2 - arch/arm64/kernel/ptrace.c | 7 +-- arch/arm64/kernel/sys_compat.c | 5 +-- arch/arm64/kernel/traps.c | 29 +++++++------ arch/arm64/mm/fault.c | 83 ++++++++++++++++++++++-------------- 10 files changed, 101 insertions(+), 65 deletions(-) create mode 100644 arch/arm64/include/asm/signal.h (limited to 'arch') diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 99b9383cd036..2a8aa1884d8a 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -32,7 +32,7 @@ static inline u32 disr_to_esr(u64 disr) } asmlinkage void enter_from_user_mode(void); -void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs); +void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs); void do_undefinstr(struct pt_regs *regs); void do_bti(struct pt_regs *regs); asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr); diff --git a/arch/arm64/include/asm/signal.h b/arch/arm64/include/asm/signal.h new file mode 100644 index 000000000000..ef449f5f4ba8 --- /dev/null +++ b/arch/arm64/include/asm/signal.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ARM64_ASM_SIGNAL_H +#define __ARM64_ASM_SIGNAL_H + +#include +#include +#include + +static inline void __user *arch_untagged_si_addr(void __user *addr, + unsigned long sig, + unsigned long si_code) +{ + /* + * For historical reasons, all bits of the fault address are exposed as + * address bits for watchpoint exceptions. New architectures should + * handle the tag bits consistently. 
+ */ + if (sig == SIGTRAP && si_code == TRAP_BRKPT) + return addr; + + return untagged_addr(addr); +} +#define arch_untagged_si_addr arch_untagged_si_addr + +#endif diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h index 1ab63cfbbaf1..673be2d1263c 100644 --- a/arch/arm64/include/asm/system_misc.h +++ b/arch/arm64/include/asm/system_misc.h @@ -22,7 +22,7 @@ void die(const char *msg, struct pt_regs *regs, int err); struct siginfo; void arm64_notify_die(const char *str, struct pt_regs *regs, - int signo, int sicode, void __user *addr, + int signo, int sicode, unsigned long far, int err); void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int, diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index d96dc2c7c09d..54f32a0675df 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -26,9 +26,9 @@ void register_undef_hook(struct undef_hook *hook); void unregister_undef_hook(struct undef_hook *hook); void force_signal_inject(int signal, int code, unsigned long address, unsigned int err); void arm64_notify_segfault(unsigned long addr); -void arm64_force_sig_fault(int signo, int code, void __user *addr, const char *str); -void arm64_force_sig_mceerr(int code, void __user *addr, short lsb, const char *str); -void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr, const char *str); +void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *str); +void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str); +void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str); /* * Move regs->pc to next instruction and do necessary setup before it diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index fa76151de6ff..4f3661eeb7ec 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -234,9 +234,8 @@ static void send_user_sigtrap(int si_code) if (interrupts_enabled(regs)) local_irq_enable(); - arm64_force_sig_fault(SIGTRAP, si_code, - (void __user *)instruction_pointer(regs), - "User debug trap"); + arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs), + "User debug trap"); } static int single_step_handler(unsigned long unused, unsigned int esr, diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 43d4c329775f..dbbddfbf4a72 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -22,7 +22,6 @@ static void notrace el1_abort(struct pt_regs *regs, unsigned long esr) unsigned long far = read_sysreg(far_el1); local_daif_inherit(regs); - far = untagged_addr(far); do_mem_abort(far, esr, regs); } NOKPROBE_SYMBOL(el1_abort); @@ -114,7 +113,6 @@ static void notrace el0_da(struct pt_regs *regs, unsigned long esr) user_exit_irqoff(); local_daif_restore(DAIF_PROCCTX); - far = untagged_addr(far); do_mem_abort(far, esr, regs); } NOKPROBE_SYMBOL(el0_da); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index f49b349e16a3..8ac487c84e37 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -192,14 +192,11 @@ static void ptrace_hbptriggered(struct perf_event *bp, break; } } - arm64_force_sig_ptrace_errno_trap(si_errno, - (void __user *)bkpt->trigger, + arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger, desc); } #endif - arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, - (void __user *)(bkpt->trigger), - desc); + arm64_force_sig_fault(SIGTRAP, 
TRAP_HWBKPT, bkpt->trigger, desc); } /* diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 3c18c2454089..265fe3eb1069 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -68,7 +68,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags) */ long compat_arm_syscall(struct pt_regs *regs, int scno) { - void __user *addr; + unsigned long addr; switch (scno) { /* @@ -111,8 +111,7 @@ long compat_arm_syscall(struct pt_regs *regs, int scno) break; } - addr = (void __user *)instruction_pointer(regs) - - (compat_thumb_mode(regs) ? 2 : 4); + addr = instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4); arm64_notify_die("Oops - bad compat syscall(2)", regs, SIGILL, ILL_ILLTRP, addr, scno); diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 8af4e0e85736..f4ddbe9ed3f1 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -170,32 +170,32 @@ static void arm64_show_signal(int signo, const char *str) __show_regs(regs); } -void arm64_force_sig_fault(int signo, int code, void __user *addr, +void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *str) { arm64_show_signal(signo, str); if (signo == SIGKILL) force_sig(SIGKILL); else - force_sig_fault(signo, code, addr); + force_sig_fault(signo, code, (void __user *)far); } -void arm64_force_sig_mceerr(int code, void __user *addr, short lsb, +void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str) { arm64_show_signal(SIGBUS, str); - force_sig_mceerr(code, addr, lsb); + force_sig_mceerr(code, (void __user *)far, lsb); } -void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr, +void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str) { arm64_show_signal(SIGTRAP, str); - force_sig_ptrace_errno_trap(errno, addr); + force_sig_ptrace_errno_trap(errno, (void __user *)far); } void arm64_notify_die(const char *str, struct pt_regs *regs, - int signo, int sicode, void __user *addr, + int signo, int sicode, unsigned long far, int err) { if (user_mode(regs)) { @@ -203,7 +203,7 @@ void arm64_notify_die(const char *str, struct pt_regs *regs, current->thread.fault_address = 0; current->thread.fault_code = err; - arm64_force_sig_fault(signo, sicode, addr, str); + arm64_force_sig_fault(signo, sicode, far, str); } else { die(str, regs, err); } @@ -374,7 +374,7 @@ void force_signal_inject(int signal, int code, unsigned long address, unsigned i signal = SIGKILL; } - arm64_notify_die(desc, regs, signal, code, (void __user *)address, err); + arm64_notify_die(desc, regs, signal, code, address, err); } /* @@ -385,7 +385,7 @@ void arm64_notify_segfault(unsigned long addr) int code; mmap_read_lock(current->mm); - if (find_vma(current->mm, addr) == NULL) + if (find_vma(current->mm, untagged_addr(addr)) == NULL) code = SEGV_MAPERR; else code = SEGV_ACCERR; @@ -448,12 +448,13 @@ NOKPROBE_SYMBOL(do_ptrauth_fault); static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) { - unsigned long address; + unsigned long tagged_address, address; int rt = ESR_ELx_SYS64_ISS_RT(esr); int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT; int ret = 0; - address = untagged_addr(pt_regs_read_reg(regs, rt)); + tagged_address = pt_regs_read_reg(regs, rt); + address = untagged_addr(tagged_address); switch (crm) { case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */ @@ -480,7 +481,7 @@ static void user_cache_maint_handler(unsigned int esr, 
struct pt_regs *regs) } if (ret) - arm64_notify_segfault(address); + arm64_notify_segfault(tagged_address); else arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); } @@ -772,7 +773,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) */ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) { - void __user *pc = (void __user *)instruction_pointer(regs); + unsigned long pc = instruction_pointer(regs); current->thread.fault_address = 0; current->thread.fault_code = esr; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 1ee94002801f..29a6b8c9e830 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -40,7 +40,7 @@ #include struct fault_info { - int (*fn)(unsigned long addr, unsigned int esr, + int (*fn)(unsigned long far, unsigned int esr, struct pt_regs *regs); int sig; int code; @@ -385,8 +385,11 @@ static void set_thread_esr(unsigned long address, unsigned int esr) current->thread.fault_code = esr; } -static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) +static void do_bad_area(unsigned long far, unsigned int esr, + struct pt_regs *regs) { + unsigned long addr = untagged_addr(far); + /* * If we are in kernel mode at this point, we have no context to * handle this fault with. @@ -395,8 +398,7 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re const struct fault_info *inf = esr_to_fault_info(esr); set_thread_esr(addr, esr); - arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr, - inf->name); + arm64_force_sig_fault(inf->sig, inf->code, far, inf->name); } else { __do_kernel_fault(addr, esr, regs); } @@ -448,7 +450,7 @@ static bool is_write_abort(unsigned int esr) return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM); } -static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, +static int __kprobes do_page_fault(unsigned long far, unsigned int esr, struct pt_regs *regs) { const struct fault_info *inf; @@ -456,6 +458,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, vm_fault_t fault; unsigned long vm_flags = VM_ACCESS_FLAGS; unsigned int mm_flags = FAULT_FLAG_DEFAULT; + unsigned long addr = untagged_addr(far); if (kprobe_page_fault(regs, esr)) return 0; @@ -567,8 +570,7 @@ retry: * We had some memory, but were unable to successfully fix up * this page fault. */ - arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr, - inf->name); + arm64_force_sig_fault(SIGBUS, BUS_ADRERR, far, inf->name); } else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) { unsigned int lsb; @@ -576,8 +578,7 @@ retry: if (fault & VM_FAULT_HWPOISON_LARGE) lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); - arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb, - inf->name); + arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name); } else { /* * Something tried to access memory that isn't in our memory @@ -585,8 +586,7 @@ retry: */ arm64_force_sig_fault(SIGSEGV, fault == VM_FAULT_BADACCESS ? 
SEGV_ACCERR : SEGV_MAPERR, - (void __user *)addr, - inf->name); + far, inf->name); } return 0; @@ -596,33 +596,35 @@ no_context: return 0; } -static int __kprobes do_translation_fault(unsigned long addr, +static int __kprobes do_translation_fault(unsigned long far, unsigned int esr, struct pt_regs *regs) { + unsigned long addr = untagged_addr(far); + if (is_ttbr0_addr(addr)) - return do_page_fault(addr, esr, regs); + return do_page_fault(far, esr, regs); - do_bad_area(addr, esr, regs); + do_bad_area(far, esr, regs); return 0; } -static int do_alignment_fault(unsigned long addr, unsigned int esr, +static int do_alignment_fault(unsigned long far, unsigned int esr, struct pt_regs *regs) { - do_bad_area(addr, esr, regs); + do_bad_area(far, esr, regs); return 0; } -static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) +static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs) { return 1; /* "fault" */ } -static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) +static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs) { const struct fault_info *inf; - void __user *siaddr; + unsigned long siaddr; inf = esr_to_fault_info(esr); @@ -634,19 +636,30 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) return 0; } - if (esr & ESR_ELx_FnV) - siaddr = NULL; - else - siaddr = (void __user *)addr; + if (esr & ESR_ELx_FnV) { + siaddr = 0; + } else { + /* + * The architecture specifies that the tag bits of FAR_EL1 are + * UNKNOWN for synchronous external aborts. Mask them out now + * so that userspace doesn't see them. + */ + siaddr = untagged_addr(far); + } arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); return 0; } -static int do_tag_check_fault(unsigned long addr, unsigned int esr, +static int do_tag_check_fault(unsigned long far, unsigned int esr, struct pt_regs *regs) { - do_bad_area(addr, esr, regs); + /* + * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN for tag + * check faults. Mask them out now so that userspace doesn't see them. + */ + far &= (1UL << 60) - 1; + do_bad_area(far, esr, regs); return 0; } @@ -717,11 +730,12 @@ static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, }; -void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) +void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_fault_info(esr); + unsigned long addr = untagged_addr(far); - if (!inf->fn(addr, esr, regs)) + if (!inf->fn(far, esr, regs)) return; if (!user_mode(regs)) { @@ -730,8 +744,12 @@ void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) show_pte(addr); } - arm64_notify_die(inf->name, regs, - inf->sig, inf->code, (void __user *)addr, esr); + /* + * At this point we have an unrecognized fault type whose tag bits may + * have been defined as UNKNOWN. Therefore we only expose the untagged + * address to the signal handler. 
+ */ + arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr); } NOKPROBE_SYMBOL(do_mem_abort); @@ -744,8 +762,8 @@ NOKPROBE_SYMBOL(do_el0_irq_bp_hardening); void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - arm64_notify_die("SP/PC alignment exception", regs, - SIGBUS, BUS_ADRALN, (void __user *)addr, esr); + arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, + addr, esr); } NOKPROBE_SYMBOL(do_sp_pc_abort); @@ -871,8 +889,7 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, arm64_apply_bp_hardening(); if (inf->fn(addr_if_watchpoint, esr, regs)) { - arm64_notify_die(inf->name, regs, - inf->sig, inf->code, (void __user *)pc, esr); + arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr); } debug_exception_exit(regs); -- cgit v1.2.3 From 367c820ef08082e68df8a3bc12e62393af21e4b5 Mon Sep 17 00:00:00 2001 From: Sumit Garg Date: Wed, 7 Oct 2020 14:21:43 +0530 Subject: arm64: Enable perf events based hard lockup detector With the recent feature added to enable perf events to use pseudo NMIs as interrupts on platforms which support GICv3 or later, its now been possible to enable hard lockup detector (or NMI watchdog) on arm64 platforms. So enable corresponding support. One thing to note here is that normally lockup detector is initialized just after the early initcalls but PMU on arm64 comes up much later as device_initcall(). So we need to re-initialize lockup detection once PMU has been initialized. Signed-off-by: Sumit Garg Acked-by: Alexandru Elisei Link: https://lore.kernel.org/r/1602060704-10921-1-git-send-email-sumit.garg@linaro.org Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 2 ++ arch/arm64/kernel/perf_event.c | 41 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 41 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1515f6f153a0..7d3440770b14 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -170,6 +170,8 @@ config ARM64 select HAVE_NMI select HAVE_PATA_PLATFORM select HAVE_PERF_EVENTS + select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI + select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 3605f77ad4df..38bb07eff872 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -23,6 +23,8 @@ #include #include #include +#include +#include /* ARMv8 Cortex-A53 specific event types. */ #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 @@ -1248,10 +1250,21 @@ static struct platform_driver armv8_pmu_driver = { static int __init armv8_pmu_driver_init(void) { + int ret; + if (acpi_disabled) - return platform_driver_register(&armv8_pmu_driver); + ret = platform_driver_register(&armv8_pmu_driver); else - return arm_pmu_acpi_probe(armv8_pmuv3_init); + ret = arm_pmu_acpi_probe(armv8_pmuv3_init); + + /* + * Try to re-initialize lockup detector after PMU init in + * case PMU events are triggered via NMIs. 
+ */ + if (ret == 0 && arm_pmu_irq_is_nmi()) + lockup_detector_init(); + + return ret; } device_initcall(armv8_pmu_driver_init) @@ -1309,3 +1322,27 @@ void arch_perf_update_userpage(struct perf_event *event, userpg->cap_user_time_zero = 1; userpg->cap_user_time_short = 1; } + +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF +/* + * Safe maximum CPU frequency in case a particular platform doesn't implement + * cpufreq driver. Although, architecture doesn't put any restrictions on + * maximum frequency but 5 GHz seems to be safe maximum given the available + * Arm CPUs in the market which are clocked much less than 5 GHz. On the other + * hand, we can't make it much higher as it would lead to a large hard-lockup + * detection timeout on parts which are running slower (eg. 1GHz on + * Developerbox) and doesn't possess a cpufreq driver. + */ +#define SAFE_MAX_CPU_FREQ 5000000000UL // 5 GHz +u64 hw_nmi_get_sample_period(int watchdog_thresh) +{ + unsigned int cpu = smp_processor_id(); + unsigned long max_cpu_freq; + + max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL; + if (!max_cpu_freq) + max_cpu_freq = SAFE_MAX_CPU_FREQ; + + return (u64)max_cpu_freq * watchdog_thresh; +} +#endif -- cgit v1.2.3 From 49b3cf035edc5d7deb3ad1bf6805ce456ababc5b Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Sat, 21 Nov 2020 01:59:02 -0800 Subject: kasan: arm64: set TCR_EL1.TBID1 when enabled On hardware supporting pointer authentication, we previously ended up enabling TBI on instruction accesses when tag-based ASAN was enabled, but this was costing us 8 bits of PAC entropy, which was unnecessary since tag-based ASAN does not require TBI on instruction accesses. Get them back by setting TCR_EL1.TBID1. Signed-off-by: Peter Collingbourne Reviewed-by: Andrey Konovalov Link: https://linux-review.googlesource.com/id/I3dded7824be2e70ea64df0aabab9598d5aebfcc4 Link: https://lore.kernel.org/r/20f64e26fc8a1309caa446fffcb1b4e2fe9e229f.1605952129.git.pcc@google.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/pgtable-hwdef.h | 1 + arch/arm64/mm/proc.S | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 01a96d07ae74..42442a0ae2ab 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -260,6 +260,7 @@ #define TCR_TBI1 (UL(1) << 38) #define TCR_HA (UL(1) << 39) #define TCR_HD (UL(1) << 40) +#define TCR_TBID1 (UL(1) << 52) #define TCR_NFD0 (UL(1) << 53) #define TCR_NFD1 (UL(1) << 54) #define TCR_E0PD0 (UL(1) << 55) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 0eaf16b0442a..04945f72c64d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -40,7 +40,7 @@ #define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA #ifdef CONFIG_KASAN_SW_TAGS -#define TCR_KASAN_FLAGS TCR_TBI1 +#define TCR_KASAN_FLAGS TCR_TBI1 | TCR_TBID1 #else #define TCR_KASAN_FLAGS 0 #endif -- cgit v1.2.3 From 52ec03f75d599d5bc240422bac8574724a319bec Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Mon, 21 Sep 2020 14:15:56 -0500 Subject: arm64: kaslr: Refactor early init command line parsing Don't ask for *the* command line string to search for "nokaslr" in kaslr_early_init(). Instead, tell a helper function to search all the appropriate command line strings for "nokaslr" and return the result. This paves the way for searching multiple command line strings without having to concatenate the strings in early init. 
Signed-off-by: Tyler Hicks Link: https://lore.kernel.org/r/20200921191557.350256-2-tyhicks@linux.microsoft.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/kaslr.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index b181e0544b79..4c779a67c2a6 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -50,10 +50,16 @@ static __init u64 get_kaslr_seed(void *fdt) return ret; } -static __init const u8 *kaslr_get_cmdline(void *fdt) +static __init bool cmdline_contains_nokaslr(const u8 *cmdline) { - static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; + const u8 *str; + str = strstr(cmdline, "nokaslr"); + return str == cmdline || (str > cmdline && *(str - 1) == ' '); +} + +static __init bool is_kaslr_disabled_cmdline(void *fdt) +{ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) { int node; const u8 *prop; @@ -65,10 +71,10 @@ static __init const u8 *kaslr_get_cmdline(void *fdt) prop = fdt_getprop(fdt, node, "bootargs", NULL); if (!prop) goto out; - return prop; + return cmdline_contains_nokaslr(prop); } out: - return default_cmdline; + return cmdline_contains_nokaslr(CONFIG_CMDLINE); } /* @@ -83,7 +89,6 @@ u64 __init kaslr_early_init(u64 dt_phys) { void *fdt; u64 seed, offset, mask, module_range; - const u8 *cmdline, *str; unsigned long raw; int size; @@ -115,9 +120,7 @@ u64 __init kaslr_early_init(u64 dt_phys) * Check if 'nokaslr' appears on the command line, and * return 0 if that is the case. */ - cmdline = kaslr_get_cmdline(fdt); - str = strstr(cmdline, "nokaslr"); - if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) { + if (is_kaslr_disabled_cmdline(fdt)) { kaslr_status = KASLR_DISABLED_CMDLINE; return 0; } -- cgit v1.2.3 From 1e40d105dae5b7aca8ca7bfd7a0c348c0c509dba Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Mon, 21 Sep 2020 14:15:57 -0500 Subject: arm64: Extend the kernel command line from the bootloader Provide support for additional kernel command line parameters to be concatenated onto the end of the command line provided by the bootloader. Additional parameters are specified in the CONFIG_CMDLINE option when CONFIG_CMDLINE_EXTEND is selected, matching other architectures and leveraging existing support in the FDT and EFI stub code. Special care must be taken for the arch-specific nokaslr parsing. Search the bootargs FDT property and the CONFIG_CMDLINE when CONFIG_CMDLINE_EXTEND is in use. There are a couple of known use cases for this feature: 1) Switching between stable and development kernel versions, where one of the versions benefits from additional command line parameters, such as debugging options. 2) Specifying additional command line parameters, for additional tuning or debugging, when the bootloader does not offer an interactive mode. Signed-off-by: Tyler Hicks Link: https://lore.kernel.org/r/20200921191557.350256-3-tyhicks@linux.microsoft.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 23 ++++++++++++++++++++++- arch/arm64/kernel/kaslr.c | 9 ++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1515f6f153a0..b5f5a9f41795 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1846,15 +1846,36 @@ config CMDLINE entering them here. As a minimum, you should specify the the root device (e.g. root=/dev/nfs). 
+choice + prompt "Kernel command line type" if CMDLINE != "" + default CMDLINE_FROM_BOOTLOADER + help + Choose how the kernel will handle the provided default kernel + command line string. + +config CMDLINE_FROM_BOOTLOADER + bool "Use bootloader kernel arguments if available" + help + Uses the command-line options passed by the boot loader. If + the boot loader doesn't provide any, the default kernel command + string provided in CMDLINE will be used. + +config CMDLINE_EXTEND + bool "Extend bootloader kernel arguments" + help + The command-line arguments provided by the boot loader will be + appended to the default kernel command string. + config CMDLINE_FORCE bool "Always use the default kernel command string" - depends on CMDLINE != "" help Always use the default kernel command string, even if the boot loader passes other arguments to the kernel. This is useful if you cannot or don't want to change the command-line options your boot loader passes to the kernel. +endchoice + config EFI_STUB bool diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 4c779a67c2a6..0921aa1520b0 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -71,7 +71,14 @@ static __init bool is_kaslr_disabled_cmdline(void *fdt) prop = fdt_getprop(fdt, node, "bootargs", NULL); if (!prop) goto out; - return cmdline_contains_nokaslr(prop); + + if (cmdline_contains_nokaslr(prop)) + return true; + + if (IS_ENABLED(CONFIG_CMDLINE_EXTEND)) + goto out; + + return false; } out: return cmdline_contains_nokaslr(CONFIG_CMDLINE); -- cgit v1.2.3 From 344f2db2a18af45faafce13133c84c4f076876a6 Mon Sep 17 00:00:00 2001 From: Youling Tang Date: Thu, 19 Nov 2020 09:45:40 +0800 Subject: arm64: vmlinux.lds.S: Drop redundant *.init.rodata.* We currently try to emit *.init.rodata.* twice, once in INIT_DATA, and once in the line immediately following it. As the two section definitions are identical, the latter is redundant and can be dropped. This patch drops the redundant *.init.rodata.* section definition. Signed-off-by: Youling Tang Acked-by: Will Deacon Link: https://lore.kernel.org/r/1605750340-910-1-git-send-email-tangyouling@loongson.cn Signed-off-by: Catalin Marinas --- arch/arm64/kernel/vmlinux.lds.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 30c102978942..746afbc68df9 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -199,7 +199,7 @@ SECTIONS INIT_CALLS CON_INITCALL INIT_RAM_FS - *(.init.rodata.* .init.bss) /* from the EFI stub */ + *(.init.bss) /* from the EFI stub */ } .exit.data : { EXIT_DATA -- cgit v1.2.3 From ac20ffbb0279aae7be48567fb734eae7d050769e Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Mon, 30 Nov 2020 15:34:42 -0800 Subject: arm64: scs: use vmapped IRQ and SDEI shadow stacks Use scs_alloc() to allocate also IRQ and SDEI shadow stacks instead of using statically allocated stacks. 
Signed-off-by: Sami Tolvanen Acked-by: Will Deacon Link: https://lore.kernel.org/r/20201130233442.2562064-3-samitolvanen@google.com [will: Move CONFIG_SHADOW_CALL_STACK check into init_irq_scs()] Signed-off-by: Will Deacon --- arch/arm64/kernel/Makefile | 1 - arch/arm64/kernel/entry.S | 6 ++-- arch/arm64/kernel/irq.c | 21 ++++++++++++++ arch/arm64/kernel/scs.c | 16 ----------- arch/arm64/kernel/sdei.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 94 insertions(+), 20 deletions(-) delete mode 100644 arch/arm64/kernel/scs.c (limited to 'arch') diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index bbaf0bc4ad60..86364ab6f13f 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -58,7 +58,6 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o -obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso/ probes/ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index b295fb912b12..5c2ac4b5b2da 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -441,7 +441,7 @@ SYM_CODE_END(__swpan_exit_el0) #ifdef CONFIG_SHADOW_CALL_STACK /* also switch to the irq shadow stack */ - adr_this_cpu scs_sp, irq_shadow_call_stack, x26 + ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26 #endif 9998: @@ -1097,9 +1097,9 @@ SYM_CODE_START(__sdei_asm_handler) #ifdef CONFIG_SHADOW_CALL_STACK /* Use a separate shadow call stack for normal and critical events */ cbnz w4, 3f - adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6 + ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6 b 4f -3: adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6 +3: ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6 4: #endif diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 9cf2fb87584a..ac54c21140d4 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -27,6 +28,25 @@ DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts); DEFINE_PER_CPU(unsigned long *, irq_stack_ptr); + +DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr); + +#ifdef CONFIG_SHADOW_CALL_STACK +DEFINE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr); +#endif + +static void init_irq_scs(void) +{ + int cpu; + + if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) + return; + + for_each_possible_cpu(cpu) + per_cpu(irq_shadow_call_stack_ptr, cpu) = + scs_alloc(cpu_to_node(cpu)); +} + #ifdef CONFIG_VMAP_STACK static void init_irq_stacks(void) { @@ -54,6 +74,7 @@ static void init_irq_stacks(void) void __init init_IRQ(void) { init_irq_stacks(); + init_irq_scs(); irqchip_init(); if (!handle_arch_irq) panic("No interrupt controller found."); diff --git a/arch/arm64/kernel/scs.c b/arch/arm64/kernel/scs.c deleted file mode 100644 index e8f7ff45dd8f..000000000000 --- a/arch/arm64/kernel/scs.c +++ /dev/null @@ -1,16 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Shadow Call Stack support. 
- * - * Copyright (C) 2019 Google LLC - */ - -#include -#include - -DEFINE_SCS(irq_shadow_call_stack); - -#ifdef CONFIG_ARM_SDE_INTERFACE -DEFINE_SCS(sdei_shadow_call_stack_normal); -DEFINE_SCS(sdei_shadow_call_stack_critical); -#endif diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 7689f2031c0c..d12fd786b267 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -37,6 +38,14 @@ DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr); DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr); #endif +DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr); +DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr); + +#ifdef CONFIG_SHADOW_CALL_STACK +DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr); +DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr); +#endif + static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu) { unsigned long *p; @@ -90,6 +99,59 @@ static int init_sdei_stacks(void) return err; } +static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu) +{ + void *s; + + s = per_cpu(*ptr, cpu); + if (s) { + per_cpu(*ptr, cpu) = NULL; + scs_free(s); + } +} + +static void free_sdei_scs(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + _free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu); + _free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu); + } +} + +static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu) +{ + void *s; + + s = scs_alloc(cpu_to_node(cpu)); + if (!s) + return -ENOMEM; + per_cpu(*ptr, cpu) = s; + + return 0; +} + +static int init_sdei_scs(void) +{ + int cpu; + int err = 0; + + for_each_possible_cpu(cpu) { + err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu); + if (err) + break; + err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu); + if (err) + break; + } + + if (err) + free_sdei_scs(); + + return err; +} + static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info) { unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); @@ -138,6 +200,14 @@ unsigned long sdei_arch_get_entry_point(int conduit) return 0; } + if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) { + if (init_sdei_scs()) { + if (IS_ENABLED(CONFIG_VMAP_STACK)) + free_sdei_stacks(); + return 0; + } + } + sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC; #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -- cgit v1.2.3 From eec3bf6861a8703ab63992578b1776353c5ac2a1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 1 Dec 2020 11:24:16 +0000 Subject: arm64: sdei: Push IS_ENABLED() checks down to callee functions Handling all combinations of the VMAP_STACK and SHADOW_CALL_STACK options in sdei_arch_get_entry_point() makes the code difficult to read, particularly when considering the error and cleanup paths. Move the checking of these options into the callee functions, so that they return early if the relevant option is not enabled. 
Signed-off-by: Will Deacon --- arch/arm64/kernel/sdei.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index d12fd786b267..118306a836f3 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -61,6 +61,9 @@ static void free_sdei_stacks(void) { int cpu; + if (!IS_ENABLED(CONFIG_VMAP_STACK)) + return; + for_each_possible_cpu(cpu) { _free_sdei_stack(&sdei_stack_normal_ptr, cpu); _free_sdei_stack(&sdei_stack_critical_ptr, cpu); @@ -84,6 +87,9 @@ static int init_sdei_stacks(void) int cpu; int err = 0; + if (!IS_ENABLED(CONFIG_VMAP_STACK)) + return 0; + for_each_possible_cpu(cpu) { err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu); if (err) @@ -137,6 +143,9 @@ static int init_sdei_scs(void) int cpu; int err = 0; + if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) + return 0; + for_each_possible_cpu(cpu) { err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu); if (err) @@ -192,21 +201,14 @@ unsigned long sdei_arch_get_entry_point(int conduit) */ if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) { pr_err("Not supported on this hardware/boot configuration\n"); - return 0; + goto out_err; } - if (IS_ENABLED(CONFIG_VMAP_STACK)) { - if (init_sdei_stacks()) - return 0; - } + if (init_sdei_stacks()) + goto out_err; - if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) { - if (init_sdei_scs()) { - if (IS_ENABLED(CONFIG_VMAP_STACK)) - free_sdei_stacks(); - return 0; - } - } + if (init_sdei_scs()) + goto out_err_free_stacks; sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC; @@ -221,6 +223,10 @@ unsigned long sdei_arch_get_entry_point(int conduit) #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ return (unsigned long)__sdei_asm_handler; +out_err_free_stacks: + free_sdei_stacks(); +out_err: + return 0; } /* -- cgit v1.2.3 From f80d034086d5bfcfd3bf4ab6f52b2df78c3ad2fa Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Nov 2020 12:49:21 +0000 Subject: arm64: ensure ERET from kthread is illegal For consistency, all tasks have a pt_regs reserved at the highest portion of their task stack. Among other things, this ensures that a task's SP is always pointing within its stack rather than pointing immediately past the end. While it is never legitimate to ERET from a kthread, we take pains to initialize pt_regs for kthreads as if this were legitimate. As this is never legitimate, the effects of an erroneous return are rarely tested. Let's simplify things by initializing a kthread's pt_regs such that an ERET is caught as an illegal exception return, and removing the explicit initialization of other exception context. Note that as spectre_v4_enable_task_mitigation() only manipulates the PSTATE within the unused regs this is safe to remove. As user tasks will have their exception context initialized via start_thread() or start_compat_thread(), this should only impact cases where something has gone very wrong and we'd like that to be clearly indicated. 
Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201113124937.20574-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/process.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 4784011cecac..855137daafbf 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -422,16 +422,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, if (clone_flags & CLONE_SETTLS) p->thread.uw.tp_value = tls; } else { + /* + * A kthread has no context to ERET to, so ensure any buggy + * ERET is treated as an illegal exception return. + * + * When a user task is created from a kthread, childregs will + * be initialized by start_thread() or start_compat_thread(). + */ memset(childregs, 0, sizeof(struct pt_regs)); - childregs->pstate = PSR_MODE_EL1h; - if (IS_ENABLED(CONFIG_ARM64_UAO) && - cpus_have_const_cap(ARM64_HAS_UAO)) - childregs->pstate |= PSR_UAO_BIT; - - spectre_v4_enable_task_mitigation(p); - - if (system_uses_irq_prio_masking()) - childregs->pmr_save = GIC_PRIO_IRQON; + childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT; p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x20 = stk_sz; -- cgit v1.2.3 From 515d5c8a1374b307225e41b3e66b0aad08585f53 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Nov 2020 12:49:22 +0000 Subject: arm64: add C wrappers for SET_PSTATE_*() To make callsites easier to read, add trivial C wrappers for the SET_PSTATE_*() helpers, and convert trivial uses over to these. The new wrappers will be used further in subsequent patches. There should be no functional change as a result of this patch. 
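The wrappers are one-liners around the existing instruction generators, e.g. (the full set and converted call sites are in the diff below):

        #define set_pstate_pan(x)       asm volatile(SET_PSTATE_PAN(x))

which turns call sites such as

        asm(SET_PSTATE_PAN(1));

into

        set_pstate_pan(1);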
Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201113124937.20574-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 4 ++++ arch/arm64/kernel/cpufeature.c | 2 +- arch/arm64/kernel/proton-pack.c | 4 ++-- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 174817ba119c..24160f7410e8 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -98,6 +98,10 @@ #define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift)) #define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift)) +#define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x)) +#define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x)) +#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x)) + #define __SYS_BARRIER_INSN(CRm, op2, Rt) \ __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index b7b6804cb931..403fb0dbf299 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1598,7 +1598,7 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) WARN_ON_ONCE(in_interrupt()); sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0); - asm(SET_PSTATE_PAN(1)); + set_pstate_pan(1); } #endif /* CONFIG_ARM64_PAN */ diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 4b202e460e6d..6809b556538f 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -538,12 +538,12 @@ static enum mitigation_state spectre_v4_enable_hw_mitigation(void) if (spectre_v4_mitigations_off()) { sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS); - asm volatile(SET_PSTATE_SSBS(1)); + set_pstate_ssbs(1); return SPECTRE_VULNERABLE; } /* SCTLR_EL1.DSSBS was initialised to 0 during boot */ - asm volatile(SET_PSTATE_SSBS(0)); + set_pstate_ssbs(0); return SPECTRE_MITIGATED; } -- cgit v1.2.3 From ecbb11ab3ebc02763ec53489c9b1f983be9dc882 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Nov 2020 12:49:23 +0000 Subject: arm64: head.S: rename el2_setup -> init_kernel_el For a while now el2_setup has performed some basic initialization of EL1 even when the kernel is booted at EL1, so the name is a little misleading. Further, some comments are stale as with VHE it doesn't drop the CPU to EL1. To clarify things, rename el2_setup to init_kernel_el, and update comments to be clearer as to the function's purpose. There should be no functional change as a result of this patch. 
Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201113124937.20574-4-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 15 ++++++++------- arch/arm64/kernel/sleep.S | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index d8d9caf02834..5a31086e8e85 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -104,7 +104,7 @@ pe_header: */ SYM_CODE_START(primary_entry) bl preserve_boot_args - bl el2_setup // Drop to EL1, w0=cpu_boot_mode + bl init_kernel_el // w0=cpu_boot_mode adrp x23, __PHYS_OFFSET and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0 bl set_cpu_boot_mode_flag @@ -482,13 +482,14 @@ EXPORT_SYMBOL(kimage_vaddr) .section ".idmap.text","awx" /* - * If we're fortunate enough to boot at EL2, ensure that the world is - * sane before dropping to EL1. + * Starting from EL2 or EL1, configure the CPU to execute at the highest + * reachable EL supported by the kernel in a chosen default state. If dropping + * from EL2 to EL1, configure EL2 before configuring EL1. * * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if * booted in EL1 or EL2 respectively. */ -SYM_FUNC_START(el2_setup) +SYM_FUNC_START(init_kernel_el) msr SPsel, #1 // We want to use SP_EL{1,2} mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 @@ -649,7 +650,7 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL) msr elr_el2, lr mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 eret -SYM_FUNC_END(el2_setup) +SYM_FUNC_END(init_kernel_el) /* * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed @@ -699,7 +700,7 @@ SYM_DATA_END(__early_cpu_boot_status) * cores are held until we're ready for them to initialise. */ SYM_FUNC_START(secondary_holding_pen) - bl el2_setup // Drop to EL1, w0=cpu_boot_mode + bl init_kernel_el // w0=cpu_boot_mode bl set_cpu_boot_mode_flag mrs x0, mpidr_el1 mov_q x1, MPIDR_HWID_BITMASK @@ -717,7 +718,7 @@ SYM_FUNC_END(secondary_holding_pen) * be used where CPUs are brought online dynamically by the kernel. */ SYM_FUNC_START(secondary_entry) - bl el2_setup // Drop to EL1 + bl init_kernel_el // w0=cpu_boot_mode bl set_cpu_boot_mode_flag b secondary_startup SYM_FUNC_END(secondary_entry) diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index ba40d57757d6..4be7f7eed875 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -99,7 +99,7 @@ SYM_FUNC_END(__cpu_suspend_enter) .pushsection ".idmap.text", "awx" SYM_CODE_START(cpu_resume) - bl el2_setup // if in EL2 drop to EL1 cleanly + bl init_kernel_el bl __cpu_setup /* enable the MMU early - so we can access sleep_save_stash by va */ adrp x1, swapper_pg_dir -- cgit v1.2.3 From 2ffac9e3fdbd54be953e773f9deb08fc6a488a9f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Nov 2020 12:49:24 +0000 Subject: arm64: head.S: cleanup SCTLR_ELx initialization Let's make SCTLR_ELx initialization a bit clearer by using meaningful names for the initialization values, following the same scheme for SCTLR_EL1 and SCTLR_EL2. These definitions will be used more widely in subsequent patches. There should be no functional change as a result of this patch. 
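Abbreviated, the resulting scheme looks like this (the complete bit lists are in the diff below):

        /* Pre-MMU values: just the RES1 bits plus the endianness setting. */
        #define INIT_SCTLR_EL2_MMU_OFF  (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
        #define INIT_SCTLR_EL1_MMU_OFF  (ENDIAN_SET_EL1 | SCTLR_EL1_RES1)

        /* Runtime value: every SCTLR_EL1 feature the kernel enables (abridged). */
        #define INIT_SCTLR_EL1_MMU_ON   \
                (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_I | /* ... */ SCTLR_EL1_RES1)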
Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201113124937.20574-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/sysreg.h | 18 ++++++++++++------ arch/arm64/kernel/head.S | 6 +++--- arch/arm64/mm/proc.S | 2 +- 3 files changed, 16 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 24160f7410e8..117bf97666ee 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -582,6 +582,9 @@ #define ENDIAN_SET_EL2 0 #endif +#define INIT_SCTLR_EL2_MMU_OFF \ + (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) + /* SCTLR_EL1 specific flags. */ #define SCTLR_EL1_ATA0 (BIT(42)) @@ -615,12 +618,15 @@ #define ENDIAN_SET_EL1 0 #endif -#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\ - SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\ - SCTLR_EL1_DZE | SCTLR_EL1_UCT |\ - SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ - SCTLR_ELx_ITFSB| SCTLR_ELx_ATA | SCTLR_EL1_ATA0 |\ - ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) +#define INIT_SCTLR_EL1_MMU_OFF \ + (ENDIAN_SET_EL1 | SCTLR_EL1_RES1) + +#define INIT_SCTLR_EL1_MMU_ON \ + (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \ + SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \ + SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ + SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \ + SCTLR_EL1_RES1) /* MAIR_ELx memory attributes (used by Linux) */ #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 5a31086e8e85..4d113a4ef929 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -494,13 +494,13 @@ SYM_FUNC_START(init_kernel_el) mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 b.eq 1f - mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1) + mov_q x0, INIT_SCTLR_EL1_MMU_OFF msr sctlr_el1, x0 mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1 isb ret -1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) +1: mov_q x0, INIT_SCTLR_EL2_MMU_OFF msr sctlr_el2, x0 #ifdef CONFIG_ARM64_VHE @@ -621,7 +621,7 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL) * requires no configuration, and all non-hyp-specific EL2 setup * will be done via the _EL1 system register aliases in __cpu_setup. */ - mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1) + mov_q x0, INIT_SCTLR_EL1_MMU_OFF msr sctlr_el1, x0 /* Coprocessor traps. */ diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 23c326a06b2d..29f064e117d9 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -489,6 +489,6 @@ SYM_FUNC_START(__cpu_setup) /* * Prepare SCTLR */ - mov_q x0, SCTLR_EL1_SET + mov_q x0, INIT_SCTLR_EL1_MMU_ON ret // return to head.S SYM_FUNC_END(__cpu_setup) -- cgit v1.2.3 From d87a8e65b5101123a24cddeb7a8a2c7b45f7b60c Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 13 Nov 2020 12:49:25 +0000 Subject: arm64: head.S: always initialize PSTATE As with SCTLR_ELx and other control registers, some PSTATE bits are UNKNOWN out-of-reset, and we may not be able to rely on hardware or firmware to initialize them to our liking prior to entry to the kernel, e.g. in the primary/secondary boot paths and return from idle/suspend. It would be more robust (and easier to reason about) if we consistently initialized PSTATE to a default value, as we do with control registers. This will ensure that the kernel is not adversely affected by bits it is not aware of, e.g. 
when support for a feature such as PAN/UAO is disabled. This patch ensures that PSTATE is consistently initialized at boot time via an ERET. This is not intended to relax the existing requirements (e.g. DAIF bits must still be set prior to entering the kernel). For features detected dynamically (which may require system-wide support), it is still necessary to subsequently modify PSTATE. As ERET is not always a Context Synchronization Event, an ISB is placed before each exception return to ensure updates to control registers have taken effect. This handles the kernel being entered with SCTLR_ELx.EOS clear (or any future control bits being in an UNKNOWN state). Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201113124937.20574-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/ptrace.h | 5 +++++ arch/arm64/kernel/head.S | 32 +++++++++++++++++++++----------- 2 files changed, 26 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 997cf8c8cd52..2547d94634be 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -16,6 +16,11 @@ #define CurrentEL_EL1 (1 << 2) #define CurrentEL_EL2 (2 << 2) +#define INIT_PSTATE_EL1 \ + (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL1h) +#define INIT_PSTATE_EL2 \ + (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | PSR_MODE_EL2h) + /* * PMR values used to mask/unmask interrupts. * diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 4d113a4ef929..0b145bca1b0e 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -486,21 +486,29 @@ EXPORT_SYMBOL(kimage_vaddr) * reachable EL supported by the kernel in a chosen default state. If dropping * from EL2 to EL1, configure EL2 before configuring EL1. * + * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if + * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET. + * * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if * booted in EL1 or EL2 respectively. 
*/ SYM_FUNC_START(init_kernel_el) - msr SPsel, #1 // We want to use SP_EL{1,2} mrs x0, CurrentEL cmp x0, #CurrentEL_EL2 - b.eq 1f + b.eq init_el2 + +SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) mov_q x0, INIT_SCTLR_EL1_MMU_OFF msr sctlr_el1, x0 - mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1 isb - ret + mov_q x0, INIT_PSTATE_EL1 + msr spsr_el1, x0 + msr elr_el1, lr + mov w0, #BOOT_CPU_MODE_EL1 + eret -1: mov_q x0, INIT_SCTLR_EL2_MMU_OFF +SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) + mov_q x0, INIT_SCTLR_EL2_MMU_OFF msr sctlr_el2, x0 #ifdef CONFIG_ARM64_VHE @@ -609,9 +617,12 @@ set_hcr: cbz x2, install_el2_stub - mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 isb - ret + mov_q x0, INIT_PSTATE_EL2 + msr spsr_el2, x0 + msr elr_el2, lr + mov w0, #BOOT_CPU_MODE_EL2 + eret SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL) /* @@ -643,12 +654,11 @@ SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL) 7: adr_l x0, __hyp_stub_vectors msr vbar_el2, x0 - /* spsr */ - mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ - PSR_MODE_EL1h) + isb + mov x0, #INIT_PSTATE_EL1 msr spsr_el2, x0 msr elr_el2, lr - mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 + mov w0, #BOOT_CPU_MODE_EL2 eret SYM_FUNC_END(init_kernel_el) -- cgit v1.2.3 From a0ccf2ba689f773f2882b9c1e79d8a43a19cb513 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:47 +0000 Subject: arm64: sdei: move uaccess logic to arch/arm64/ The SDEI support code is split across arch/arm64/ and drivers/firmware/, largley this is split so that the arch-specific portions are under arch/arm64, and the management logic is under drivers/firmware/. However, exception entry fixups are currently under drivers/firmware. Let's move the exception entry fixups under arch/arm64/. This de-clutters the management logic, and puts all the arch-specific portions in one place. Doing this also allows the fixups to be applied earlier, so things like PAN and UAO will be in a known good state before we run other logic. This will also make subsequent refactoring easier. Signed-off-by: Mark Rutland Reviewed-by: James Morse Cc: Christoph Hellwig Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/sdei.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 7689f2031c0c..4a5f24602aa0 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -178,12 +178,6 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, sdei_api_event_context(i, ®s->regs[i]); } - /* - * We didn't take an exception to get here, set PAN. UAO will be cleared - * by sdei_event_handler()s force_uaccess_begin() call. - */ - __uaccess_enable_hw_pan(); - err = sdei_event_handler(regs, arg); if (err) return SDEI_EV_FAILED; @@ -227,6 +221,16 @@ asmlinkage __kprobes notrace unsigned long __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) { unsigned long ret; + mm_segment_t orig_addr_limit; + + /* + * We didn't take an exception to get here, so the HW hasn't set PAN or + * cleared UAO, and the exception entry code hasn't reset addr_limit. + * Set PAN, then use force_uaccess_begin() to clear UAO and reset + * addr_limit. 
+ */ + __uaccess_enable_hw_pan(); + orig_addr_limit = force_uaccess_begin(); nmi_enter(); @@ -234,5 +238,7 @@ __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) nmi_exit(); + force_uaccess_end(orig_addr_limit); + return ret; } -- cgit v1.2.3 From 2376e75cc77eeb80bf30447f35e9ceb0997508a8 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:48 +0000 Subject: arm64: sdei: explicitly simulate PAN/UAO entry In preparation for removing addr_limit and set_fs() we must decouple the SDEI PAN/UAO manipulation from the uaccess code, and explicitly reinitialize these as required. SDEI enters the kernel with a non-architectural exception, and prior to the most recent revision of the specification (ARM DEN 0054B), PSTATE bits (e.g. PAN, UAO) are not manipulated in the same way as for architectural exceptions. Notably, older versions of the spec can be read ambiguously as to whether PSTATE bits are inherited unchanged from the interrupted context or whether they are generated from scratch, with TF-A doing the latter. We have three cases to consider: 1) The existing TF-A implementation of SDEI will clear PAN and clear UAO (along with other bits in PSTATE) when delivering an SDEI exception. 2) In theory, implementations of SDEI prior to revision B could inherit PAN and UAO (along with other bits in PSTATE) unchanged from the interrupted context. However, in practice such implementations do not exist. 3) Going forward, new implementations of SDEI must clear UAO, and depending on SCTLR_ELx.SPAN must either inherit or set PAN. As we can ignore (2) we can assume that upon SDEI entry, UAO is always clear, though PAN may be clear, inherited, or set per SCTLR_ELx.SPAN. Therefore, we must explicitly initialize PAN, but do not need to do anything for UAO. Considering what we need to do: * When set_fs() is removed, force_uaccess_begin() will have no HW side-effects. As this only clears UAO, which we can assume has already been cleared upon entry, this is not a problem. We do not need to add code to manipulate UAO explicitly. * PAN may be cleared upon entry (in case 1 above), so where a kernel is built to use PAN and this is supported by all CPUs, the kernel must set PAN upon entry to ensure expected behaviour. * PAN may be inherited from the interrupted context (in case 3 above), and so where a kernel is not built to use PAN or where PAN support is not uniform across CPUs, the kernel must clear PAN to ensure expected behaviour. This patch reworks the SDEI code accordingly, explicitly setting PAN to the expected state in all cases. To cater for the cases where the kernel does not use PAN or this is not uniformly supported by hardware we add a new cpu_has_pan() helper which can be used regardless of whether the kernel is built to use PAN. The existing system_uses_ttbr0_pan() is redefined in terms of system_uses_hw_pan() both for clarity and as a minor optimization when HW PAN is not selected. 
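Stripped of its commentary, the entry-time fixup added below reduces to a simple decision (sketch only; the kernel helper also carries the __kprobes/notrace annotations):

        static void __sdei_pstate_entry(void)
        {
                if (system_uses_hw_pan())
                        set_pstate_pan(1);      /* kernel relies on PAN: force it set */
                else if (cpu_has_pan())
                        set_pstate_pan(0);      /* PAN present but unused: force it clear */
        }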
Signed-off-by: Mark Rutland Reviewed-by: James Morse Cc: James Morse Cc: Christoph Hellwig Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpufeature.h | 15 ++++++++++++++- arch/arm64/kernel/sdei.c | 30 +++++++++++++++++++++++++----- 2 files changed, 39 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 97244d4feca9..092092177128 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -667,10 +667,16 @@ static __always_inline bool system_supports_fpsimd(void) return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD); } +static inline bool system_uses_hw_pan(void) +{ + return IS_ENABLED(CONFIG_ARM64_PAN) && + cpus_have_const_cap(ARM64_HAS_PAN); +} + static inline bool system_uses_ttbr0_pan(void) { return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) && - !cpus_have_const_cap(ARM64_HAS_PAN); + !system_uses_hw_pan(); } static __always_inline bool system_supports_sve(void) @@ -762,6 +768,13 @@ static inline bool cpu_has_hw_af(void) ID_AA64MMFR1_HADBS_SHIFT); } +static inline bool cpu_has_pan(void) +{ + u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); + return cpuid_feature_extract_unsigned_field(mmfr1, + ID_AA64MMFR1_PAN_SHIFT); +} + #ifdef CONFIG_ARM64_AMU_EXTN /* Check whether the cpu supports the Activity Monitors Unit (AMU) */ extern bool cpu_has_amu_feat(int cpu); diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index 4a5f24602aa0..c9640e50967a 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -216,6 +216,27 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs, return vbar + 0x480; } +static void __kprobes notrace __sdei_pstate_entry(void) +{ + /* + * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to + * whether PSTATE bits are inherited unchanged or generated from + * scratch, and the TF-A implementation always clears PAN and always + * clears UAO. There are no other known implementations. + * + * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how + * PSTATE is modified upon architectural exceptions, and so PAN is + * either inherited or set per SCTLR_ELx.SPAN, and UAO is always + * cleared. + * + * We must explicitly reset PAN to the expected state, including + * clearing it when the host isn't using it, in case a VM had it set. + */ + if (system_uses_hw_pan()) + set_pstate_pan(1); + else if (cpu_has_pan()) + set_pstate_pan(0); +} asmlinkage __kprobes notrace unsigned long __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) @@ -224,12 +245,11 @@ __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) mm_segment_t orig_addr_limit; /* - * We didn't take an exception to get here, so the HW hasn't set PAN or - * cleared UAO, and the exception entry code hasn't reset addr_limit. - * Set PAN, then use force_uaccess_begin() to clear UAO and reset - * addr_limit. + * We didn't take an exception to get here, so the HW hasn't + * set/cleared bits in PSTATE that we may rely on. Initialize PAN, then + * use force_uaccess_begin() to reset addr_limit. 
*/ - __uaccess_enable_hw_pan(); + __sdei_pstate_entry(); orig_addr_limit = force_uaccess_begin(); nmi_enter(); -- cgit v1.2.3 From 923e1e7d8223cfa6e67d00ad238ee415c3c59320 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:50 +0000 Subject: arm64: uaccess: rename privileged uaccess routines We currently have many uaccess_*{enable,disable}*() variants, which subsequent patches will cut down as part of removing set_fs() and friends. Once this simplification is made, most uaccess routines will only need to ensure that the user page tables are mapped in TTBR0, as is currently dealt with by uaccess_ttbr0_{enable,disable}(). The existing uaccess_{enable,disable}() routines ensure that user page tables are mapped in TTBR0, and also disable PAN protections, which is necessary to be able to use atomics on user memory, but also permit unrelated privileged accesses to access user memory. As preparatory step, let's rename uaccess_{enable,disable}() to uaccess_{enable,disable}_privileged(), highlighting this caveat and discouraging wider misuse. Subsequent patches can reuse the uaccess_{enable,disable}() naming for the common case of ensuring the user page tables are mapped in TTBR0. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-5-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/futex.h | 8 ++++---- arch/arm64/include/asm/uaccess.h | 4 ++-- arch/arm64/kernel/armv8_deprecated.c | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 97f6a63810ec..8e41faa37c69 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -16,7 +16,7 @@ do { \ unsigned int loops = FUTEX_MAX_LOOPS; \ \ - uaccess_enable(); \ + uaccess_enable_privileged(); \ asm volatile( \ " prfm pstl1strm, %2\n" \ "1: ldxr %w1, %2\n" \ @@ -39,7 +39,7 @@ do { \ "+r" (loops) \ : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \ : "memory"); \ - uaccess_disable(); \ + uaccess_disable_privileged(); \ } while (0) static inline int @@ -95,7 +95,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, return -EFAULT; uaddr = __uaccess_mask_ptr(_uaddr); - uaccess_enable(); + uaccess_enable_privileged(); asm volatile("// futex_atomic_cmpxchg_inatomic\n" " prfm pstl1strm, %2\n" "1: ldxr %w1, %2\n" @@ -118,7 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops) : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN) : "memory"); - uaccess_disable(); + uaccess_disable_privileged(); if (!ret) *uval = val; diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 991dd5f031e4..d6a4e496ebc6 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -200,12 +200,12 @@ do { \ CONFIG_ARM64_PAN)); \ } while (0) -static inline void uaccess_disable(void) +static inline void uaccess_disable_privileged(void) { __uaccess_disable(ARM64_HAS_PAN); } -static inline void uaccess_enable(void) +static inline void uaccess_enable_privileged(void) { __uaccess_enable(ARM64_HAS_PAN); } diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index 7364de008bab..0e86e8b9cedd 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ 
-277,7 +277,7 @@ static void __init register_insn_emulation_sysctl(void) #define __user_swpX_asm(data, addr, res, temp, temp2, B) \ do { \ - uaccess_enable(); \ + uaccess_enable_privileged(); \ __asm__ __volatile__( \ " mov %w3, %w7\n" \ "0: ldxr"B" %w2, [%4]\n" \ @@ -302,7 +302,7 @@ do { \ "i" (-EFAULT), \ "i" (__SWP_LL_SC_LOOPS) \ : "memory"); \ - uaccess_disable(); \ + uaccess_disable_privileged(); \ } while (0) #define __user_swp_asm(data, addr, res, temp, temp2) \ -- cgit v1.2.3 From 9e94fdade4d8f3c9b64c302ba081e2718c9e4087 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:51 +0000 Subject: arm64: uaccess: simplify __copy_user_flushcache() Currently __copy_user_flushcache() open-codes raw_copy_from_user(), and doesn't use uaccess_mask_ptr() on the user address. Let's have it call raw_copy_from_user(), which is both a simplification and ensures that user pointers are masked under speculation. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Robin Murphy Cc: Christoph Hellwig Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-6-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/lib/uaccess_flushcache.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c index bfa30b75b2b8..c83bb5a4aad2 100644 --- a/arch/arm64/lib/uaccess_flushcache.c +++ b/arch/arm64/lib/uaccess_flushcache.c @@ -30,9 +30,7 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from, { unsigned long rc; - uaccess_enable_not_uao(); - rc = __arch_copy_from_user(to, from, n); - uaccess_disable_not_uao(); + rc = raw_copy_from_user(to, from, n); /* See above */ __clean_dcache_area_pop(to, n - rc); -- cgit v1.2.3 From f253d827f33cb5a5990b5cfd271941d1a21ecd85 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:52 +0000 Subject: arm64: uaccess: refactor __{get,put}_user As a step towards implementing __{get,put}_kernel_nofault(), this patch splits most user-memory specific logic out of __{get,put}_user(), with the memory access and fault handling in new __{raw_get,put}_mem() helpers. For now the LDR/LDTR patching is left within the *get_mem() helpers, and will be removed in a subsequent patch. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-7-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/uaccess.h | 44 ++++++++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index d6a4e496ebc6..743f209d3fab 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -253,7 +253,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) * The "__xxx_error" versions set the third argument to -EFAULT if an error * occurs, and leave it unchanged on success. 
*/ -#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ +#define __get_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ alt_instr " " reg "1, [%2]\n", feature) \ @@ -268,35 +268,40 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT)) -#define __raw_get_user(x, ptr, err) \ +#define __raw_get_mem(x, ptr, err) \ do { \ unsigned long __gu_val; \ - __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ + __get_mem_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 2: \ - __get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ + __get_mem_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 4: \ - __get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ + __get_mem_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 8: \ - __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ + __get_mem_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ default: \ BUILD_BUG(); \ } \ - uaccess_disable_not_uao(); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) +#define __raw_get_user(x, ptr, err) \ +do { \ + __chk_user_ptr(ptr); \ + uaccess_enable_not_uao(); \ + __raw_get_mem(x, ptr, err); \ + uaccess_disable_not_uao(); \ +} while (0) + #define __get_user_error(x, ptr, err) \ do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ @@ -318,7 +323,7 @@ do { \ #define get_user __get_user -#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ +#define __put_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \ asm volatile( \ "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ alt_instr " " reg "1, [%2]\n", feature) \ @@ -332,31 +337,36 @@ do { \ : "+r" (err) \ : "r" (x), "r" (addr), "i" (-EFAULT)) -#define __raw_put_user(x, ptr, err) \ +#define __raw_put_mem(x, ptr, err) \ do { \ __typeof__(*(ptr)) __pu_val = (x); \ - __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ switch (sizeof(*(ptr))) { \ case 1: \ - __put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ + __put_mem_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 2: \ - __put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ + __put_mem_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 4: \ - __put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \ + __put_mem_asm("str", "sttr", "%w", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ case 8: \ - __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \ + __put_mem_asm("str", "sttr", "%x", __pu_val, (ptr), \ (err), ARM64_HAS_UAO); \ break; \ default: \ BUILD_BUG(); \ } \ +} while (0) + +#define __raw_put_user(x, ptr, err) \ +do { \ + __chk_user_ptr(ptr); \ + uaccess_enable_not_uao(); \ + __raw_put_mem(x, ptr, err); \ uaccess_disable_not_uao(); \ } while (0) -- cgit v1.2.3 From fc703d80130b1c9d6783f4cbb9516fd5fe4a750d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:53 +0000 Subject: arm64: uaccess: split user/kernel routines This patch separates arm64's user and kernel memory access primitives into distinct routines, adding new __{get,put}_kernel_nofault() helpers to access kernel memory, upon which core code builds larger copy routines. 
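For illustration, a caller of the new kernel-side helper looks roughly like the following; read_kernel_int() is a made-up example, but the macro arguments and the goto-label convention match the definition added by this patch:

        /* Illustrative only: read an int from a kernel address, tolerating faults. */
        static int read_kernel_int(const int *addr, int *val)
        {
                __get_kernel_nofault(val, addr, int, fault);
                return 0;
        fault:
                return -EFAULT;
        }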
The kernel access routines (using LDR/STR) are not affected by PAN (when legitimately accessing kernel memory), nor are they affected by UAO. Switching to KERNEL_DS may set UAO, but this does not adversely affect the kernel access routines. The user access routines (using LDTR/STTR) are not affected by PAN (when legitimately accessing user memory), but are affected by UAO. As these are only legitimate to use under USER_DS with UAO clear, this should not be problematic. Routines performing atomics to user memory (futex and deprecated instruction emulation) still need to transiently clear PAN, and these are left as-is. These are never used on kernel memory. Subsequent patches will refactor the uaccess helpers to remove redundant code, and will also remove the redundant PAN/UAO manipulation. Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-8-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/asm-uaccess.h | 48 +++++---------------------- arch/arm64/include/asm/uaccess.h | 64 +++++++++++++++++++++--------------- 2 files changed, 47 insertions(+), 65 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 2c26ca5b7bb0..2b5454fa0f24 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -59,62 +59,32 @@ alternative_else_nop_endif #endif /* - * Generate the assembly for UAO alternatives with exception table entries. + * Generate the assembly for LDTR/STTR with exception table entries. * This is complicated as there is no post-increment or pair versions of the * unprivileged instructions, and USER() only works for single instructions. 
*/ -#ifdef CONFIG_ARM64_UAO .macro uao_ldp l, reg1, reg2, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: ldp \reg1, \reg2, [\addr], \post_inc; -8889: nop; - nop; - alternative_else - ldtr \reg1, [\addr]; - ldtr \reg2, [\addr, #8]; - add \addr, \addr, \post_inc; - alternative_endif +8888: ldtr \reg1, [\addr]; +8889: ldtr \reg2, [\addr, #8]; + add \addr, \addr, \post_inc; _asm_extable 8888b,\l; _asm_extable 8889b,\l; .endm .macro uao_stp l, reg1, reg2, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: stp \reg1, \reg2, [\addr], \post_inc; -8889: nop; - nop; - alternative_else - sttr \reg1, [\addr]; - sttr \reg2, [\addr, #8]; - add \addr, \addr, \post_inc; - alternative_endif +8888: sttr \reg1, [\addr]; +8889: sttr \reg2, [\addr, #8]; + add \addr, \addr, \post_inc; _asm_extable 8888b,\l; _asm_extable 8889b,\l; .endm .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc - alternative_if_not ARM64_HAS_UAO -8888: \inst \reg, [\addr], \post_inc; - nop; - alternative_else - \alt_inst \reg, [\addr]; - add \addr, \addr, \post_inc; - alternative_endif +8888: \alt_inst \reg, [\addr]; + add \addr, \addr, \post_inc; _asm_extable 8888b,\l; .endm -#else - .macro uao_ldp l, reg1, reg2, addr, post_inc - USER(\l, ldp \reg1, \reg2, [\addr], \post_inc) - .endm - .macro uao_stp l, reg1, reg2, addr, post_inc - USER(\l, stp \reg1, \reg2, [\addr], \post_inc) - .endm - .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc - USER(\l, \inst \reg, [\addr], \post_inc) - .endm -#endif - #endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 743f209d3fab..eb04f02299fe 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -24,6 +24,8 @@ #include #include +#define HAVE_GET_KERNEL_NOFAULT + #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) @@ -253,10 +255,9 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) * The "__xxx_error" versions set the third argument to -EFAULT if an error * occurs, and leave it unchanged on success. 
*/ -#define __get_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \ +#define __get_mem_asm(load, reg, x, addr, err) \ asm volatile( \ - "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ - alt_instr " " reg "1, [%2]\n", feature) \ + "1: " load " " reg "1, [%2]\n" \ "2:\n" \ " .section .fixup, \"ax\"\n" \ " .align 2\n" \ @@ -268,25 +269,21 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT)) -#define __raw_get_mem(x, ptr, err) \ +#define __raw_get_mem(ldr, x, ptr, err) \ do { \ unsigned long __gu_val; \ switch (sizeof(*(ptr))) { \ case 1: \ - __get_mem_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \ break; \ case 2: \ - __get_mem_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \ break; \ case 4: \ - __get_mem_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \ break; \ case 8: \ - __get_mem_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \ break; \ default: \ BUILD_BUG(); \ @@ -298,7 +295,7 @@ do { \ do { \ __chk_user_ptr(ptr); \ uaccess_enable_not_uao(); \ - __raw_get_mem(x, ptr, err); \ + __raw_get_mem("ldtr", x, ptr, err); \ uaccess_disable_not_uao(); \ } while (0) @@ -323,10 +320,19 @@ do { \ #define get_user __get_user -#define __put_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \ +#define __get_kernel_nofault(dst, src, type, err_label) \ +do { \ + int __gkn_err = 0; \ + \ + __raw_get_mem("ldr", *((type *)(dst)), \ + (__force type *)(src), __gkn_err); \ + if (unlikely(__gkn_err)) \ + goto err_label; \ +} while (0) + +#define __put_mem_asm(store, reg, x, addr, err) \ asm volatile( \ - "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ - alt_instr " " reg "1, [%2]\n", feature) \ + "1: " store " " reg "1, [%2]\n" \ "2:\n" \ " .section .fixup,\"ax\"\n" \ " .align 2\n" \ @@ -337,25 +343,21 @@ do { \ : "+r" (err) \ : "r" (x), "r" (addr), "i" (-EFAULT)) -#define __raw_put_mem(x, ptr, err) \ +#define __raw_put_mem(str, x, ptr, err) \ do { \ __typeof__(*(ptr)) __pu_val = (x); \ switch (sizeof(*(ptr))) { \ case 1: \ - __put_mem_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \ break; \ case 2: \ - __put_mem_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \ break; \ case 4: \ - __put_mem_asm("str", "sttr", "%w", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __put_mem_asm(str, "%w", __pu_val, (ptr), (err)); \ break; \ case 8: \ - __put_mem_asm("str", "sttr", "%x", __pu_val, (ptr), \ - (err), ARM64_HAS_UAO); \ + __put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \ break; \ default: \ BUILD_BUG(); \ @@ -366,7 +368,7 @@ do { \ do { \ __chk_user_ptr(ptr); \ uaccess_enable_not_uao(); \ - __raw_put_mem(x, ptr, err); \ + __raw_put_mem("sttr", x, ptr, err); \ uaccess_disable_not_uao(); \ } while (0) @@ -391,6 +393,16 @@ do { \ #define put_user __put_user +#define __put_kernel_nofault(dst, src, type, err_label) \ +do { \ + int __pkn_err = 0; \ + \ + __raw_put_mem("str", *((type *)(src)), \ + (__force type *)(dst), __pkn_err); \ + if (unlikely(__pkn_err)) \ + goto err_label; \ +} while(0) + extern unsigned long __must_check __arch_copy_from_user(void *to, const void 
__user *from, unsigned long n); #define raw_copy_from_user(to, from, n) \ ({ \ -- cgit v1.2.3 From 7b90dc40e36e0beb0fdecfef80f33a2e88aced14 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:54 +0000 Subject: arm64: uaccess cleanup macro naming Now the uaccess primitives use LDTR/STTR unconditionally, the uao_{ldp,stp,user_alternative} asm macros are misnamed, and have a redundant argument. Let's remove the redundant argument and rename these to user_{ldp,stp,ldst} respectively to clean this up. Signed-off-by: Mark Rutland Reviewed-by: Robin Murohy Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-9-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/asm-uaccess.h | 8 ++++---- arch/arm64/lib/clear_user.S | 8 ++++---- arch/arm64/lib/copy_from_user.S | 8 ++++---- arch/arm64/lib/copy_in_user.S | 16 ++++++++-------- arch/arm64/lib/copy_to_user.S | 8 ++++---- arch/arm64/lib/mte.S | 4 ++-- 6 files changed, 26 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 2b5454fa0f24..0fa95d1b311f 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -63,7 +63,7 @@ alternative_else_nop_endif * This is complicated as there is no post-increment or pair versions of the * unprivileged instructions, and USER() only works for single instructions. */ - .macro uao_ldp l, reg1, reg2, addr, post_inc + .macro user_ldp l, reg1, reg2, addr, post_inc 8888: ldtr \reg1, [\addr]; 8889: ldtr \reg2, [\addr, #8]; add \addr, \addr, \post_inc; @@ -72,7 +72,7 @@ alternative_else_nop_endif _asm_extable 8889b,\l; .endm - .macro uao_stp l, reg1, reg2, addr, post_inc + .macro user_stp l, reg1, reg2, addr, post_inc 8888: sttr \reg1, [\addr]; 8889: sttr \reg2, [\addr, #8]; add \addr, \addr, \post_inc; @@ -81,8 +81,8 @@ alternative_else_nop_endif _asm_extable 8889b,\l; .endm - .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc -8888: \alt_inst \reg, [\addr]; + .macro user_ldst l, inst, reg, addr, post_inc +8888: \inst \reg, [\addr]; add \addr, \addr, \post_inc; _asm_extable 8888b,\l; diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 48a3a26eff66..af9afcbec92c 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -24,20 +24,20 @@ SYM_FUNC_START(__arch_clear_user) subs x1, x1, #8 b.mi 2f 1: -uao_user_alternative 9f, str, sttr, xzr, x0, 8 +user_ldst 9f, sttr, xzr, x0, 8 subs x1, x1, #8 b.pl 1b 2: adds x1, x1, #4 b.mi 3f -uao_user_alternative 9f, str, sttr, wzr, x0, 4 +user_ldst 9f, sttr, wzr, x0, 4 sub x1, x1, #4 3: adds x1, x1, #2 b.mi 4f -uao_user_alternative 9f, strh, sttrh, wzr, x0, 2 +user_ldst 9f, sttrh, wzr, x0, 2 sub x1, x1, #2 4: adds x1, x1, #1 b.mi 5f -uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 +user_ldst 9f, sttrb, wzr, x0, 0 5: mov x0, #0 ret SYM_FUNC_END(__arch_clear_user) diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 0f8a3a9e3795..95cd62d67371 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -21,7 +21,7 @@ */ .macro ldrb1 reg, ptr, val - uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val + user_ldst 9998f, ldtrb, \reg, \ptr, \val .endm .macro strb1 reg, ptr, val @@ -29,7 +29,7 @@ .endm .macro ldrh1 reg, ptr, val - uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val + user_ldst 9998f, ldtrh, \reg, \ptr, \val .endm .macro strh1 
reg, ptr, val @@ -37,7 +37,7 @@ .endm .macro ldr1 reg, ptr, val - uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val + user_ldst 9998f, ldtr, \reg, \ptr, \val .endm .macro str1 reg, ptr, val @@ -45,7 +45,7 @@ .endm .macro ldp1 reg1, reg2, ptr, val - uao_ldp 9998f, \reg1, \reg2, \ptr, \val + user_ldp 9998f, \reg1, \reg2, \ptr, \val .endm .macro stp1 reg1, reg2, ptr, val diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 80e37ada0ee1..1f61cd0df062 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -22,35 +22,35 @@ * x0 - bytes not copied */ .macro ldrb1 reg, ptr, val - uao_user_alternative 9998f, ldrb, ldtrb, \reg, \ptr, \val + user_ldst 9998f, ldtrb, \reg, \ptr, \val .endm .macro strb1 reg, ptr, val - uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val + user_ldst 9998f, sttrb, \reg, \ptr, \val .endm .macro ldrh1 reg, ptr, val - uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val + user_ldst 9998f, ldtrh, \reg, \ptr, \val .endm .macro strh1 reg, ptr, val - uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val + user_ldst 9998f, sttrh, \reg, \ptr, \val .endm .macro ldr1 reg, ptr, val - uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val + user_ldst 9998f, ldtr, \reg, \ptr, \val .endm .macro str1 reg, ptr, val - uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val + user_ldst 9998f, sttr, \reg, \ptr, \val .endm .macro ldp1 reg1, reg2, ptr, val - uao_ldp 9998f, \reg1, \reg2, \ptr, \val + user_ldp 9998f, \reg1, \reg2, \ptr, \val .endm .macro stp1 reg1, reg2, ptr, val - uao_stp 9998f, \reg1, \reg2, \ptr, \val + user_stp 9998f, \reg1, \reg2, \ptr, \val .endm end .req x5 diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 4ec59704b8f2..043da90f5dd7 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -24,7 +24,7 @@ .endm .macro strb1 reg, ptr, val - uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val + user_ldst 9998f, sttrb, \reg, \ptr, \val .endm .macro ldrh1 reg, ptr, val @@ -32,7 +32,7 @@ .endm .macro strh1 reg, ptr, val - uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val + user_ldst 9998f, sttrh, \reg, \ptr, \val .endm .macro ldr1 reg, ptr, val @@ -40,7 +40,7 @@ .endm .macro str1 reg, ptr, val - uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val + user_ldst 9998f, sttr, \reg, \ptr, \val .endm .macro ldp1 reg1, reg2, ptr, val @@ -48,7 +48,7 @@ .endm .macro stp1 reg1, reg2, ptr, val - uao_stp 9998f, \reg1, \reg2, \ptr, \val + user_stp 9998f, \reg1, \reg2, \ptr, \val .endm end .req x5 diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S index cceed41bba15..351537c12f36 100644 --- a/arch/arm64/lib/mte.S +++ b/arch/arm64/lib/mte.S @@ -67,7 +67,7 @@ SYM_FUNC_START(mte_copy_tags_from_user) mov x3, x1 cbz x2, 2f 1: - uao_user_alternative 2f, ldrb, ldtrb, w4, x1, 0 + user_ldst 2f, ldtrb, w4, x1, 0 lsl x4, x4, #MTE_TAG_SHIFT stg x4, [x0], #MTE_GRANULE_SIZE add x1, x1, #1 @@ -94,7 +94,7 @@ SYM_FUNC_START(mte_copy_tags_to_user) 1: ldg x4, [x1] ubfx x4, x4, #MTE_TAG_SHIFT, #MTE_TAG_SIZE - uao_user_alternative 2f, strb, sttrb, w4, x0, 0 + user_ldst 2f, sttrb, w4, x0, 0 add x0, x0, #1 add x1, x1, #MTE_GRANULE_SIZE subs x2, x2, #1 -- cgit v1.2.3 From 3d2403fd10a1dbb359b154af41ffed9f2a7520e8 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:55 +0000 Subject: arm64: uaccess: remove set_fs() Now that the uaccess primitives dont take addr_limit into account, we have no need to manipulate this via set_fs() and get_fs(). 
Remove support for these, along with some infrastructure this renders redundant. We no longer need to flip UAO to access kernel memory under KERNEL_DS, and head.S unconditionally clears UAO for all kernel configurations via an ERET in init_kernel_el. Thus, we don't need to dynamically flip UAO, nor do we need to context-switch it. However, we still need to adjust PAN during SDEI entry. Masking of __user pointers no longer needs to use the dynamic value of addr_limit, and can use a constant derived from the maximum possible userspace task size. A new TASK_SIZE_MAX constant is introduced for this, which is also used by core code. In configurations supporting 52-bit VAs, this may include a region of unusable VA space above a 48-bit TTBR0 limit, but never includes any portion of TTBR1. Note that TASK_SIZE_MAX is an exclusive limit, while USER_DS and KERNEL_DS were inclusive limits, and is converted to a mask by subtracting one. As the SDEI entry code repurposes the otherwise unnecessary pt_regs::orig_addr_limit field to store the TTBR1 of the interrupted context, for now we rename that to pt_regs::sdei_ttbr1. In future we can consider factoring that out. Signed-off-by: Mark Rutland Acked-by: James Morse Cc: Christoph Hellwig Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-10-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 1 - arch/arm64/include/asm/exec.h | 1 - arch/arm64/include/asm/processor.h | 4 +--- arch/arm64/include/asm/ptrace.h | 3 +-- arch/arm64/include/asm/thread_info.h | 4 ---- arch/arm64/include/asm/uaccess.h | 41 ++++++------------------------------ arch/arm64/kernel/asm-offsets.c | 3 +-- arch/arm64/kernel/cpufeature.c | 4 ---- arch/arm64/kernel/entry.S | 19 +++-------------- arch/arm64/kernel/process.c | 12 ----------- arch/arm64/kernel/sdei.c | 7 +----- arch/arm64/kernel/suspend.c | 1 - arch/arm64/mm/fault.c | 5 ----- 13 files changed, 13 insertions(+), 92 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0f8b2e35ba99..6fae3ec1f2b6 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -195,7 +195,6 @@ config ARM64 select PCI_SYSCALL if PCI select POWER_RESET select POWER_SUPPLY - select SET_FS select SPARSE_IRQ select SWIOTLB select SYSCTL_EXCEPTION_TRACE diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h index 1aae6f9962fc..9a1c22ce664b 100644 --- a/arch/arm64/include/asm/exec.h +++ b/arch/arm64/include/asm/exec.h @@ -10,6 +10,5 @@ #include extern unsigned long arch_align_stack(unsigned long sp); -void uao_thread_switch(struct task_struct *next); #endif /* __ASM_EXEC_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index fce8cbecd6bc..724249f37af5 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -8,9 +8,6 @@ #ifndef __ASM_PROCESSOR_H #define __ASM_PROCESSOR_H -#define KERNEL_DS UL(-1) -#define USER_DS ((UL(1) << VA_BITS) - 1) - /* * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is * no point in shifting all network buffers by 2 bytes just to make some IP @@ -48,6 +45,7 @@ #define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN) #define TASK_SIZE_64 (UL(1) << vabits_actual) +#define TASK_SIZE_MAX (UL(1) << VA_BITS) #ifdef CONFIG_COMPAT #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS) diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 2547d94634be..2bb53bc3c326 100644 --- 
a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -193,8 +193,7 @@ struct pt_regs { s32 syscallno; u32 unused2; #endif - - u64 orig_addr_limit; + u64 sdei_ttbr1; /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */ u64 pmr_save; u64 stackframe[2]; diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 1fbab854a51b..3a88a55de568 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -18,14 +18,11 @@ struct task_struct; #include #include -typedef unsigned long mm_segment_t; - /* * low level task data that entry.S needs immediate access to. */ struct thread_info { unsigned long flags; /* low level flags */ - mm_segment_t addr_limit; /* address limit */ #ifdef CONFIG_ARM64_SW_TTBR0_PAN u64 ttbr0; /* saved TTBR0_EL1 */ #endif @@ -119,7 +116,6 @@ void arch_release_task_struct(struct task_struct *tsk); { \ .flags = _TIF_FOREIGN_FPSTATE, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ INIT_SCS \ } diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index eb04f02299fe..e639d8b2b38d 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -26,44 +26,16 @@ #define HAVE_GET_KERNEL_NOFAULT -#define get_fs() (current_thread_info()->addr_limit) - -static inline void set_fs(mm_segment_t fs) -{ - current_thread_info()->addr_limit = fs; - - /* - * Prevent a mispredicted conditional call to set_fs from forwarding - * the wrong address limit to access_ok under speculation. - */ - spec_bar(); - - /* On user-mode return, check fs is correct */ - set_thread_flag(TIF_FSCHECK); - - /* - * Enable/disable UAO so that copy_to_user() etc can access - * kernel memory with the unprivileged instructions. - */ - if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS) - asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO)); - else - asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO, - CONFIG_ARM64_UAO)); -} - -#define uaccess_kernel() (get_fs() == KERNEL_DS) - /* * Test whether a block of memory is a valid user space address. * Returns 1 if the range is valid, 0 otherwise. * * This is equivalent to the following test: - * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 + * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX */ static inline unsigned long __range_ok(const void __user *addr, unsigned long size) { - unsigned long ret, limit = current_thread_info()->addr_limit; + unsigned long ret, limit = TASK_SIZE_MAX - 1; /* * Asynchronous I/O running in a kernel thread does not have the @@ -96,7 +68,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si } #define access_ok(addr, size) __range_ok(addr, size) -#define user_addr_max get_fs #define _ASM_EXTABLE(from, to) \ " .pushsection __ex_table, \"a\"\n" \ @@ -226,9 +197,9 @@ static inline void uaccess_enable_not_uao(void) } /* - * Sanitise a uaccess pointer such that it becomes NULL if above the - * current addr_limit. In case the pointer is tagged (has the top byte set), - * untag the pointer before checking. + * Sanitise a uaccess pointer such that it becomes NULL if above the maximum + * user address. In case the pointer is tagged (has the top byte set), untag + * the pointer before checking. 
*/ #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) static inline void __user *__uaccess_mask_ptr(const void __user *ptr) @@ -239,7 +210,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) " bics xzr, %3, %2\n" " csel %0, %1, xzr, eq\n" : "=&r" (safe_ptr) - : "r" (ptr), "r" (current_thread_info()->addr_limit), + : "r" (ptr), "r" (TASK_SIZE_MAX - 1), "r" (untagged_addr(ptr)) : "cc"); diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 7d32fc959b1a..679b19b8a7ff 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -30,7 +30,6 @@ int main(void) BLANK(); DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); - DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit)); #ifdef CONFIG_ARM64_SW_TTBR0_PAN DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); #endif @@ -70,7 +69,7 @@ int main(void) DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); DEFINE(S_PC, offsetof(struct pt_regs, pc)); DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); - DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit)); + DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1)); DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save)); DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 403fb0dbf299..ddca55fb79f5 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1777,10 +1777,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sys_reg = SYS_ID_AA64MMFR2_EL1, .field_pos = ID_AA64MMFR2_UAO_SHIFT, .min_field_value = 1, - /* - * We rely on stop_machine() calling uao_thread_switch() to set - * UAO immediately after patching. - */ }, #endif /* CONFIG_ARM64_UAO */ #ifdef CONFIG_ARM64_PAN diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index b295fb912b12..bdd3b57b12f5 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -216,12 +216,6 @@ alternative_else_nop_endif .else add x21, sp, #S_FRAME_SIZE get_current_task tsk - /* Save the task's original addr_limit and set USER_DS */ - ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] - str x20, [sp, #S_ORIG_ADDR_LIMIT] - mov x20, #USER_DS - str x20, [tsk, #TSK_TI_ADDR_LIMIT] - /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ .endif /* \el == 0 */ mrs x22, elr_el1 mrs x23, spsr_el1 @@ -279,12 +273,6 @@ alternative_else_nop_endif .macro kernel_exit, el .if \el != 0 disable_daif - - /* Restore the task's original addr_limit. */ - ldr x20, [sp, #S_ORIG_ADDR_LIMIT] - str x20, [tsk, #TSK_TI_ADDR_LIMIT] - - /* No need to restore UAO, it will be restored from SPSR_EL1 */ .endif /* Restore pmr */ @@ -999,10 +987,9 @@ SYM_CODE_START(__sdei_asm_entry_trampoline) mov x4, xzr /* - * Use reg->interrupted_regs.addr_limit to remember whether to unmap - * the kernel on exit. + * Remember whether to unmap the kernel on exit. */ -1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)] +1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)] #ifdef CONFIG_RANDOMIZE_BASE adr x4, tramp_vectors + PAGE_SIZE @@ -1023,7 +1010,7 @@ NOKPROBE(__sdei_asm_entry_trampoline) * x4: struct sdei_registered_event argument from registration time. 
*/ SYM_CODE_START(__sdei_asm_exit_trampoline) - ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)] + ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)] cbnz x4, 1f tramp_unmap_kernel tmp=x4 diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 855137daafbf..569910ac0fd2 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -460,17 +460,6 @@ static void tls_thread_switch(struct task_struct *next) write_sysreg(*task_user_tls(next), tpidr_el0); } -/* Restore the UAO state depending on next's addr_limit */ -void uao_thread_switch(struct task_struct *next) -{ - if (IS_ENABLED(CONFIG_ARM64_UAO)) { - if (task_thread_info(next)->addr_limit == KERNEL_DS) - asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO)); - else - asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO)); - } -} - /* * Force SSBS state on context-switch, since it may be lost after migrating * from a CPU which treats the bit as RES0 in a heterogeneous system. @@ -554,7 +543,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, hw_breakpoint_thread_switch(next); contextidr_thread_switch(next); entry_task_switch(next); - uao_thread_switch(next); ssbs_thread_switch(next); erratum_1418040_thread_switch(prev, next); diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c index c9640e50967a..e04b3e90c003 100644 --- a/arch/arm64/kernel/sdei.c +++ b/arch/arm64/kernel/sdei.c @@ -242,15 +242,12 @@ asmlinkage __kprobes notrace unsigned long __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) { unsigned long ret; - mm_segment_t orig_addr_limit; /* * We didn't take an exception to get here, so the HW hasn't - * set/cleared bits in PSTATE that we may rely on. Initialize PAN, then - * use force_uaccess_begin() to reset addr_limit. + * set/cleared bits in PSTATE that we may rely on. Initialize PAN. */ __sdei_pstate_entry(); - orig_addr_limit = force_uaccess_begin(); nmi_enter(); @@ -258,7 +255,5 @@ __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg) nmi_exit(); - force_uaccess_end(orig_addr_limit); - return ret; } diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 96cd347c7a46..a67b37a7a47e 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -58,7 +58,6 @@ void notrace __cpu_suspend_exit(void) * features that might not have been set correctly. */ __uaccess_enable_hw_pan(); - uao_thread_switch(current); /* * Restore HW breakpoint registers to sane values diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 1ee94002801f..8dc17e650c8e 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -479,11 +479,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, } if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) { - /* regs->orig_addr_limit may be 0 if we entered from EL0 */ - if (regs->orig_addr_limit == KERNEL_DS) - die_kernel_fault("access to user memory with fs=KERNEL_DS", - addr, esr, regs); - if (is_el1_instruction_abort(esr)) die_kernel_fault("execution of user memory", addr, esr, regs); -- cgit v1.2.3 From b5a5a01d8e9a44ecb18dc31d471233cad2f88291 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:56 +0000 Subject: arm64: uaccess: remove addr_limit_user_check() Now that set_fs() is gone, addr_limit_user_check() is redundant. Remove the checks and associated thread flag. 
To ensure that _TIF_WORK_MASK can be used as an immediate value in an AND instruction (as it is in `ret_to_user`), TIF_MTE_ASYNC_FAULT is renumbered to keep the constituent bits of _TIF_WORK_MASK contiguous. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-11-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/thread_info.h | 6 ++---- arch/arm64/kernel/signal.c | 3 --- 2 files changed, 2 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 3a88a55de568..015beafe58f5 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -63,8 +63,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ -#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ -#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */ +#define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ @@ -90,7 +89,6 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_UPROBE (1 << TIF_UPROBE) -#define _TIF_FSCHECK (1 << TIF_FSCHECK) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) @@ -98,7 +96,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ - _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT) + _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index a8184cad8890..af5c6c6638f7 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -922,9 +922,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, trace_hardirqs_off(); do { - /* Check valid user FS if needed */ - addr_limit_user_check(); - if (thread_flags & _TIF_NEED_RESCHED) { /* Unmask Debug and SError for the next task */ local_daif_restore(DAIF_PROCCTX_NOIRQ); -- cgit v1.2.3 From 7cf283c7bd6260ae43a74cd213f5ec9d665a19b5 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:57 +0000 Subject: arm64: uaccess: remove redundant PAN toggling Some code (e.g. futex) needs to make privileged accesses to userspace memory, and uses uaccess_{enable,disable}_privileged() in order to permit this. All other uaccess primitives use LDTR/STTR, and never need to toggle PAN. Remove the redundant PAN toggling. 
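As a rough illustration (a sketch modelled on the futex usage, not code taken from this series), only a caller that must run ordinary privileged loads/stores against a user address still needs the bracketing pair; do_exclusive_user_op() below is a hypothetical placeholder for the real LDXR/STLXR sequence:

/*
 * Sketch only: the calling pattern that still toggles PAN (or switches
 * TTBR0 with CONFIG_ARM64_SW_TTBR0_PAN). Accessors built on
 * __raw_get_mem("ldtr", ...) / __raw_put_mem("sttr", ...) never enter
 * this path.
 */
static int privileged_user_op(u32 __user *uaddr)
{
	int ret;

	uaccess_enable_privileged();		/* TTBR0 switch, else clear PSTATE.PAN */
	ret = do_exclusive_user_op(uaddr);	/* hypothetical helper */
	uaccess_disable_privileged();		/* TTBR0 switch back, else set PSTATE.PAN */

	return ret;
}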
Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-12-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/include/asm/cpucaps.h | 1 - arch/arm64/include/asm/uaccess.h | 59 +++++++++++++--------------------------- arch/arm64/kernel/cpufeature.c | 17 ------------ 3 files changed, 19 insertions(+), 58 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 64ea0bb9f420..ab3bec7a5a4d 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -17,7 +17,6 @@ #define ARM64_WORKAROUND_834220 7 #define ARM64_HAS_NO_HW_PREFETCH 8 #define ARM64_HAS_UAO 9 -#define ARM64_ALT_PAN_NOT_UAO 10 #define ARM64_HAS_VIRT_HOST_EXTN 11 #define ARM64_WORKAROUND_CAVIUM_27456 12 #define ARM64_HAS_32BIT_EL0 13 diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index e639d8b2b38d..769cad7b4910 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -159,41 +159,20 @@ static inline void __uaccess_enable_hw_pan(void) CONFIG_ARM64_PAN)); } -#define __uaccess_disable(alt) \ -do { \ - if (!uaccess_ttbr0_disable()) \ - asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \ - CONFIG_ARM64_PAN)); \ -} while (0) - -#define __uaccess_enable(alt) \ -do { \ - if (!uaccess_ttbr0_enable()) \ - asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \ - CONFIG_ARM64_PAN)); \ -} while (0) - static inline void uaccess_disable_privileged(void) { - __uaccess_disable(ARM64_HAS_PAN); -} + if (uaccess_ttbr0_disable()) + return; -static inline void uaccess_enable_privileged(void) -{ - __uaccess_enable(ARM64_HAS_PAN); + __uaccess_enable_hw_pan(); } -/* - * These functions are no-ops when UAO is present. 
- */ -static inline void uaccess_disable_not_uao(void) +static inline void uaccess_enable_privileged(void) { - __uaccess_disable(ARM64_ALT_PAN_NOT_UAO); -} + if (uaccess_ttbr0_enable()) + return; -static inline void uaccess_enable_not_uao(void) -{ - __uaccess_enable(ARM64_ALT_PAN_NOT_UAO); + __uaccess_disable_hw_pan(); } /* @@ -265,9 +244,9 @@ do { \ #define __raw_get_user(x, ptr, err) \ do { \ __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ + uaccess_ttbr0_enable(); \ __raw_get_mem("ldtr", x, ptr, err); \ - uaccess_disable_not_uao(); \ + uaccess_ttbr0_disable(); \ } while (0) #define __get_user_error(x, ptr, err) \ @@ -338,9 +317,9 @@ do { \ #define __raw_put_user(x, ptr, err) \ do { \ __chk_user_ptr(ptr); \ - uaccess_enable_not_uao(); \ + uaccess_ttbr0_enable(); \ __raw_put_mem("sttr", x, ptr, err); \ - uaccess_disable_not_uao(); \ + uaccess_ttbr0_disable(); \ } while (0) #define __put_user_error(x, ptr, err) \ @@ -378,10 +357,10 @@ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __u #define raw_copy_from_user(to, from, n) \ ({ \ unsigned long __acfu_ret; \ - uaccess_enable_not_uao(); \ + uaccess_ttbr0_enable(); \ __acfu_ret = __arch_copy_from_user((to), \ __uaccess_mask_ptr(from), (n)); \ - uaccess_disable_not_uao(); \ + uaccess_ttbr0_disable(); \ __acfu_ret; \ }) @@ -389,10 +368,10 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi #define raw_copy_to_user(to, from, n) \ ({ \ unsigned long __actu_ret; \ - uaccess_enable_not_uao(); \ + uaccess_ttbr0_enable(); \ __actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to), \ (from), (n)); \ - uaccess_disable_not_uao(); \ + uaccess_ttbr0_disable(); \ __actu_ret; \ }) @@ -400,10 +379,10 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi #define raw_copy_in_user(to, from, n) \ ({ \ unsigned long __aciu_ret; \ - uaccess_enable_not_uao(); \ + uaccess_ttbr0_enable(); \ __aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to), \ __uaccess_mask_ptr(from), (n)); \ - uaccess_disable_not_uao(); \ + uaccess_ttbr0_disable(); \ __aciu_ret; \ }) @@ -414,9 +393,9 @@ extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned lo static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) { if (access_ok(to, n)) { - uaccess_enable_not_uao(); + uaccess_ttbr0_enable(); n = __arch_clear_user(__uaccess_mask_ptr(to), n); - uaccess_disable_not_uao(); + uaccess_ttbr0_disable(); } return n; } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index ddca55fb79f5..d09bdc60a8e3 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -153,10 +153,6 @@ EXPORT_SYMBOL(cpu_hwcap_keys); .width = 0, \ } -/* meta feature for alternatives */ -static bool __maybe_unused -cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused); - static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap); static bool __system_matches_cap(unsigned int n); @@ -1779,13 +1775,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .min_field_value = 1, }, #endif /* CONFIG_ARM64_UAO */ -#ifdef CONFIG_ARM64_PAN - { - .capability = ARM64_ALT_PAN_NOT_UAO, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, - .matches = cpufeature_pan_not_uao, - }, -#endif /* CONFIG_ARM64_PAN */ #ifdef CONFIG_ARM64_VHE { .desc = "Virtualization Host Extensions", @@ -2736,12 +2725,6 @@ void __init setup_cpu_features(void) ARCH_DMA_MINALIGN); } -static bool __maybe_unused 
-cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused) -{ - return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO)); -} - static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap) { cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); -- cgit v1.2.3 From 1517c4facf2e66401394998dba1ee236fd261310 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 2 Dec 2020 13:15:58 +0000 Subject: arm64: uaccess: remove vestigal UAO support Now that arm64 no longer uses UAO, remove the vestigal feature detection code and Kconfig text. Signed-off-by: Mark Rutland Cc: Christoph Hellwig Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20201202131558.39270-13-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 21 --------------------- arch/arm64/include/asm/cpucaps.h | 1 - arch/arm64/kernel/cpufeature.c | 11 ----------- 3 files changed, 33 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6fae3ec1f2b6..8d13b9135634 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1427,27 +1427,6 @@ endmenu menu "ARMv8.2 architectural features" -config ARM64_UAO - bool "Enable support for User Access Override (UAO)" - default y - help - User Access Override (UAO; part of the ARMv8.2 Extensions) - causes the 'unprivileged' variant of the load/store instructions to - be overridden to be privileged. - - This option changes get_user() and friends to use the 'unprivileged' - variant of the load/store instructions. This ensures that user-space - really did have access to the supplied memory. When addr_limit is - set to kernel memory the UAO bit will be set, allowing privileged - access to kernel memory. - - Choosing this option will cause copy_to_user() et al to use user-space - memory permissions. - - The feature is detected at runtime, the kernel will use the - regular load/store instructions if the cpu does not implement the - feature. 
- config ARM64_PMEM bool "Enable support for persistent memory" select ARCH_HAS_PMEM_API diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index ab3bec7a5a4d..a7242ef2a2cd 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -16,7 +16,6 @@ #define ARM64_WORKAROUND_CAVIUM_23154 6 #define ARM64_WORKAROUND_834220 7 #define ARM64_HAS_NO_HW_PREFETCH 8 -#define ARM64_HAS_UAO 9 #define ARM64_HAS_VIRT_HOST_EXTN 11 #define ARM64_WORKAROUND_CAVIUM_27456 12 #define ARM64_HAS_32BIT_EL0 13 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d09bdc60a8e3..cf09bac80adb 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1764,17 +1764,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, .matches = has_no_hw_prefetch, }, -#ifdef CONFIG_ARM64_UAO - { - .desc = "User Access Override", - .capability = ARM64_HAS_UAO, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, - .matches = has_cpuid_feature, - .sys_reg = SYS_ID_AA64MMFR2_EL1, - .field_pos = ID_AA64MMFR2_UAO_SHIFT, - .min_field_value = 1, - }, -#endif /* CONFIG_ARM64_UAO */ #ifdef CONFIG_ARM64_VHE { .desc = "Virtualization Host Extensions", -- cgit v1.2.3 From 701f49065e68741a26752e6ae235c02bcafa2424 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 3 Dec 2020 15:24:03 +0000 Subject: arm64: mark __system_matches_cap as __maybe_unused Now that the PAN toggling has been removed, the only user of __system_matches_cap() is has_generic_auth(), which is only built when CONFIG_ARM64_PTR_AUTH is selected, and Qian reports that this results in a build-time warning when CONFIG_ARM64_PTR_AUTH is not selected: | arch/arm64/kernel/cpufeature.c:2649:13: warning: '__system_matches_cap' defined but not used [-Wunused-function] | static bool __system_matches_cap(unsigned int n) | ^~~~~~~~~~~~~~~~~~~~ It's tricky to restructure things to prevent this, so let's mark __system_matches_cap() as __maybe_unused, as we used to do for the other user of __system_matches_cap() which we just removed. Reported-by: Qian Cai Suggested-by: Qian Cai Signed-off-by: Mark Rutland Link: https://lore.kernel.org/r/20201203152403.26100-1-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index cf09bac80adb..cffcb0198bae 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2634,7 +2634,7 @@ bool this_cpu_has_cap(unsigned int n) * - The SYSTEM_FEATURE cpu_hwcaps may not have been set. * In all other cases cpus_have_{const_}cap() should be used. */ -static bool __system_matches_cap(unsigned int n) +static bool __maybe_unused __system_matches_cap(unsigned int n) { if (n < ARM64_NCAPS) { const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n]; -- cgit v1.2.3 From 929c1f3384d7e5cd319d03242cb925c3f91236f7 Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Wed, 2 Dec 2020 23:51:10 -0800 Subject: arm64: mte: fix prctl(PR_GET_TAGGED_ADDR_CTRL) if TCF0=NONE Previously we were always returning a tag inclusion mask of zero via PR_GET_TAGGED_ADDR_CTRL if TCF0 was set to NONE. Fix it by making the code for the NONE case match the others. 
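Reduced to its essentials (a simplified sketch of the fixed flow, not the full function; the inclusion-mask computation is replaced by a hypothetical helper and the ASYNC arm is shown by analogy with the hunk below):

/*
 * Sketch: ret already carries the tag inclusion mask bits before the
 * switch, so an early "return PR_MTE_TCF_NONE" discarded them; the
 * NONE case now ORs in its flag and breaks like the other modes.
 */
long get_mte_ctrl_sketch(struct task_struct *task)
{
	unsigned long ret = mte_tag_inclusion_bits(task);	/* hypothetical helper */

	switch (task->thread.sctlr_tcf0) {
	case SCTLR_EL1_TCF0_NONE:
		ret |= PR_MTE_TCF_NONE;		/* was: return PR_MTE_TCF_NONE; */
		break;
	case SCTLR_EL1_TCF0_SYNC:
		ret |= PR_MTE_TCF_SYNC;
		break;
	case SCTLR_EL1_TCF0_ASYNC:
		ret |= PR_MTE_TCF_ASYNC;
		break;
	}

	return ret;
}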
Signed-off-by: Peter Collingbourne Link: https://linux-review.googlesource.com/id/Iefbea66cf7d2b4c80b82f9639b9ea7f33f7fac53 Fixes: af5ce95282dc ("arm64: mte: Allow user control of the generated random tags via prctl()") Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20201203075110.2781021-1-pcc@google.com Signed-off-by: Will Deacon --- arch/arm64/kernel/mte.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 52a0638ed967..ef15c8a2a49d 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -189,7 +189,8 @@ long get_mte_ctrl(struct task_struct *task) switch (task->thread.sctlr_tcf0) { case SCTLR_EL1_TCF0_NONE: - return PR_MTE_TCF_NONE; + ret |= PR_MTE_TCF_NONE; + break; case SCTLR_EL1_TCF0_SYNC: ret |= PR_MTE_TCF_SYNC; break; -- cgit v1.2.3 From ce4b2c01781a24b0a04845f683feea44ade70503 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 4 Dec 2020 09:19:35 +0000 Subject: arm64: Fix build failure when HARDLOCKUP_DETECTOR_PERF is enabled If HARDLOCKUP_DETECTOR_PERF is selected but HW_PERF_EVENTS is not, then the associated watchdog driver will fail to link: | aarch64-linux-ld: Unexpected GOT/PLT entries detected! | aarch64-linux-ld: Unexpected run-time procedure linkages detected! | aarch64-linux-ld: kernel/watchdog_hld.o: in function `hardlockup_detector_event_create': | >> watchdog_hld.c:(.text+0x68): undefined reference to `hw_nmi_get_sample_period Change the Kconfig dependencies so that HAVE_PERF_EVENTS_NMI requires the hardware PMU driver to be enabled, ensuring that the required symbols are present. Cc: Sumit Garg Reported-by: kernel test robot Link: https://lore.kernel.org/r/202012031509.4O5ZoWNI-lkp@intel.com Fixes: 367c820ef080 ("arm64: Enable perf events based hard lockup detector") Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7d3440770b14..fe0d30a749b6 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -170,7 +170,7 @@ config ARM64 select HAVE_NMI select HAVE_PATA_PLATFORM select HAVE_PERF_EVENTS - select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI + select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP -- cgit v1.2.3
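For context on the link failure quoted above (an illustrative sketch, not code from this patch): the generic detector in kernel/watchdog_hld.c references a sample-period hook that, on arm64, is supplied by the hardware PMU driver, so the symbol only exists when HW_PERF_EVENTS is enabled:

/* Referenced by kernel/watchdog_hld.c (see the quoted link error): */
u64 hw_nmi_get_sample_period(int watchdog_thresh);

/*
 * Illustrative stub only: the real arm64 definition is built as part of
 * the hardware PMU driver, which is why HAVE_PERF_EVENTS_NMI now also
 * requires HW_PERF_EVENTS. The returned value here is a placeholder.
 */
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return (u64)NSEC_PER_SEC * watchdog_thresh;	/* placeholder period */
}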