author     Linus Torvalds <torvalds@linux-foundation.org>  2021-03-12 11:39:53 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-03-12 11:39:53 -0800
commit     17f8fc198a6fc64cee2b1e126398d0c41823f5a3 (patch)
tree       8a01503b61531853a4f589b9a366358d6f12732e /arch
parent     6bf8819fede1fef9805e1d803261c0d3bb62f239 (diff)
parent     c8e3866836528a4ba3b0535834f03768d74f7d8e (diff)
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "We've got a smattering of changes all over the place which we've
  accrued since -rc1. To my knowledge, there aren't any pending issues
  at the moment, but there's still plenty of time for something else to
  crop up...

  Summary:

   - Fix booting a 52-bit-VA-aware kernel on Qualcomm Amberwing

   - Fix pfn_valid() not to reject all ZONE_DEVICE memory

   - Fix memory tagging setup for hotplugged memory regions

   - Fix KASAN tagging in page_alloc() when DEBUG_VIRTUAL is enabled

   - Fix accidental truncation of CPU PMU event counters

   - Fix error code initialisation when failing probe of DMC620 PMU

   - Fix return value initialisation for sve-ptrace selftest

   - Drop broken support for CMDLINE_EXTEND"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  perf/arm_dmc620_pmu: Fix error return code in dmc620_pmu_device_probe()
  arm64: mm: remove unused __cpu_uses_extended_idmap[_level()]
  arm64: mm: use a 48-bit ID map when possible on 52-bit VA builds
  arm64: perf: Fix 64-bit event counter read truncation
  arm64/mm: Fix __enable_mmu() for new TGRAN range values
  kselftest: arm64: Fix exit code of sve-ptrace
  arm64: mte: Map hotplugged memory as Normal Tagged
  arm64: kasan: fix page_alloc tagging with DEBUG_VIRTUAL
  arm64/mm: Reorganize pfn_valid()
  arm64/mm: Fix pfn_valid() for ZONE_DEVICE based memory
  arm64/mm: Drop THP conditionality from FORCE_MAX_ZONEORDER
  arm64/mm: Drop redundant ARCH_WANT_HUGE_PMD_SHARE
  arm64: Drop support for CMDLINE_EXTEND
  arm64: cpufeatures: Fix handling of CONFIG_CMDLINE for idreg overrides
Diffstat (limited to 'arch')
 arch/arm64/Kconfig                    | 12
 arch/arm64/include/asm/memory.h       |  5
 arch/arm64/include/asm/mmu_context.h  | 17
 arch/arm64/include/asm/pgtable-prot.h |  1
 arch/arm64/include/asm/pgtable.h      |  3
 arch/arm64/include/asm/sysreg.h       | 20
 arch/arm64/kernel/head.S              |  8
 arch/arm64/kernel/idreg-override.c    | 43
 arch/arm64/kernel/perf_event.c        |  2
 arch/arm64/kvm/reset.c                | 10
 arch/arm64/mm/init.c                  | 29
 arch/arm64/mm/mmu.c                   |  5
 12 files changed, 88 insertions(+), 67 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1f212b47a48a..5656e7aacd69 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1055,8 +1055,6 @@ config HW_PERF_EVENTS
config SYS_SUPPORTS_HUGETLBFS
def_bool y
-config ARCH_WANT_HUGE_PMD_SHARE
-
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
@@ -1157,8 +1155,8 @@ config XEN
config FORCE_MAX_ZONEORDER
int
- default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
- default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
+ default "14" if ARM64_64K_PAGES
+ default "12" if ARM64_16K_PAGES
default "11"
help
The kernel memory allocator divides physically contiguous memory
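With the THP conditionality dropped, the larger orders above now apply
unconditionally for 64K and 16K pages. As a rough illustration of what those
defaults buy, assuming the convention of this era that the largest buddy
allocation is 2^(MAX_ORDER - 1) pages (a sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	const struct { const char *pages; int order; long page_kib; } t[] = {
		{ "64K", 14, 64 },
		{ "16K", 12, 16 },
		{ "4K",  11,  4 },
	};

	for (int i = 0; i < 3; i++)
		printf("%s pages, MAX_ORDER=%d -> max alloc %ld MiB\n",
		       t[i].pages, t[i].order,
		       (1L << (t[i].order - 1)) * t[i].page_kib / 1024);
	return 0;
}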
@@ -1855,12 +1853,6 @@ config CMDLINE_FROM_BOOTLOADER
the boot loader doesn't provide any, the default kernel command
string provided in CMDLINE will be used.
-config CMDLINE_EXTEND
- bool "Extend bootloader kernel arguments"
- help
- The command-line arguments provided by the boot loader will be
- appended to the default kernel command string.
-
config CMDLINE_FORCE
bool "Always use the default kernel command string"
help
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index c759faf7a1ff..0aabc3be9a75 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -328,6 +328,11 @@ static inline void *phys_to_virt(phys_addr_t x)
#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
+#define page_to_virt(x) ({ \
+ __typeof__(x) __page = x; \
+ void *__addr = __va(page_to_phys(__page)); \
+ (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
+})
#define virt_to_page(x) pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x) ({ \
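This DEBUG_VIRTUAL/!SPARSEMEM_VMEMMAP variant of page_to_virt() now carries
the page's KASAN tag into the linear-map address, matching the vmemmap-based
variant below it. A minimal userspace sketch of the tag-in-top-byte scheme
that __tag_set() relies on (the shift mirrors arm64's Top Byte Ignore; this
is an illustration, not the kernel macro):

#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56	/* TBI: bits 63:56 of a pointer are ignored on access */

static void *tag_set(const void *addr, uint8_t tag)
{
	uint64_t v = (uint64_t)(uintptr_t)addr;

	v &= ~(0xffULL << TAG_SHIFT);		/* clear any existing tag */
	v |= (uint64_t)tag << TAG_SHIFT;	/* install the page's tag */
	return (void *)(uintptr_t)v;
}

int main(void)
{
	int x;
	void *p = tag_set(&x, 0x5a);

	printf("tag = 0x%02x\n", (unsigned)((uintptr_t)p >> TAG_SHIFT));
	return 0;
}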
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 70ce8c1d2b07..bd02e99b1a4c 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -63,23 +63,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;
-static inline bool __cpu_uses_extended_idmap(void)
-{
- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
- return false;
-
- return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
-}
-
-/*
- * True if the extended ID map requires an extra level of translation table
- * to be configured.
- */
-static inline bool __cpu_uses_extended_idmap_level(void)
-{
- return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
-}
-
/*
* Ensure TCR.T0SZ is set to the provided value.
*/
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 046be789fbb4..9a65fb528110 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -66,7 +66,6 @@ extern bool arm64_use_ng_mappings;
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
#define PAGE_KERNEL __pgprot(PROT_NORMAL)
-#define PAGE_KERNEL_TAGGED __pgprot(PROT_NORMAL_TAGGED)
#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e17b96d0e4b5..47027796c2f9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -486,6 +486,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
+#define pgprot_tagged(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
+#define pgprot_mhp pgprot_tagged
/*
* DMA allocations for non-coherent devices use what the Arm architecture calls
* "Normal non-cacheable" memory, which permits speculation, unaligned accesses
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index dfd4edbfe360..d4a5fca984c3 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -796,6 +796,11 @@
#define ID_AA64MMFR0_PARANGE_48 0x5
#define ID_AA64MMFR0_PARANGE_52 0x6
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7
+
#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
#else
@@ -961,14 +966,17 @@
#define ID_PFR1_PROGMOD_SHIFT 0
#if defined(CONFIG_ARM64_4K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0xF
#elif defined(CONFIG_ARM64_64K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
#endif
#define MVFR2_FPMISC_SHIFT 4
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 66b0e0b66e31..840bda1869e9 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
*/
adrp x5, __idmap_text_end
clz x5, x5
- cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
+ cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
b.ge 1f // .. then skip VA range extension
adr_l x6, idmap_t0sz
@@ -655,8 +655,10 @@ SYM_FUNC_END(__secondary_too_slow)
SYM_FUNC_START(__enable_mmu)
mrs x2, ID_AA64MMFR0_EL1
ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
- cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
- b.ne __no_granule_support
+ cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+ b.lt __no_granule_support
+ cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+ b.gt __no_granule_support
update_early_cpu_boot_status 0, x2, x3
adrp x2, idmap_pg_dir
phys_to_ttbr x1, x1
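In C terms, __enable_mmu() has moved from an exact-match test on the 4-bit
TGRAN field to a closed-range test, so future encodings inside [MIN, MAX]
(such as 52-bit-capable granules) boot rather than being bounced to
__no_granule_support. A sketch of the equivalent check (the real one runs in
assembly before the MMU is up; the shift argument stands in for the real
field position):

#include <stdbool.h>
#include <stdint.h>

static bool granule_supported(uint64_t mmfr0, unsigned int shift,
			      unsigned int min, unsigned int max)
{
	unsigned int tgran = (mmfr0 >> shift) & 0xf;	/* the ubfx above */

	return tgran >= min && tgran <= max;	/* was: tgran == SUPPORTED */
}

int main(void)
{
	/* With the 4K-granule bounds MIN=0x0/MAX=0x7: 0x0 and 0x1 pass,
	 * 0xf ("not supported") is rejected. */
	return (granule_supported(0x0, 0, 0x0, 0x7) &&
		granule_supported(0x1, 0, 0x0, 0x7) &&
		!granule_supported(0xf, 0, 0x0, 0x7)) ? 0 : 1;
}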
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index dffb16682330..83f1c4b92095 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -163,33 +163,36 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
} while (1);
}
-static __init void parse_cmdline(void)
+static __init const u8 *get_bootargs_cmdline(void)
{
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
- const u8 *prop;
- void *fdt;
- int node;
+ const u8 *prop;
+ void *fdt;
+ int node;
- fdt = get_early_fdt_ptr();
- if (!fdt)
- goto out;
+ fdt = get_early_fdt_ptr();
+ if (!fdt)
+ return NULL;
- node = fdt_path_offset(fdt, "/chosen");
- if (node < 0)
- goto out;
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ return NULL;
- prop = fdt_getprop(fdt, node, "bootargs", NULL);
- if (!prop)
- goto out;
+ prop = fdt_getprop(fdt, node, "bootargs", NULL);
+ if (!prop)
+ return NULL;
- __parse_cmdline(prop, true);
+ return strlen(prop) ? prop : NULL;
+}
- if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
- return;
- }
+static __init void parse_cmdline(void)
+{
+ const u8 *prop = get_bootargs_cmdline();
-out:
- __parse_cmdline(CONFIG_CMDLINE, true);
+ if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
+ __parse_cmdline(CONFIG_CMDLINE, true);
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
+ __parse_cmdline(prop, true);
}
/* Keep checkers quiet */
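The refactor flattens the control flow: CONFIG_CMDLINE is parsed when
CMDLINE_FORCE is set or when no non-empty bootargs property could be read,
otherwise the DT bootargs are parsed, and the broken CMDLINE_EXTEND append
mode disappears entirely. A stand-in sketch of that decision (not the kernel
function):

#include <stdbool.h>
#include <stdio.h>

static const char *pick_cmdline(bool cmdline_force, const char *bootargs,
				const char *config_cmdline)
{
	if (cmdline_force || !bootargs)	/* forced, or the DT gave us nothing */
		return config_cmdline;
	return bootargs;		/* otherwise the bootloader's line wins */
}

int main(void)
{
	printf("%s\n", pick_cmdline(false, "console=ttyAMA0", "loglevel=7"));
	printf("%s\n", pick_cmdline(false, NULL, "loglevel=7"));
	printf("%s\n", pick_cmdline(true, "console=ttyAMA0", "loglevel=7"));
	return 0;
}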
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 7d2318f80955..4658fcf88c2b 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -460,7 +460,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
-static inline u32 armv8pmu_read_evcntr(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
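The one-character type change is the whole fix: chained events combine two
32-bit hardware counters into one 64-bit value, and returning it as u32
silently drops the upper half. A minimal demonstration of that truncation:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_evcntr_old(uint64_t hw) { return hw; }	/* truncates */
static uint64_t read_evcntr_new(uint64_t hw) { return hw; }	/* full width */

int main(void)
{
	uint64_t hw = 0x100000001ULL;	/* a counter that has passed 2^32 */

	printf("old: %llu\n", (unsigned long long)read_evcntr_old(hw));	/* 1 */
	printf("new: %llu\n", (unsigned long long)read_evcntr_new(hw));	/* 4294967297 */
	return 0;
}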
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 47f3f035f3ea..e81c7ec9e102 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -311,16 +311,18 @@ int kvm_set_ipa_limit(void)
}
switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
- default:
- case 1:
+ case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
return -EINVAL;
- case 0:
+ case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
break;
- case 2:
+ case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
break;
+ default:
+ kvm_err("Unsupported value for TGRAN_2, giving up\n");
+ return -EINVAL;
}
kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
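The reworked switch accepts every advertised TGRAN_2 encoding in [MIN, MAX]
via a GNU C case range, while genuinely unknown values now fail with their
own diagnostic instead of falling into the "not supported" leg. A compact
model of the same shape (case ranges are a gcc/clang extension):

#include <stdio.h>

enum { T2_DEFAULT = 0, T2_NONE = 1, T2_MIN = 2, T2_MAX = 7 };

static const char *stage2_pagesize(int tgran_2)
{
	switch (tgran_2) {
	case T2_NONE:
		return "unsupported";
	case T2_DEFAULT:
		return "supported (default)";
	case T2_MIN ... T2_MAX:		/* GNU C case range */
		return "supported (advertised)";
	default:
		return "unknown encoding, reject";
	}
}

int main(void)
{
	for (int v = 0; v <= 8; v++)
		printf("%d -> %s\n", v, stage2_pagesize(v));
	return 0;
}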
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0ace5e68efba..3685e12aba9b 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -219,17 +219,40 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
int pfn_valid(unsigned long pfn)
{
- phys_addr_t addr = pfn << PAGE_SHIFT;
+ phys_addr_t addr = PFN_PHYS(pfn);
- if ((addr >> PAGE_SHIFT) != pfn)
+ /*
+ * Ensure the upper PAGE_SHIFT bits are clear in the
+ * pfn. Else it might lead to false positives when
+ * some of the upper bits are set, but the lower bits
+ * match a valid pfn.
+ */
+ if (PHYS_PFN(addr) != pfn)
return 0;
#ifdef CONFIG_SPARSEMEM
+{
+ struct mem_section *ms;
+
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- if (!valid_section(__pfn_to_section(pfn)))
+ ms = __pfn_to_section(pfn);
+ if (!valid_section(ms))
return 0;
+
+ /*
+ * ZONE_DEVICE memory does not have the memblock entries.
+ * memblock_is_map_memory() check for ZONE_DEVICE based
+ * addresses will always fail. Even the normal hotplugged
+ * memory will never have MEMBLOCK_NOMAP flag set in their
+ * memblock entries. Skip memblock search for all non early
+ * memory sections covering all of hotplug memory including
+ * both normal and ZONE_DEVICE based.
+ */
+ if (!early_section(ms))
+ return pfn_section_valid(ms, pfn);
+}
#endif
return memblock_is_map_memory(addr);
}
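The PFN_PHYS()/PHYS_PFN() round-trip at the top implements the overflow guard
the new comment describes: a pfn with any of its upper PAGE_SHIFT bits set
loses them in the left shift and would otherwise alias a genuinely valid pfn.
A minimal model with 64-bit physical addresses and 4K pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_PHYS(pfn)	((uint64_t)(pfn) << PAGE_SHIFT)
#define PHYS_PFN(addr)	((uint64_t)(addr) >> PAGE_SHIFT)

int main(void)
{
	uint64_t bogus = (1ULL << 63) | 0x1000;	/* upper bits set, lower bits valid */

	if (PHYS_PFN(PFN_PHYS(bogus)) != bogus)
		printf("bogus pfn %#llx rejected\n", (unsigned long long)bogus);
	return 0;
}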
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 3802cfbdd20d..7484ea4f6ba0 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,7 +40,7 @@
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 __section(".mmuoff.data.write") vabits_actual;
@@ -512,7 +512,8 @@ static void __init map_mem(pgd_t *pgdp)
* if MTE is present. Otherwise, it has the same attributes as
* PAGE_KERNEL.
*/
- __map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
+ __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
+ flags);
}
/*