Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 28
-rw-r--r--  arch/arm64/include/asm/alternative.h | 3
-rw-r--r--  arch/arm64/include/asm/archrandom.h | 2
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 2
-rw-r--r--  arch/arm64/include/asm/compat.h | 2
-rw-r--r--  arch/arm64/include/asm/efi.h | 2
-rw-r--r--  arch/arm64/include/asm/el2_setup.h | 20
-rw-r--r--  arch/arm64/include/asm/esr.h | 30
-rw-r--r--  arch/arm64/include/asm/exception.h | 6
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h | 8
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 4
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 18
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 2
-rw-r--r--  arch/arm64/include/asm/memory.h | 16
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 8
-rw-r--r--  arch/arm64/include/asm/module.h | 8
-rw-r--r--  arch/arm64/include/asm/module.lds.h | 2
-rw-r--r--  arch/arm64/include/asm/scs.h | 1
-rw-r--r--  arch/arm64/include/asm/spectre.h | 16
-rw-r--r--  arch/arm64/include/asm/syscall_wrapper.h | 4
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 66
-rw-r--r--  arch/arm64/include/asm/traps.h | 2
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 2
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 1
-rw-r--r--  arch/arm64/kernel/Makefile | 3
-rw-r--r--  arch/arm64/kernel/alternative.c | 4
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 29
-rw-r--r--  arch/arm64/kernel/cpuidle.c | 2
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 1
-rw-r--r--  arch/arm64/kernel/entry-common.c | 17
-rw-r--r--  arch/arm64/kernel/entry.S | 57
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 1
-rw-r--r--  arch/arm64/kernel/ftrace.c | 8
-rw-r--r--  arch/arm64/kernel/hibernate.c | 1
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 8
-rw-r--r--  arch/arm64/kernel/idreg-override.c | 2
-rw-r--r--  arch/arm64/kernel/kaslr.c | 83
-rw-r--r--  arch/arm64/kernel/module-plts.c | 1
-rw-r--r--  arch/arm64/kernel/module.c | 159
-rw-r--r--  arch/arm64/kernel/setup.c | 2
-rw-r--r--  arch/arm64/kernel/signal.c | 3
-rw-r--r--  arch/arm64/kernel/syscall.c | 2
-rw-r--r--  arch/arm64/kernel/traps.c | 61
-rw-r--r--  arch/arm64/kvm/debug.c | 2
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h | 6
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/debug-sr.c | 2
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 11
-rw-r--r--  arch/arm64/lib/xor-neon.c | 8
-rw-r--r--  arch/arm64/mm/context.c | 2
-rw-r--r--  arch/arm64/mm/fault.c | 20
-rw-r--r--  arch/arm64/mm/flush.c | 1
-rw-r--r--  arch/arm64/mm/init.c | 44
-rw-r--r--  arch/arm64/mm/kasan_init.c | 17
-rw-r--r--  arch/arm64/mm/mmu.c | 13
-rw-r--r--  arch/arm64/tools/cpucaps | 2
-rwxr-xr-x  arch/arm64/tools/gen-cpucaps.awk | 4
-rw-r--r--  arch/arm64/tools/sysreg | 132
58 files changed, 614 insertions(+), 348 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1201d25a8a4..a6002084f09c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -207,6 +207,7 @@ config ARM64
select HAVE_IOREMAP_PROT
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KVM
+ select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -577,7 +578,6 @@ config ARM64_ERRATUM_845719
config ARM64_ERRATUM_843419
bool "Cortex-A53: 843419: A load or store might access an incorrect address"
default y
- select ARM64_MODULE_PLTS if MODULES
help
This option links the kernel with '--fix-cortex-a53-843419' and
enables PLT support to replace certain ADRP instructions, which can
@@ -2107,26 +2107,6 @@ config ARM64_SME
register state capable of holding two dimensional matrix tiles to
enable various matrix operations.
-config ARM64_MODULE_PLTS
- bool "Use PLTs to allow module memory to spill over into vmalloc area"
- depends on MODULES
- select HAVE_MOD_ARCH_SPECIFIC
- help
- Allocate PLTs when loading modules so that jumps and calls whose
- targets are too far away for their relative offsets to be encoded
- in the instructions themselves can be bounced via veneers in the
- module's PLT. This allows modules to be allocated in the generic
- vmalloc area after the dedicated module memory area has been
- exhausted.
-
- When running with address space randomization (KASLR), the module
- region itself may be too far away for ordinary relative jumps and
- calls, and so in that case, module PLTs are required and cannot be
- disabled.
-
- Specific errata workaround(s) might also force module PLTs to be
- enabled (ARM64_ERRATUM_843419).
-
config ARM64_PSEUDO_NMI
bool "Support for NMI-like interrupts"
select ARM_GIC_V3
@@ -2167,7 +2147,6 @@ config RELOCATABLE
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
- select ARM64_MODULE_PLTS if MODULES
select RELOCATABLE
help
Randomizes the virtual address at which the kernel image is
@@ -2198,9 +2177,8 @@ config RANDOMIZE_MODULE_REGION_FULL
When this option is not set, the module region will be randomized over
a limited range that contains the [_stext, _etext] interval of the
core kernel, so branch relocations are almost always in range unless
- ARM64_MODULE_PLTS is enabled and the region is exhausted. In this
- particular case of region exhaustion, modules might be able to fall
- back to a larger 2GB area.
+ the region is exhausted. In this particular case of region
+ exhaustion, modules might be able to fall back to a larger 2GB area.
config CC_HAVE_STACKPROTECTOR_SYSREG
def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index fe657e878757..00d97b8a757f 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -31,5 +31,8 @@ void apply_alternatives_module(void *start, size_t length);
static inline void apply_alternatives_module(void *start, size_t length) { }
#endif
+void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
+ __le32 *updptr, int nr_inst);
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ALTERNATIVE_H */
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
index 2f5f3da34782..b0abc64f86b0 100644
--- a/arch/arm64/include/asm/archrandom.h
+++ b/arch/arm64/include/asm/archrandom.h
@@ -129,4 +129,6 @@ static inline bool __init __early_cpu_has_rndr(void)
return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}
+u64 kaslr_early_init(void *fdt);
+
#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 75b211c98dea..5b6efe8abeeb 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -18,7 +18,6 @@
bic \tmp1, \tmp1, #TTBR_ASID_MASK
sub \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET // reserved_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
- isb
add \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
msr ttbr1_el1, \tmp1 // set reserved ASID
isb
@@ -31,7 +30,6 @@
extr \tmp2, \tmp2, \tmp1, #48
ror \tmp2, \tmp2, #16
msr ttbr1_el1, \tmp2 // set the active ASID
- isb
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 74575c3d6987..ae904a1ad529 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -96,6 +96,8 @@ static inline int is_compat_thread(struct thread_info *thread)
return test_ti_thread_flag(thread, TIF_32BIT);
}
+long compat_arm_syscall(struct pt_regs *regs, int scno);
+
#else /* !CONFIG_COMPAT */
static inline int is_compat_thread(struct thread_info *thread)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index f86b157a5da3..ef46f2daca62 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -166,4 +166,6 @@ static inline void efi_capsule_flush_cache_range(void *addr, int size)
dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
+efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f);
+
#endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 037724b19c5c..96440c7f2a84 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -22,6 +22,15 @@
isb
.endm
+.macro __init_el2_hcrx
+ mrs x0, id_aa64mmfr1_el1
+ ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
+ cbz x0, .Lskip_hcrx_\@
+ mov_q x0, HCRX_HOST_FLAGS
+ msr_s SYS_HCRX_EL2, x0
+.Lskip_hcrx_\@:
+.endm
+
/*
* Allow Non-secure EL1 and EL0 to access physical timer and counter.
* This is not necessary for VHE, since the host kernel runs in EL2,
@@ -69,7 +78,7 @@
cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present
mrs_s x0, SYS_TRBIDR_EL1
- and x0, x0, TRBIDR_PROG
+ and x0, x0, TRBIDR_EL1_P
cbnz x0, .Lskip_trace_\@ // If TRBE is available at EL2
mov x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
@@ -184,6 +193,7 @@
*/
.macro init_el2_state
__init_el2_sctlr
+ __init_el2_hcrx
__init_el2_timers
__init_el2_debug
__init_el2_lor
@@ -284,14 +294,6 @@
cbz x1, .Lskip_sme_\@
msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal
-
- mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present?
- ubfx x1, x1, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
- cbz x1, .Lskip_sme_\@
-
- mrs_s x1, SYS_HCRX_EL2
- orr x1, x1, #HCRX_EL2_SMPME_MASK // Enable priority mapping
- msr_s SYS_HCRX_EL2, x1
.Lskip_sme_\@:
.endm
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 8487aec9b658..ae35939f395b 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -47,7 +47,7 @@
#define ESR_ELx_EC_DABT_LOW (0x24)
#define ESR_ELx_EC_DABT_CUR (0x25)
#define ESR_ELx_EC_SP_ALIGN (0x26)
-/* Unallocated EC: 0x27 */
+#define ESR_ELx_EC_MOPS (0x27)
#define ESR_ELx_EC_FP_EXC32 (0x28)
/* Unallocated EC: 0x29 - 0x2B */
#define ESR_ELx_EC_FP_EXC64 (0x2C)
@@ -75,8 +75,11 @@
#define ESR_ELx_IL_SHIFT (25)
#define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT)
-#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
+#define ESR_ELx_ISS_MASK (GENMASK(24, 0))
#define ESR_ELx_ISS(esr) ((esr) & ESR_ELx_ISS_MASK)
+#define ESR_ELx_ISS2_SHIFT (32)
+#define ESR_ELx_ISS2_MASK (GENMASK_ULL(55, 32))
+#define ESR_ELx_ISS2(esr) (((esr) & ESR_ELx_ISS2_MASK) >> ESR_ELx_ISS2_SHIFT)
/* ISS field definitions shared by different classes */
#define ESR_ELx_WNR_SHIFT (6)
@@ -140,6 +143,20 @@
#define ESR_ELx_CM_SHIFT (8)
#define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT)
+/* ISS2 field definitions for Data Aborts */
+#define ESR_ELx_TnD_SHIFT (10)
+#define ESR_ELx_TnD (UL(1) << ESR_ELx_TnD_SHIFT)
+#define ESR_ELx_TagAccess_SHIFT (9)
+#define ESR_ELx_TagAccess (UL(1) << ESR_ELx_TagAccess_SHIFT)
+#define ESR_ELx_GCS_SHIFT (8)
+#define ESR_ELx_GCS (UL(1) << ESR_ELx_GCS_SHIFT)
+#define ESR_ELx_Overlay_SHIFT (6)
+#define ESR_ELx_Overlay (UL(1) << ESR_ELx_Overlay_SHIFT)
+#define ESR_ELx_DirtyBit_SHIFT (5)
+#define ESR_ELx_DirtyBit (UL(1) << ESR_ELx_DirtyBit_SHIFT)
+#define ESR_ELx_Xs_SHIFT (0)
+#define ESR_ELx_Xs_MASK (GENMASK_ULL(4, 0))
+
/* ISS field definitions for exceptions taken in to Hyp */
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
@@ -356,6 +373,15 @@
#define ESR_ELx_SME_ISS_ZA_DISABLED 3
#define ESR_ELx_SME_ISS_ZT_DISABLED 4
+/* ISS field definitions for MOPS exceptions */
+#define ESR_ELx_MOPS_ISS_MEM_INST (UL(1) << 24)
+#define ESR_ELx_MOPS_ISS_FROM_EPILOGUE (UL(1) << 18)
+#define ESR_ELx_MOPS_ISS_WRONG_OPTION (UL(1) << 17)
+#define ESR_ELx_MOPS_ISS_OPTION_A (UL(1) << 16)
+#define ESR_ELx_MOPS_ISS_DESTREG(esr) (((esr) & (UL(0x1f) << 10)) >> 10)
+#define ESR_ELx_MOPS_ISS_SRCREG(esr) (((esr) & (UL(0x1f) << 5)) >> 5)
+#define ESR_ELx_MOPS_ISS_SIZEREG(esr) (((esr) & (UL(0x1f) << 0)) >> 0)
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
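[Editor's illustration, not part of the patch: the esr.h hunk above only defines raw field encodings. The standalone user-space sketch below mirrors those shifts and masks to pull the MOPS register indices out of a sample syndrome value; the sample value and the local macro names are made up for the example. do_el0_mops() in the traps.c hunk further down consumes exactly these fields.]

#include <stdint.h>
#include <stdio.h>

/* Local mirrors of the ESR_ELx_MOPS_ISS_* encodings added above. */
#define MOPS_ISS_MEM_INST	(1UL << 24)	/* set for SET*, clear for CPY* */
#define MOPS_ISS_WRONG_OPTION	(1UL << 17)
#define MOPS_ISS_OPTION_A	(1UL << 16)
#define MOPS_ISS_DESTREG(esr)	(((esr) >> 10) & 0x1f)
#define MOPS_ISS_SRCREG(esr)	(((esr) >> 5) & 0x1f)
#define MOPS_ISS_SIZEREG(esr)	((esr) & 0x1f)

int main(void)
{
	/* Made-up syndrome: EC=0x27 (MOPS), WrongOption set, Xd=x1, Xs=x2, Xn=x3 */
	uint64_t esr = (0x27UL << 26) | MOPS_ISS_WRONG_OPTION |
		       (1UL << 10) | (2UL << 5) | 3UL;

	printf("%s exception: Xd=x%lu Xs=x%lu Xn=x%lu, wrong option: %s\n",
	       (esr & MOPS_ISS_MEM_INST) ? "SET*" : "CPY*",
	       (unsigned long)MOPS_ISS_DESTREG(esr),
	       (unsigned long)MOPS_ISS_SRCREG(esr),
	       (unsigned long)MOPS_ISS_SIZEREG(esr),
	       (esr & MOPS_ISS_WRONG_OPTION) ? "yes" : "no");
	return 0;
}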
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index e73af709cb7a..ad688e157c9b 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -8,16 +8,11 @@
#define __ASM_EXCEPTION_H
#include <asm/esr.h>
-#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <linux/interrupt.h>
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
-#else
-#define __exception_irq_entry __kprobes
-#endif
static inline unsigned long disr_to_esr(u64 disr)
{
@@ -77,6 +72,7 @@ void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
+void do_el0_mops(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr);
void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index fa4c6ff3aa9b..84055329cd8b 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -154,4 +154,12 @@ static inline int get_num_wrps(void)
ID_AA64DFR0_EL1_WRPs_SHIFT);
}
+#ifdef CONFIG_CPU_PM
+extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int));
+#else
+static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
+{
+}
+#endif
+
#endif /* __ASM_BREAKPOINT_H */
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 5d45f19fda7f..692b1ec663b2 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -137,6 +137,7 @@
#define KERNEL_HWCAP_SME_BI32I32 __khwcap2_feature(SME_BI32I32)
#define KERNEL_HWCAP_SME_B16B16 __khwcap2_feature(SME_B16B16)
#define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16)
+#define KERNEL_HWCAP_MOPS __khwcap2_feature(MOPS)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index baef29fcbeee..d2d4f4cd12b8 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -9,6 +9,7 @@
#include <asm/esr.h>
#include <asm/memory.h>
+#include <asm/sysreg.h>
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
@@ -92,6 +93,9 @@
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME)
+#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn)
+
/* TCR_EL2 Registers bits */
#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
#define TCR_EL2_TBI (1 << 20)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 43c3bc0f9544..86042afa86c3 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -267,6 +267,24 @@ extern u64 __kvm_get_mdcr_el2(void);
__kvm_at_err; \
} )
+void __noreturn hyp_panic(void);
+asmlinkage void kvm_unexpected_el2_exception(void);
+asmlinkage void __noreturn hyp_panic(void);
+asmlinkage void __noreturn hyp_panic_bad_stack(void);
+asmlinkage void kvm_unexpected_el2_exception(void);
+struct kvm_cpu_context;
+void handle_trap(struct kvm_cpu_context *host_ctxt);
+asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on);
+void __noreturn __pkvm_init_finalise(void);
+void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
+void kvm_patch_vector_branch(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst);
+void kvm_get_kimage_voffset(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst);
+void kvm_compute_final_ctr_el0(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst);
+void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
+ u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);
#else /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7e7e19ef6993..e759e6b0cd02 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1031,7 +1031,7 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
#define kvm_vcpu_os_lock_enabled(vcpu) \
- (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK))
+ (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index c735afdf639b..6e0e5722f229 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -46,7 +46,7 @@
#define KIMAGE_VADDR (MODULES_END)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (_PAGE_END(VA_BITS_MIN))
-#define MODULES_VSIZE (SZ_128M)
+#define MODULES_VSIZE (SZ_2G)
#define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)
#define PCI_IO_END (VMEMMAP_START - SZ_8M)
@@ -204,15 +204,17 @@ static inline unsigned long kaslr_offset(void)
return kimage_vaddr - KIMAGE_VADDR;
}
+#ifdef CONFIG_RANDOMIZE_BASE
+void kaslr_init(void);
static inline bool kaslr_enabled(void)
{
- /*
- * The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical
- * placement of the image rather than from the seed, so a displacement
- * of less than MIN_KIMG_ALIGN means that no seed was provided.
- */
- return kaslr_offset() >= MIN_KIMG_ALIGN;
+ extern bool __kaslr_is_enabled;
+ return __kaslr_is_enabled;
}
+#else
+static inline void kaslr_init(void) { }
+static inline bool kaslr_enabled(void) { return false; }
+#endif
/*
* Allow all memory at the discovery stage. We will clip it later.
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 5e0402946c35..a6fb325424e7 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -39,11 +39,16 @@ static inline void contextidr_thread_switch(struct task_struct *next)
/*
* Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
*/
-static inline void cpu_set_reserved_ttbr0(void)
+static inline void cpu_set_reserved_ttbr0_nosync(void)
{
unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
write_sysreg(ttbr, ttbr0_el1);
+}
+
+static inline void cpu_set_reserved_ttbr0(void)
+{
+ cpu_set_reserved_ttbr0_nosync();
isb();
}
@@ -52,7 +57,6 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
BUG_ON(pgd == swapper_pg_dir);
- cpu_set_reserved_ttbr0();
cpu_do_switch_mm(virt_to_phys(pgd),mm);
}
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 18734fed3bdd..bfa6638b4c93 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -7,7 +7,6 @@
#include <asm-generic/module.h>
-#ifdef CONFIG_ARM64_MODULE_PLTS
struct mod_plt_sec {
int plt_shndx;
int plt_num_entries;
@@ -21,7 +20,6 @@ struct mod_arch_specific {
/* for CONFIG_DYNAMIC_FTRACE */
struct plt_entry *ftrace_trampolines;
};
-#endif
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
void *loc, const Elf64_Rela *rela,
@@ -30,12 +28,6 @@ u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
void *loc, u64 val);
-#ifdef CONFIG_RANDOMIZE_BASE
-extern u64 module_alloc_base;
-#else
-#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
-#endif
-
struct plt_entry {
/*
* A program that conforms to the AArch64 Procedure Call Standard
diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
index dbba4b7559aa..b9ae8349e35d 100644
--- a/arch/arm64/include/asm/module.lds.h
+++ b/arch/arm64/include/asm/module.lds.h
@@ -1,9 +1,7 @@
SECTIONS {
-#ifdef CONFIG_ARM64_MODULE_PLTS
.plt 0 : { BYTE(0) }
.init.plt 0 : { BYTE(0) }
.text.ftrace_trampoline 0 : { BYTE(0) }
-#endif
#ifdef CONFIG_KASAN_SW_TAGS
/*
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index 13df982a0808..3fdae5fe3142 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -73,6 +73,7 @@ static inline void dynamic_scs_init(void) {}
#endif
int scs_patch(const u8 eh_frame[], int size);
+asmlinkage void scs_patch_vmlinux(void);
#endif /* __ASSEMBLY __ */
diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
index db7b371b367c..9cc501450486 100644
--- a/arch/arm64/include/asm/spectre.h
+++ b/arch/arm64/include/asm/spectre.h
@@ -100,5 +100,21 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int sco
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
+
+void spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, __le32 *origptr,
+ __le32 *updptr, int nr_inst);
+void smccc_patch_fw_mitigation_conduit(struct alt_instr *alt, __le32 *origptr,
+ __le32 *updptr, int nr_inst);
+void spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, __le32 *origptr,
+ __le32 *updptr, int nr_inst);
+void spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt, __le32 *origptr,
+ __le32 *updptr, int nr_inst);
+void spectre_bhb_patch_loop_iter(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst);
+void spectre_bhb_patch_wa3(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst);
+void spectre_bhb_patch_clearbhb(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst);
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SPECTRE_H */
diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
index d30217c21eff..17f687510c48 100644
--- a/arch/arm64/include/asm/syscall_wrapper.h
+++ b/arch/arm64/include/asm/syscall_wrapper.h
@@ -38,6 +38,7 @@
asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused)
#define COND_SYSCALL_COMPAT(name) \
+ asmlinkage long __arm64_compat_sys_##name(const struct pt_regs *regs); \
asmlinkage long __weak __arm64_compat_sys_##name(const struct pt_regs *regs) \
{ \
return sys_ni_syscall(); \
@@ -53,6 +54,7 @@
ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
{ \
return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
@@ -73,11 +75,13 @@
asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused)
#define COND_SYSCALL(name) \
+ asmlinkage long __arm64_sys_##name(const struct pt_regs *regs); \
asmlinkage long __weak __arm64_sys_##name(const struct pt_regs *regs) \
{ \
return sys_ni_syscall(); \
}
+asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused);
#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
#endif /* __ASM_SYSCALL_WRAPPER_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index e72d9aaab6b1..0c07b03d511f 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -134,25 +134,17 @@
#define SYS_SVCR_SMSTART_SM_EL0 sys_reg(0, 3, 4, 3, 3)
#define SYS_SVCR_SMSTOP_SMZA_EL0 sys_reg(0, 3, 4, 6, 3)
-#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
-#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)
-#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2)
-#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2)
-#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2)
#define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4)
#define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5)
#define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6)
#define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7)
#define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0)
-#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4)
-#define SYS_OSLAR_OSLK BIT(0)
-
#define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4)
-#define SYS_OSLSR_OSLM_MASK (BIT(3) | BIT(0))
-#define SYS_OSLSR_OSLM_NI 0
-#define SYS_OSLSR_OSLM_IMPLEMENTED BIT(3)
-#define SYS_OSLSR_OSLK BIT(1)
+#define OSLSR_EL1_OSLM_MASK (BIT(3) | BIT(0))
+#define OSLSR_EL1_OSLM_NI 0
+#define OSLSR_EL1_OSLM_IMPLEMENTED BIT(3)
+#define OSLSR_EL1_OSLK BIT(1)
#define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4)
#define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4)
@@ -235,54 +227,8 @@
/*** End of Statistical Profiling Extension ***/
-/*
- * TRBE Registers
- */
-#define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0)
-#define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1)
-#define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2)
-#define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3)
-#define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4)
-#define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6)
-#define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7)
-
-#define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0)
-#define TRBLIMITR_LIMIT_SHIFT 12
-#define TRBLIMITR_NVM BIT(5)
-#define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0)
-#define TRBLIMITR_TRIG_MODE_SHIFT 3
-#define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0)
-#define TRBLIMITR_FILL_MODE_SHIFT 1
-#define TRBLIMITR_ENABLE BIT(0)
-#define TRBPTR_PTR_MASK GENMASK_ULL(63, 0)
-#define TRBPTR_PTR_SHIFT 0
-#define TRBBASER_BASE_MASK GENMASK_ULL(51, 0)
-#define TRBBASER_BASE_SHIFT 12
-#define TRBSR_EC_MASK GENMASK(5, 0)
-#define TRBSR_EC_SHIFT 26
-#define TRBSR_IRQ BIT(22)
-#define TRBSR_TRG BIT(21)
-#define TRBSR_WRAP BIT(20)
-#define TRBSR_ABORT BIT(18)
-#define TRBSR_STOP BIT(17)
-#define TRBSR_MSS_MASK GENMASK(15, 0)
-#define TRBSR_MSS_SHIFT 0
-#define TRBSR_BSC_MASK GENMASK(5, 0)
-#define TRBSR_BSC_SHIFT 0
-#define TRBSR_FSC_MASK GENMASK(5, 0)
-#define TRBSR_FSC_SHIFT 0
-#define TRBMAR_SHARE_MASK GENMASK(1, 0)
-#define TRBMAR_SHARE_SHIFT 8
-#define TRBMAR_OUTER_MASK GENMASK(3, 0)
-#define TRBMAR_OUTER_SHIFT 4
-#define TRBMAR_INNER_MASK GENMASK(3, 0)
-#define TRBMAR_INNER_SHIFT 0
-#define TRBTRG_TRG_MASK GENMASK(31, 0)
-#define TRBTRG_TRG_SHIFT 0
-#define TRBIDR_FLAG BIT(5)
-#define TRBIDR_PROG BIT(4)
-#define TRBIDR_ALIGN_MASK GENMASK(3, 0)
-#define TRBIDR_ALIGN_SHIFT 0
+#define TRBSR_EL1_BSC_MASK GENMASK(5, 0)
+#define TRBSR_EL1_BSC_SHIFT 0
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 1f361e2da516..d66dfb3a72dd 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -29,6 +29,8 @@ void arm64_force_sig_fault(int signo, int code, unsigned long far, const char *s
void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str);
void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str);
+int early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs);
+
/*
* Move regs->pc to next instruction and do necessary setup before it
* is executed.
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 05f4fc265428..14be5000c5a0 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -65,7 +65,6 @@ static inline void __uaccess_ttbr0_disable(void)
ttbr &= ~TTBR_ASID_MASK;
/* reserved_pg_dir placed before swapper_pg_dir */
write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
- isb();
/* Set reserved ASID */
write_sysreg(ttbr, ttbr1_el1);
isb();
@@ -89,7 +88,6 @@ static inline void __uaccess_ttbr0_enable(void)
ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */
ttbr1 |= ttbr0 & TTBR_ASID_MASK;
write_sysreg(ttbr1, ttbr1_el1);
- isb();
/* Restore user page table */
write_sysreg(ttbr0, ttbr0_el1);
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 69a4fb749c65..a2cac4305b1e 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -102,5 +102,6 @@
#define HWCAP2_SME_BI32I32 (1UL << 40)
#define HWCAP2_SME_B16B16 (1UL << 41)
#define HWCAP2_SME_F16F16 (1UL << 42)
+#define HWCAP2_MOPS (1UL << 43)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7c2bb4e72476..3864a64e2b2b 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -42,8 +42,7 @@ obj-$(CONFIG_COMPAT) += sigreturn32.o
obj-$(CONFIG_COMPAT_ALIGNMENT_FIXUPS) += compat_alignment.o
obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
-obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
+obj-$(CONFIG_MODULES) += module.o module-plts.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 53d13b2e5f59..8ff6610af496 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -121,11 +121,11 @@ static noinstr void patch_alternative(struct alt_instr *alt,
* accidentally call into the cache.S code, which is patched by us at
* runtime.
*/
-static void clean_dcache_range_nopatch(u64 start, u64 end)
+static noinstr void clean_dcache_range_nopatch(u64 start, u64 end)
{
u64 cur, d_size, ctr_el0;
- ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
+ ctr_el0 = arm64_ftr_reg_ctrel0.sys_val;
d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
CTR_EL0_DminLine_SHIFT);
cur = start & ~(d_size - 1);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index bfadac361ac1..05660f0d96cf 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -223,6 +223,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
@@ -364,6 +365,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0),
@@ -2186,6 +2188,11 @@ static void cpu_enable_dit(const struct arm64_cpu_capabilities *__unused)
set_pstate_dit(1);
}
+static void cpu_enable_mops(const struct arm64_cpu_capabilities *__unused)
+{
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_MSCEn);
+}
+
/* Internal helper functions to match cpu capability type */
static bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
@@ -2235,11 +2242,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_ECV_CNTPOFF,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cpuid_feature,
- .sys_reg = SYS_ID_AA64MMFR0_EL1,
- .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT,
- .field_width = 4,
- .sign = FTR_UNSIGNED,
- .min_field_value = ID_AA64MMFR0_EL1_ECV_CNTPOFF,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, CNTPOFF)
},
#ifdef CONFIG_ARM64_PAN
{
@@ -2309,6 +2312,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = is_kvm_protected_mode,
},
+ {
+ .desc = "HCRX_EL2 register",
+ .capability = ARM64_HAS_HCX,
+ .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HCX, IMP)
+ },
#endif
{
.desc = "Kernel page table isolation (KPTI)",
@@ -2641,6 +2651,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_dit,
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP)
},
+ {
+ .desc = "Memory Copy and Memory Set instructions",
+ .capability = ARM64_HAS_MOPS,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_mops,
+ ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, MOPS, IMP)
+ },
{},
};
@@ -2769,6 +2787,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR2_EL1, RPRFM, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRFM),
HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES),
HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
+ HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS),
#ifdef CONFIG_ARM64_SME
HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 42e19fff40ee..d1f68599c29f 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -13,7 +13,7 @@
#include <linux/of_device.h>
#include <linux/psci.h>
-#ifdef CONFIG_ACPI
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
#include <acpi/processor.h>
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index eb4378c23b3c..076a124255d0 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -125,6 +125,7 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_SME_BI32I32] = "smebi32i32",
[KERNEL_HWCAP_SME_B16B16] = "smeb16b16",
[KERNEL_HWCAP_SME_F16F16] = "smef16f16",
+ [KERNEL_HWCAP_MOPS] = "mops",
};
#ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 3af3c01c93a6..6b2e0c367702 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -126,7 +126,7 @@ static __always_inline void __exit_to_user_mode(void)
lockdep_hardirqs_on(CALLER_ADDR0);
}
-static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
+static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
unsigned long flags;
@@ -135,11 +135,13 @@ static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
flags = read_thread_flags();
if (unlikely(flags & _TIF_WORK_MASK))
do_notify_resume(regs, flags);
+
+ lockdep_sys_exit();
}
static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
- prepare_exit_to_user_mode(regs);
+ exit_to_user_mode_prepare(regs);
mte_check_tfsr_exit();
__exit_to_user_mode();
}
@@ -611,6 +613,14 @@ static void noinstr el0_bti(struct pt_regs *regs)
exit_to_user_mode(regs);
}
+static void noinstr el0_mops(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode(regs);
+ local_daif_restore(DAIF_PROCCTX);
+ do_el0_mops(regs, esr);
+ exit_to_user_mode(regs);
+}
+
static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
@@ -688,6 +698,9 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_BTI:
el0_bti(regs);
break;
+ case ESR_ELx_EC_MOPS:
+ el0_mops(regs, esr);
+ break;
case ESR_ELx_EC_BREAKPT_LOW:
case ESR_ELx_EC_SOFTSTP_LOW:
case ESR_ELx_EC_WATCHPT_LOW:
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index ab2a6e33c052..a40e5e50fa55 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -101,12 +101,11 @@
.org .Lventry_start\@ + 128 // Did we overflow the ventry slot?
.endm
- .macro tramp_alias, dst, sym, tmp
- mov_q \dst, TRAMP_VALIAS
- adr_l \tmp, \sym
- add \dst, \dst, \tmp
- adr_l \tmp, .entry.tramp.text
- sub \dst, \dst, \tmp
+ .macro tramp_alias, dst, sym
+ .set .Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text
+ movz \dst, :abs_g2_s:.Lalias\@
+ movk \dst, :abs_g1_nc:.Lalias\@
+ movk \dst, :abs_g0_nc:.Lalias\@
.endm
/*
@@ -435,13 +434,14 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- bne 4f
msr far_el1, x29
- tramp_alias x30, tramp_exit_native, x29
- br x30
-4:
- tramp_alias x30, tramp_exit_compat, x29
- br x30
+
+ ldr_this_cpu x30, this_cpu_vector, x29
+ tramp_alias x29, tramp_exit
+ msr vbar_el1, x30 // install vector table
+ ldr lr, [sp, #S_LR] // restore x30
+ add sp, sp, #PT_REGS_SIZE // restore sp
+ br x29
#endif
.else
ldr lr, [sp, #S_LR]
@@ -732,22 +732,6 @@ alternative_else_nop_endif
.org 1b + 128 // Did we overflow the ventry slot?
.endm
- .macro tramp_exit, regsize = 64
- tramp_data_read_var x30, this_cpu_vector
- get_this_cpu_offset x29
- ldr x30, [x30, x29]
-
- msr vbar_el1, x30
- ldr lr, [sp, #S_LR]
- tramp_unmap_kernel x29
- .if \regsize == 64
- mrs x29, far_el1
- .endif
- add sp, sp, #PT_REGS_SIZE // restore sp
- eret
- sb
- .endm
-
.macro generate_tramp_vector, kpti, bhb
.Lvector_start\@:
.space 0x400
@@ -768,7 +752,7 @@ alternative_else_nop_endif
*/
.pushsection ".entry.tramp.text", "ax"
.align 11
-SYM_CODE_START_NOALIGN(tramp_vectors)
+SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW
@@ -777,13 +761,12 @@ SYM_CODE_START_NOALIGN(tramp_vectors)
generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)
-SYM_CODE_START(tramp_exit_native)
- tramp_exit
-SYM_CODE_END(tramp_exit_native)
-
-SYM_CODE_START(tramp_exit_compat)
- tramp_exit 32
-SYM_CODE_END(tramp_exit_compat)
+SYM_CODE_START_LOCAL(tramp_exit)
+ tramp_unmap_kernel x29
+ mrs x29, far_el1 // restore x29
+ eret
+ sb
+SYM_CODE_END(tramp_exit)
.popsection // .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@@ -1077,7 +1060,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
+ tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
br x5
#endif
SYM_CODE_END(__sdei_asm_handler)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 2fbafa5cc7ac..7a1aeb95d7c3 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1649,6 +1649,7 @@ void fpsimd_flush_thread(void)
fpsimd_flush_thread_vl(ARM64_VEC_SME);
current->thread.svcr = 0;
+ sme_smstop();
}
current->thread.fp_type = FP_STATE_FPSIMD;
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 432626c866a8..a650f5e11fc5 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -197,7 +197,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
static struct plt_entry *get_ftrace_plt(struct module *mod)
{
-#ifdef CONFIG_ARM64_MODULE_PLTS
+#ifdef CONFIG_MODULES
struct plt_entry *plt = mod->arch.ftrace_trampolines;
return &plt[FTRACE_PLT_IDX];
@@ -249,7 +249,7 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
* must use a PLT to reach it. We can only place PLTs for modules, and
* only when module PLT support is built-in.
*/
- if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ if (!IS_ENABLED(CONFIG_MODULES))
return false;
/*
@@ -431,10 +431,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
*
* Note: 'mod' is only set at module load time.
*/
- if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) &&
- IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) && mod)
return aarch64_insn_patch_text_nosync((void *)pc, new);
- }
if (!ftrace_find_callable_addr(rec, mod, &addr))
return -EINVAL;
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 788597a6b6a2..02870beb271e 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -99,7 +99,6 @@ int pfn_is_nosave(unsigned long pfn)
void notrace save_processor_state(void)
{
- WARN_ON(num_online_cpus() != 1);
}
void notrace restore_processor_state(void)
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index b29a311bb055..db2a1861bb97 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -973,14 +973,6 @@ static int hw_breakpoint_reset(unsigned int cpu)
return 0;
}
-#ifdef CONFIG_CPU_PM
-extern void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int));
-#else
-static inline void cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
-{
-}
-#endif
-
/*
* One-time initialisation.
*/
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 370ab84fd06e..8439248c21d3 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -123,6 +123,7 @@ static const struct ftr_set_desc isar2 __initconst = {
.fields = {
FIELD("gpa3", ID_AA64ISAR2_EL1_GPA3_SHIFT, NULL),
FIELD("apa3", ID_AA64ISAR2_EL1_APA3_SHIFT, NULL),
+ FIELD("mops", ID_AA64ISAR2_EL1_MOPS_SHIFT, NULL),
{}
},
};
@@ -174,6 +175,7 @@ static const struct {
"id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 "
"id_aa64isar1.api=0 id_aa64isar1.apa=0 "
"id_aa64isar2.gpa3=0 id_aa64isar2.apa3=0" },
+ { "arm64.nomops", "id_aa64isar2.mops=0" },
{ "arm64.nomte", "id_aa64pfr1.mte=0" },
{ "nokaslr", "kaslr.disabled=1" },
};
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index e7477f21a4c9..17f96a19781d 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -4,90 +4,35 @@
*/
#include <linux/cache.h>
-#include <linux/crc32.h>
#include <linux/init.h>
-#include <linux/libfdt.h>
-#include <linux/mm_types.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/pgtable.h>
-#include <linux/random.h>
+#include <linux/printk.h>
-#include <asm/fixmap.h>
-#include <asm/kernel-pgtable.h>
+#include <asm/cpufeature.h>
#include <asm/memory.h>
-#include <asm/mmu.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-u64 __ro_after_init module_alloc_base;
u16 __initdata memstart_offset_seed;
struct arm64_ftr_override kaslr_feature_override __initdata;
-static int __init kaslr_init(void)
-{
- u64 module_range;
- u32 seed;
-
- /*
- * Set a reasonable default for module_alloc_base in case
- * we end up running with module randomization disabled.
- */
- module_alloc_base = (u64)_etext - MODULES_VSIZE;
+bool __ro_after_init __kaslr_is_enabled = false;
+void __init kaslr_init(void)
+{
if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
pr_info("KASLR disabled on command line\n");
- return 0;
- }
-
- if (!kaslr_enabled()) {
- pr_warn("KASLR disabled due to lack of seed\n");
- return 0;
+ return;
}
- pr_info("KASLR enabled\n");
-
/*
- * KASAN without KASAN_VMALLOC does not expect the module region to
- * intersect the vmalloc region, since shadow memory is allocated for
- * each module at load time, whereas the vmalloc region will already be
- * shadowed by KASAN zero pages.
+ * The KASLR offset modulo MIN_KIMG_ALIGN is taken from the physical
+ * placement of the image rather than from the seed, so a displacement
+ * of less than MIN_KIMG_ALIGN means that no seed was provided.
*/
- BUILD_BUG_ON((IS_ENABLED(CONFIG_KASAN_GENERIC) ||
- IS_ENABLED(CONFIG_KASAN_SW_TAGS)) &&
- !IS_ENABLED(CONFIG_KASAN_VMALLOC));
-
- seed = get_random_u32();
-
- if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
- /*
- * Randomize the module region over a 2 GB window covering the
- * kernel. This reduces the risk of modules leaking information
- * about the address of the kernel itself, but results in
- * branches between modules and the core kernel that are
- * resolved via PLTs. (Branches between modules will be
- * resolved normally.)
- */
- module_range = SZ_2G - (u64)(_end - _stext);
- module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
- } else {
- /*
- * Randomize the module region by setting module_alloc_base to
- * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
- * _stext) . This guarantees that the resulting region still
- * covers [_stext, _etext], and that all relative branches can
- * be resolved without veneers unless this region is exhausted
- * and we fall back to a larger 2GB window in module_alloc()
- * when ARM64_MODULE_PLTS is enabled.
- */
- module_range = MODULES_VSIZE - (u64)(_etext - _stext);
+ if (kaslr_offset() < MIN_KIMG_ALIGN) {
+ pr_warn("KASLR disabled due to lack of seed\n");
+ return;
}
- /* use the lower 21 bits to randomize the base of the module region */
- module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
- module_alloc_base &= PAGE_MASK;
-
- return 0;
+ pr_info("KASLR enabled\n");
+ __kaslr_is_enabled = true;
}
-subsys_initcall(kaslr_init)
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 543493bf924d..ad02058756b5 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -7,6 +7,7 @@
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleloader.h>
#include <linux/sort.h>
static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 5af4975caeb5..dd851297596e 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -7,6 +7,8 @@
* Author: Will Deacon <will.deacon@arm.com>
*/
+#define pr_fmt(fmt) "Modules: " fmt
+
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
@@ -15,52 +17,131 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
+#include <linux/random.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>
+
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>
+static u64 module_direct_base __ro_after_init = 0;
+static u64 module_plt_base __ro_after_init = 0;
+
+/*
+ * Choose a random page-aligned base address for a window of 'size' bytes which
+ * entirely contains the interval [start, end - 1].
+ */
+static u64 __init random_bounding_box(u64 size, u64 start, u64 end)
+{
+ u64 max_pgoff, pgoff;
+
+ if ((end - start) >= size)
+ return 0;
+
+ max_pgoff = (size - (end - start)) / PAGE_SIZE;
+ pgoff = get_random_u32_inclusive(0, max_pgoff);
+
+ return start - pgoff * PAGE_SIZE;
+}
+
+/*
+ * Modules may directly reference data and text anywhere within the kernel
+ * image and other modules. References using PREL32 relocations have a +/-2G
+ * range, and so we need to ensure that the entire kernel image and all modules
+ * fall within a 2G window such that these are always within range.
+ *
+ * Modules may directly branch to functions and code within the kernel text,
+ * and to functions and code within other modules. These branches will use
+ * CALL26/JUMP26 relocations with a +/-128M range. Without PLTs, we must ensure
+ * that the entire kernel text and all module text falls within a 128M window
+ * such that these are always within range. With PLTs, we can expand this to a
+ * 2G window.
+ *
+ * We chose the 128M region to surround the entire kernel image (rather than
+ * just the text) as using the same bounds for the 128M and 2G regions ensures
+ * by construction that we never select a 128M region that is not a subset of
+ * the 2G region. For very large and unusual kernel configurations this means
+ * we may fall back to PLTs where they could have been avoided, but this keeps
+ * the logic significantly simpler.
+ */
+static int __init module_init_limits(void)
+{
+ u64 kernel_end = (u64)_end;
+ u64 kernel_start = (u64)_text;
+ u64 kernel_size = kernel_end - kernel_start;
+
+ /*
+ * The default modules region is placed immediately below the kernel
+ * image, and is large enough to use the full 2G relocation range.
+ */
+ BUILD_BUG_ON(KIMAGE_VADDR != MODULES_END);
+ BUILD_BUG_ON(MODULES_VSIZE < SZ_2G);
+
+ if (!kaslr_enabled()) {
+ if (kernel_size < SZ_128M)
+ module_direct_base = kernel_end - SZ_128M;
+ if (kernel_size < SZ_2G)
+ module_plt_base = kernel_end - SZ_2G;
+ } else {
+ u64 min = kernel_start;
+ u64 max = kernel_end;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
+ pr_info("2G module region forced by RANDOMIZE_MODULE_REGION_FULL\n");
+ } else {
+ module_direct_base = random_bounding_box(SZ_128M, min, max);
+ if (module_direct_base) {
+ min = module_direct_base;
+ max = module_direct_base + SZ_128M;
+ }
+ }
+
+ module_plt_base = random_bounding_box(SZ_2G, min, max);
+ }
+
+ pr_info("%llu pages in range for non-PLT usage",
+ module_direct_base ? (SZ_128M - kernel_size) / PAGE_SIZE : 0);
+ pr_info("%llu pages in range for PLT usage",
+ module_plt_base ? (SZ_2G - kernel_size) / PAGE_SIZE : 0);
+
+ return 0;
+}
+subsys_initcall(module_init_limits);
+
void *module_alloc(unsigned long size)
{
- u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
- gfp_t gfp_mask = GFP_KERNEL;
- void *p;
-
- /* Silence the initial allocation */
- if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
- gfp_mask |= __GFP_NOWARN;
-
- if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
- IS_ENABLED(CONFIG_KASAN_SW_TAGS))
- /* don't exceed the static module region - see below */
- module_alloc_end = MODULES_END;
-
- p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
- NUMA_NO_NODE, __builtin_return_address(0));
-
- if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
- (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
- (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
- !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
- /*
- * KASAN without KASAN_VMALLOC can only deal with module
- * allocations being served from the reserved module region,
- * since the remainder of the vmalloc region is already
- * backed by zero shadow pages, and punching holes into it
- * is non-trivial. Since the module region is not randomized
- * when KASAN is enabled without KASAN_VMALLOC, it is even
- * less likely that the module region gets exhausted, so we
- * can simply omit this fallback in that case.
- */
- p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_base + SZ_2G, GFP_KERNEL,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
+ void *p = NULL;
+
+ /*
+ * Where possible, prefer to allocate within direct branch range of the
+ * kernel such that no PLTs are necessary.
+ */
+ if (module_direct_base) {
+ p = __vmalloc_node_range(size, MODULE_ALIGN,
+ module_direct_base,
+ module_direct_base + SZ_128M,
+ GFP_KERNEL | __GFP_NOWARN,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ }
- if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
+ if (!p && module_plt_base) {
+ p = __vmalloc_node_range(size, MODULE_ALIGN,
+ module_plt_base,
+ module_plt_base + SZ_2G,
+ GFP_KERNEL | __GFP_NOWARN,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ }
+
+ if (!p) {
+ pr_warn_ratelimited("%s: unable to allocate memory\n",
+ __func__);
+ }
+
+ if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
vfree(p);
return NULL;
}
@@ -448,9 +529,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
AARCH64_INSN_IMM_26);
-
- if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
- ovf == -ERANGE) {
+ if (ovf == -ERANGE) {
val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
if (!val)
return -ENOEXEC;
@@ -487,7 +566,7 @@ static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod)
{
-#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
+#if defined(CONFIG_DYNAMIC_FTRACE)
const Elf_Shdr *s;
struct plt_entry *plts;
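[Editor's illustration, not part of the patch: the window maths used by random_bounding_box() above is easiest to see in isolation. This standalone user-space sketch reproduces the same calculation with made-up addresses, showing how a page-aligned 128M window is slid around while still covering the whole [start, end) interval; the kernel uses get_random_u32_inclusive() rather than rand().]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define SZ_128M		(128UL * 1024 * 1024)

/* Pick a page-aligned base for a 'size'-byte window containing [start, end). */
static uint64_t random_bounding_box(uint64_t size, uint64_t start, uint64_t end)
{
	uint64_t max_pgoff, pgoff;

	if ((end - start) >= size)
		return 0;	/* interval too big to fit in the window */

	/* Number of page-sized positions the window can slide over. */
	max_pgoff = (size - (end - start)) / PAGE_SIZE;
	pgoff = (uint64_t)rand() % (max_pgoff + 1);

	return start - pgoff * PAGE_SIZE;
}

int main(void)
{
	uint64_t kernel_start = 0xffff800008000000UL;		/* made-up _text */
	uint64_t kernel_end = kernel_start + 0x2800000UL;	/* ~40M image */
	uint64_t base = random_bounding_box(SZ_128M, kernel_start, kernel_end);

	printf("direct-branch window: [%#lx, %#lx)\n",
	       (unsigned long)base, (unsigned long)(base + SZ_128M));
	return 0;
}

With KASLR enabled, module_init_limits() in the hunk above first tries a 128M window like this and then a 2G one, so module_direct_base may legitimately end up as zero and module_alloc() falls through to the PLT-backed region.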
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b8ec7b3ac9cb..417a8a86b2db 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -296,6 +296,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
*cmdline_p = boot_command_line;
+ kaslr_init();
+
/*
* If know now we are going to need KPTI then use non-global
* mappings from the start, avoiding the cost of rewriting
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 2cfc810d0a5b..e304f7ebec2a 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -23,6 +23,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
+#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
@@ -398,7 +399,7 @@ static int restore_tpidr2_context(struct user_ctxs *user)
__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
if (!err)
- current->thread.tpidr2_el0 = tpidr2_el0;
+ write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);
return err;
}
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index da84cf855c44..5a668d7f3c1f 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -147,11 +147,9 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
* exit regardless, as the old entry assembly did.
*/
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
- local_daif_mask();
flags = read_thread_flags();
if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
return;
- local_daif_restore(DAIF_PROCCTX);
}
trace_exit:
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 4bb1b8f47298..794a2dd3659a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -514,6 +514,63 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr)
die("Oops - FPAC", regs, esr);
}
+void do_el0_mops(struct pt_regs *regs, unsigned long esr)
+{
+ bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION;
+ bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A;
+ int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
+ int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
+ int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
+ unsigned long dst, src, size;
+
+ dst = pt_regs_read_reg(regs, dstreg);
+ src = pt_regs_read_reg(regs, srcreg);
+ size = pt_regs_read_reg(regs, sizereg);
+
+ /*
+ * Put the registers back in the original format suitable for a
+ * prologue instruction, using the generic return routine from the
+ * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
+ */
+ if (esr & ESR_ELx_MOPS_ISS_MEM_INST) {
+ /* SET* instruction */
+ if (option_a ^ wrong_option) {
+ /* Format is from Option A; forward set */
+ pt_regs_write_reg(regs, dstreg, dst + size);
+ pt_regs_write_reg(regs, sizereg, -size);
+ }
+ } else {
+ /* CPY* instruction */
+ if (!(option_a ^ wrong_option)) {
+ /* Format is from Option B */
+ if (regs->pstate & PSR_N_BIT) {
+ /* Backward copy */
+ pt_regs_write_reg(regs, dstreg, dst - size);
+ pt_regs_write_reg(regs, srcreg, src - size);
+ }
+ } else {
+ /* Format is from Option A */
+ if (size & BIT(63)) {
+ /* Forward copy */
+ pt_regs_write_reg(regs, dstreg, dst + size);
+ pt_regs_write_reg(regs, srcreg, src + size);
+ pt_regs_write_reg(regs, sizereg, -size);
+ }
+ }
+ }
+
+ if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE)
+ regs->pc -= 8;
+ else
+ regs->pc -= 4;
+
+ /*
+ * If single stepping then finish the step before executing the
+ * prologue instruction.
+ */
+ user_fastforward_single_step(current);
+}
+
#define __user_cache_maint(insn, address, res) \
if (address >= TASK_SIZE_MAX) { \
res = -EFAULT; \
@@ -824,6 +881,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)",
[ESR_ELx_EC_DABT_CUR] = "DABT (current EL)",
[ESR_ELx_EC_SP_ALIGN] = "SP Alignment",
+ [ESR_ELx_EC_MOPS] = "MOPS",
[ESR_ELx_EC_FP_EXC32] = "FP (AArch32)",
[ESR_ELx_EC_FP_EXC64] = "FP (AArch64)",
[ESR_ELx_EC_SERROR] = "SError",
@@ -947,7 +1005,7 @@ void do_serror(struct pt_regs *regs, unsigned long esr)
}
/* GENERIC_BUG traps */
-
+#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
/*
@@ -959,6 +1017,7 @@ int is_valid_bugaddr(unsigned long addr)
*/
return 1;
}
+#endif
static int bug_handler(struct pt_regs *regs, unsigned long esr)
{
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 55f80fb93925..8725291cb00a 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -333,7 +333,7 @@ void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
/* Check if we have TRBE implemented and available at the host */
if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_TraceBuffer_SHIFT) &&
- !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
+ !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_EL1_P))
vcpu_set_flag(vcpu, DEBUG_STATE_SAVE_TRBE);
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index e78a08a72a3c..eb123aa7479d 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -130,6 +130,9 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu)
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
+
+ if (cpus_have_final_cap(ARM64_HAS_HCX))
+ write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2);
}
static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
@@ -144,6 +147,9 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~HCR_VSE;
vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
}
+
+ if (cpus_have_final_cap(ARM64_HAS_HCX))
+ write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
}
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index d756b939f296..4558c02eb352 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -56,7 +56,7 @@ static void __debug_save_trace(u64 *trfcr_el1)
*trfcr_el1 = 0;
/* Check if the TRBE is enabled */
- if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_ENABLE))
+ if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E))
return;
/*
* Prohibit trace generation while we are in guest.
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 71b12094d613..34262ff0842b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -388,9 +388,9 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
return read_from_write_only(vcpu, p, r);
/* Forward the OSLK bit to OSLSR */
- oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
- if (p->regval & SYS_OSLAR_OSLK)
- oslsr |= SYS_OSLSR_OSLK;
+ oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
+ if (p->regval & OSLAR_EL1_OSLK)
+ oslsr |= OSLSR_EL1_OSLK;
__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
return true;
@@ -414,7 +414,7 @@ static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
* The only modifiable bit is the OSLK bit. Refuse the write if
* userspace attempts to change any other bit in the register.
*/
- if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
+ if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
return -EINVAL;
__vcpu_sys_reg(vcpu, rd->reg) = val;
@@ -1252,6 +1252,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
if (!cpus_have_final_cap(ARM64_HAS_WFXT))
val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS);
break;
case SYS_ID_AA64DFR0_EL1:
/* Limit debug to ARMv8.0 */
@@ -1781,7 +1782,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
- SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
+ OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
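
The read_id_reg() change above hides FEAT_MOPS from guests by clearing its ID_AA64ISAR2_EL1 field before the value is returned to the vcpu. A minimal sketch of that kind of masking, assuming the MOPS field sits at bits [19:16] of ID_AA64ISAR2_EL1 (the real code uses the generated ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) rather than a hard-coded mask):

    #include <linux/bits.h>
    #include <linux/types.h>

    /*
     * Illustrative only: clear an assumed 4-bit MOPS field at bits [19:16]
     * so the guest reads it as 0 ("not implemented").
     */
    static inline u64 hide_isar2_mops(u64 isar2)
    {
            return isar2 & ~GENMASK_ULL(19, 16);
    }
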
diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c
index 96b171995d19..f9a53b7f9842 100644
--- a/arch/arm64/lib/xor-neon.c
+++ b/arch/arm64/lib/xor-neon.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <asm/neon-intrinsics.h>
-void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
+static void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2)
{
uint64_t *dp1 = (uint64_t *)p1;
@@ -37,7 +37,7 @@ void xor_arm64_neon_2(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
-void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
+static void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3)
{
@@ -73,7 +73,7 @@ void xor_arm64_neon_3(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
-void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
+static void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3,
const unsigned long * __restrict p4)
@@ -118,7 +118,7 @@ void xor_arm64_neon_4(unsigned long bytes, unsigned long * __restrict p1,
} while (--lines > 0);
}
-void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1,
+static void xor_arm64_neon_5(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3,
const unsigned long * __restrict p4,
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index e1e0dca01839..188197590fc9 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -364,8 +364,8 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
ttbr1 &= ~TTBR_ASID_MASK;
ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
+ cpu_set_reserved_ttbr0_nosync();
write_sysreg(ttbr1, ttbr1_el1);
- isb();
write_sysreg(ttbr0, ttbr0_el1);
isb();
post_ttbr_update_workaround();
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index cb21ccd7940d..7b2537c0bfbc 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -66,6 +66,8 @@ static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr
static void data_abort_decode(unsigned long esr)
{
+ unsigned long iss2 = ESR_ELx_ISS2(esr);
+
pr_alert("Data abort info:\n");
if (esr & ESR_ELx_ISV) {
@@ -78,12 +80,21 @@ static void data_abort_decode(unsigned long esr)
(esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
(esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
} else {
- pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
+ pr_alert(" ISV = 0, ISS = 0x%08lx, ISS2 = 0x%08lx\n",
+ esr & ESR_ELx_ISS_MASK, iss2);
}
- pr_alert(" CM = %lu, WnR = %lu\n",
+ pr_alert(" CM = %lu, WnR = %lu, TnD = %lu, TagAccess = %lu\n",
(esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
- (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
+ (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT,
+ (iss2 & ESR_ELx_TnD) >> ESR_ELx_TnD_SHIFT,
+ (iss2 & ESR_ELx_TagAccess) >> ESR_ELx_TagAccess_SHIFT);
+
+ pr_alert(" GCS = %ld, Overlay = %lu, DirtyBit = %lu, Xs = %llu\n",
+ (iss2 & ESR_ELx_GCS) >> ESR_ELx_GCS_SHIFT,
+ (iss2 & ESR_ELx_Overlay) >> ESR_ELx_Overlay_SHIFT,
+ (iss2 & ESR_ELx_DirtyBit) >> ESR_ELx_DirtyBit_SHIFT,
+ (iss2 & ESR_ELx_Xs_MASK) >> ESR_ELx_Xs_SHIFT);
}
static void mem_abort_decode(unsigned long esr)
@@ -886,9 +897,6 @@ void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_sp_pc_abort);
-int __init early_brk64(unsigned long addr, unsigned long esr,
- struct pt_regs *regs);
-
/*
* __refdata because early_brk64 is __init, but the reference to it is
* clobbered at arch_initcall time.
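
The extended data abort report above prints several new syndrome bits taken from ISS2. A minimal sketch of the extraction it relies on, assuming ISS2 occupies ESR bits [55:32] as in recent Arm ARM revisions (the per-bit TnD/TagAccess/GCS/Overlay/DirtyBit/Xs masks and the real ESR_ELx_ISS2() accessor live in asm/esr.h):

    #include <linux/bits.h>

    /* Illustrative only: isolate the ISS2 syndrome bits from a saved ESR. */
    static inline unsigned long esr_to_iss2(unsigned long esr)
    {
            return (esr & GENMASK_ULL(55, 32)) >> 32;
    }
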
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 5f9379b3c8c8..4e6476094952 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -8,6 +8,7 @@
#include <linux/export.h>
#include <linux/mm.h>
+#include <linux/libnvdimm.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 66e70ca47680..c28c2c8483cc 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -69,6 +69,7 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit
#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1)
+#define CRASH_HIGH_SEARCH_BASE SZ_4G
#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
@@ -101,12 +102,13 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
*/
static void __init reserve_crashkernel(void)
{
- unsigned long long crash_base, crash_size;
- unsigned long long crash_low_size = 0;
+ unsigned long long crash_low_size = 0, search_base = 0;
unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
+ unsigned long long crash_base, crash_size;
char *cmdline = boot_command_line;
- int ret;
bool fixed_base = false;
+ bool high = false;
+ int ret;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
@@ -129,7 +131,9 @@ static void __init reserve_crashkernel(void)
else if (ret)
return;
+ search_base = CRASH_HIGH_SEARCH_BASE;
crash_max = CRASH_ADDR_HIGH_MAX;
+ high = true;
} else if (ret || !crash_size) {
/* The specified value is invalid */
return;
@@ -140,31 +144,51 @@ static void __init reserve_crashkernel(void)
/* User specifies base address explicitly. */
if (crash_base) {
fixed_base = true;
+ search_base = crash_base;
crash_max = crash_base + crash_size;
}
retry:
crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
- crash_base, crash_max);
+ search_base, crash_max);
if (!crash_base) {
/*
- * If the first attempt was for low memory, fall back to
- * high memory, the minimum required low memory will be
- * reserved later.
+ * For crashkernel=size[KMG]@offset[KMG], print a failure message
+ * if the specified region can't be reserved.
*/
- if (!fixed_base && (crash_max == CRASH_ADDR_LOW_MAX)) {
+ if (fixed_base) {
+ pr_warn("crashkernel reservation failed - memory is in use.\n");
+ return;
+ }
+
+ /*
+ * For crashkernel=size[KMG], if the first attempt was for
+ * low memory, fall back to high memory, the minimum required
+ * low memory will be reserved later.
+ */
+ if (!high && crash_max == CRASH_ADDR_LOW_MAX) {
crash_max = CRASH_ADDR_HIGH_MAX;
+ search_base = CRASH_ADDR_LOW_MAX;
crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
goto retry;
}
+ /*
+ * For crashkernel=size[KMG],high, if the first attempt was
+ * for high memory, fall back to low memory.
+ */
+ if (high && crash_max == CRASH_ADDR_HIGH_MAX) {
+ crash_max = CRASH_ADDR_LOW_MAX;
+ search_base = 0;
+ goto retry;
+ }
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
crash_size);
return;
}
- if ((crash_base > CRASH_ADDR_LOW_MAX - crash_low_size) &&
- crash_low_size && reserve_crashkernel_low(crash_low_size)) {
+ if ((crash_base >= CRASH_ADDR_LOW_MAX) && crash_low_size &&
+ reserve_crashkernel_low(crash_low_size)) {
memblock_phys_free(crash_base, crash_size);
return;
}
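
For reference, the three crashkernel= forms handled above now behave as follows (sizes and offsets here are only examples):

    crashkernel=512M              try below arm64_dma_phys_limit first; on failure,
                                  retry from CRASH_ADDR_LOW_MAX upwards and reserve
                                  DEFAULT_CRASH_KERNEL_LOW_SIZE of low memory later
    crashkernel=512M,high         search upwards from 4G (CRASH_HIGH_SEARCH_BASE)
                                  first, then fall back to low memory
    crashkernel=256M@0x60000000   fixed base: warn and give up if that exact region
                                  is already in use
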
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index e969e68de005..f17d066e85eb 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -214,7 +214,7 @@ static void __init clear_pgds(unsigned long start,
static void __init kasan_init_shadow(void)
{
u64 kimg_shadow_start, kimg_shadow_end;
- u64 mod_shadow_start, mod_shadow_end;
+ u64 mod_shadow_start;
u64 vmalloc_shadow_end;
phys_addr_t pa_start, pa_end;
u64 i;
@@ -223,7 +223,6 @@ static void __init kasan_init_shadow(void)
kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));
mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
- mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
@@ -246,17 +245,9 @@ static void __init kasan_init_shadow(void)
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
(void *)mod_shadow_start);
- if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- BUILD_BUG_ON(VMALLOC_START != MODULES_END);
- kasan_populate_early_shadow((void *)vmalloc_shadow_end,
- (void *)KASAN_SHADOW_END);
- } else {
- kasan_populate_early_shadow((void *)kimg_shadow_end,
- (void *)KASAN_SHADOW_END);
- if (kimg_shadow_start > mod_shadow_end)
- kasan_populate_early_shadow((void *)mod_shadow_end,
- (void *)kimg_shadow_start);
- }
+ BUILD_BUG_ON(VMALLOC_START != MODULES_END);
+ kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+ (void *)KASAN_SHADOW_END);
for_each_mem_range(i, &pa_start, &pa_end) {
void *start = (void *)__phys_to_virt(pa_start);
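
The branch removed above handled the !CONFIG_KASAN_VMALLOC case; since KASAN_VMALLOC is now selected whenever KASAN is enabled on arm64, only the unconditional path remains and the separate module-shadow bookkeeping (mod_shadow_end) goes away. With VMALLOC_START == MODULES_END enforced at build time, the module area's shadow is simply the start of the vmalloc shadow, because the shadow mapping is a monotone function of the virtual address. A rough sketch of the generic-KASAN translation, for reference:

    /*
     * Minimal sketch: each shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT (8 for
     * generic KASAN) bytes of address space, so adjacent VA ranges map to
     * adjacent shadow ranges.  KASAN_SHADOW_OFFSET is the Kconfig-provided
     * constant.
     */
    static inline void *mem_to_shadow_sketch(const void *addr)
    {
            return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                            + KASAN_SHADOW_OFFSET);
    }
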
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index af6bc8403ee4..95d360805f8a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -451,7 +451,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
- if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
+ if (virt < PAGE_OFFSET) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
@@ -478,7 +478,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
- if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
+ if (virt < PAGE_OFFSET) {
pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
@@ -663,12 +663,17 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
vm_area_add_early(vma);
}
+static pgprot_t kernel_exec_prot(void)
+{
+ return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+}
+
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
int i;
- pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+ pgprot_t prot = kernel_exec_prot();
phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
/* The trampoline is always mapped and can therefore be global */
@@ -723,7 +728,7 @@ static void __init map_kernel(pgd_t *pgdp)
* mapping to install SW breakpoints. Allow this (only) when
* explicitly requested with rodata=off.
*/
- pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+ pgprot_t text_prot = kernel_exec_prot();
/*
* If we have a CPU that supports BTI and a kernel built for
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 40ba95472594..debc4609f129 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -32,8 +32,10 @@ HAS_GENERIC_AUTH_IMP_DEF
HAS_GIC_CPUIF_SYSREGS
HAS_GIC_PRIO_MASKING
HAS_GIC_PRIO_RELAXED_SYNC
+HAS_HCX
HAS_LDAPR
HAS_LSE_ATOMICS
+HAS_MOPS
HAS_NESTED_VIRT
HAS_NO_FPSIMD
HAS_NO_HW_PREFETCH
diff --git a/arch/arm64/tools/gen-cpucaps.awk b/arch/arm64/tools/gen-cpucaps.awk
index 00c9e72a200a..8525980379d7 100755
--- a/arch/arm64/tools/gen-cpucaps.awk
+++ b/arch/arm64/tools/gen-cpucaps.awk
@@ -24,12 +24,12 @@ BEGIN {
}
/^[vA-Z0-9_]+$/ {
- printf("#define ARM64_%-30s\t%d\n", $0, cap_num++)
+ printf("#define ARM64_%-40s\t%d\n", $0, cap_num++)
next
}
END {
- printf("#define ARM64_NCAPS\t\t\t\t%d\n", cap_num)
+ printf("#define ARM64_NCAPS\t\t\t\t\t%d\n", cap_num)
print ""
print "#endif /* __ASM_CPUCAPS_H */"
}
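
With the wider %-40s field, the generated asm/cpucaps.h keeps its value column aligned for the longer capability names added above. The output is of the form (the numeric value below is hypothetical; it is assigned by position in the cpucaps list):

    #define ARM64_HAS_MOPS                                  42
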
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index c9a0d1fa3209..c585725172d5 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -48,6 +48,61 @@
# feature that introduces them (eg, FEAT_LS64_ACCDATA introduces enumeration
# item ACCDATA) though it may be more tasteful to do something else.
+Sysreg OSDTRRX_EL1 2 0 0 0 2
+Res0 63:32
+Field 31:0 DTRRX
+EndSysreg
+
+Sysreg MDCCINT_EL1 2 0 0 2 0
+Res0 63:31
+Field 30 RX
+Field 29 TX
+Res0 28:0
+EndSysreg
+
+Sysreg MDSCR_EL1 2 0 0 2 2
+Res0 63:36
+Field 35 EHBWE
+Field 34 EnSPM
+Field 33 TTA
+Field 32 EMBWE
+Field 31 TFO
+Field 30 RXfull
+Field 29 TXfull
+Res0 28
+Field 27 RXO
+Field 26 TXU
+Res0 25:24
+Field 23:22 INTdis
+Field 21 TDA
+Res0 20
+Field 19 SC2
+Res0 18:16
+Field 15 MDE
+Field 14 HDE
+Field 13 KDE
+Field 12 TDCC
+Res0 11:7
+Field 6 ERR
+Res0 5:1
+Field 0 SS
+EndSysreg
+
+Sysreg OSDTRTX_EL1 2 0 0 3 2
+Res0 63:32
+Field 31:0 DTRTX
+EndSysreg
+
+Sysreg OSECCR_EL1 2 0 0 6 2
+Res0 63:32
+Field 31:0 EDECCR
+EndSysreg
+
+Sysreg OSLAR_EL1 2 0 1 0 4
+Res0 63:1
+Field 0 OSLK
+EndSysreg
+
Sysreg ID_PFR0_EL1 3 0 0 1 0
Res0 63:32
UnsignedEnum 31:28 RAS
@@ -2200,3 +2255,80 @@ Sysreg ICC_NMIAR1_EL1 3 0 12 9 5
Res0 63:24
Field 23:0 INTID
EndSysreg
+
+Sysreg TRBLIMITR_EL1 3 0 9 11 0
+Field 63:12 LIMIT
+Res0 11:7
+Field 6 XE
+Field 5 nVM
+Enum 4:3 TM
+ 0b00 STOP
+ 0b01 IRQ
+ 0b11 IGNR
+EndEnum
+Enum 2:1 FM
+ 0b00 FILL
+ 0b01 WRAP
+ 0b11 CBUF
+EndEnum
+Field 0 E
+EndSysreg
+
+Sysreg TRBPTR_EL1 3 0 9 11 1
+Field 63:0 PTR
+EndSysreg
+
+Sysreg TRBBASER_EL1 3 0 9 11 2
+Field 63:12 BASE
+Res0 11:0
+EndSysreg
+
+Sysreg TRBSR_EL1 3 0 9 11 3
+Res0 63:56
+Field 55:32 MSS2
+Field 31:26 EC
+Res0 25:24
+Field 23 DAT
+Field 22 IRQ
+Field 21 TRG
+Field 20 WRAP
+Res0 19
+Field 18 EA
+Field 17 S
+Res0 16
+Field 15:0 MSS
+EndSysreg
+
+Sysreg TRBMAR_EL1 3 0 9 11 4
+Res0 63:12
+Enum 11:10 PAS
+ 0b00 SECURE
+ 0b01 NON_SECURE
+ 0b10 ROOT
+ 0b11 REALM
+EndEnum
+Enum 9:8 SH
+ 0b00 NON_SHAREABLE
+ 0b10 OUTER_SHAREABLE
+ 0b11 INNER_SHAREABLE
+EndEnum
+Field 7:0 Attr
+EndSysreg
+
+Sysreg TRBTRG_EL1 3 0 9 11 6
+Res0 63:32
+Field 31:0 TRG
+EndSysreg
+
+Sysreg TRBIDR_EL1 3 0 9 11 7
+Res0 63:12
+Enum 11:8 EA
+ 0b0000 NON_DESC
+ 0b0001 IGNORE
+ 0b0010 SERROR
+EndEnum
+Res0 7:6
+Field 5 F
+Field 4 P
+Field 3:0 Align
+EndSysreg
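
These descriptions are what back the switch in the KVM hunks above from the old hand-written TRBIDR_PROG / TRBLIMITR_ENABLE definitions to the generated TRBIDR_EL1_P and TRBLIMITR_EL1_E names. As a rough sketch (the exact output comes from gen-sysreg.awk), a single-bit entry such as "Field 0 E" in TRBLIMITR_EL1 ends up as definitions along the lines of:

    #define TRBLIMITR_EL1_E			GENMASK(0, 0)
    #define TRBLIMITR_EL1_E_MASK		GENMASK(0, 0)
    #define TRBLIMITR_EL1_E_SHIFT		0
    #define TRBLIMITR_EL1_E_WIDTH		1
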