author	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-28 14:04:24 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-28 14:04:24 -0700
commit	9840cfcb97fc8b6aa7b36cec3cc3fd763f14052e (patch)
tree	20c1702dcfbc9874fe64a23ec01c0168ea33711a /arch/arm64/include/asm/assembler.h
parent	17cfb9d332243f6d2551818df27d666d3dd70a23 (diff)
parent	3d1bf78c7b13a0ad88e77dce94df88400f47e0dd (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "There's a reasonable amount here and the juicy details are all below.

  It's worth noting that the MTE/KASAN changes strayed outside of our
  usual directories due to core mm changes and some associated changes
  to some other architectures; Andrew asked for us to carry these [1]
  rather than take them via the -mm tree.

  Summary:

   - Optimise SVE switching for CPUs with 128-bit implementations.

   - Fix output format from SVE selftest.

   - Add support for versions v1.2 and 1.3 of the SMC calling
     convention.

   - Allow Pointer Authentication to be configured independently for
     kernel and userspace.

   - PMU driver cleanups for managing IRQ affinity and exposing event
     attributes via sysfs.

   - KASAN optimisations for both hardware tagging (MTE) and out-of-line
     software tagging implementations.

   - Relax frame record alignment requirements to facilitate 8-byte
     alignment with KASAN and Clang.

   - Cleanup of page-table definitions and removal of unused memory
     types.

   - Reduction of ARCH_DMA_MINALIGN back to 64 bytes.

   - Refactoring of our instruction decoding routines and addition of
     some missing encodings.

   - Move entry code into C and harden it against harmful compiler
     instrumentation.

   - Update booting requirements for the FEAT_HCX feature, added to v8.7
     of the architecture.

   - Fix resume from idle when pNMI is being used.

   - Additional CPU sanity checks for MTE and preparatory changes for
     systems where not all of the CPUs support 32-bit EL0.

   - Update our kernel string routines to the latest Cortex Strings
     implementation.

   - Big cleanup of our cache maintenance routines, which were
     confusingly named and inconsistent in their implementations.

   - Tweak linker flags so that GDB can understand vmlinux when using
     RELR relocations.

   - Boot path cleanups to enable early initialisation of per-cpu
     operations needed by KCSAN.

   - Non-critical fixes and miscellaneous cleanup"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (150 commits)
  arm64: tlb: fix the TTL value of tlb_get_level
  arm64: Restrict undef hook for cpufeature registers
  arm64/mm: Rename ARM64_SWAPPER_USES_SECTION_MAPS
  arm64: insn: avoid circular include dependency
  arm64: smp: Bump debugging information print down to KERN_DEBUG
  drivers/perf: fix the missed ida_simple_remove() in ddr_perf_probe()
  perf/arm-cmn: Fix invalid pointer when access dtc object sharing the same IRQ number
  arm64: suspend: Use cpuidle context helpers in cpu_suspend()
  PSCI: Use cpuidle context helpers in psci_cpu_suspend_enter()
  arm64: Convert cpu_do_idle() to using cpuidle context helpers
  arm64: Add cpuidle context save/restore helpers
  arm64: head: fix code comments in set_cpu_boot_mode_flag
  arm64: mm: drop unused __pa(__idmap_text_start)
  arm64: mm: fix the count comments in compute_indices
  arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan
  arm64: mm: Pass original fault address to handle_mm_fault()
  arm64/mm: Drop SECTION_[SHIFT|SIZE|MASK]
  arm64/mm: Use CONT_PMD_SHIFT for ARM64_MEMSTART_SHIFT
  arm64/mm: Drop SWAPPER_INIT_MAP_SIZE
  arm64: Conditionally configure PTR_AUTH key of the kernel.
  ...
Diffstat (limited to 'arch/arm64/include/asm/assembler.h')
-rw-r--r--	arch/arm64/include/asm/assembler.h	98
1 file changed, 61 insertions(+), 37 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 8418c1bd8f04..89faca0e740d 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -130,15 +130,27 @@ alternative_endif
.endm
/*
- * Emit an entry into the exception table
+ * Create an exception table entry for `insn`, which will branch to `fixup`
+ * when an unhandled fault is taken.
*/
- .macro _asm_extable, from, to
+ .macro _asm_extable, insn, fixup
.pushsection __ex_table, "a"
.align 3
- .long (\from - .), (\to - .)
+ .long (\insn - .), (\fixup - .)
.popsection
.endm
+/*
+ * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
+ * do nothing.
+ */
+ .macro _cond_extable, insn, fixup
+ .ifnc \fixup,
+ _asm_extable \insn, \fixup
+ .endif
+ .endm
+
+
#define USER(l, x...) \
9999: x; \
_asm_extable 9999b, l
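
For orientation, a minimal sketch of how these macros compose (illustrative only; the fixup label, error code and registers are not from this patch): USER() tags one potentially-faulting instruction, and _asm_extable records the (insn, fixup) pair as PC-relative offsets in __ex_table. The new _cond_extable is the conditional form: `.ifnc \fixup,` compares the argument against the empty string, so a caller that passes no fixup gets no entry at all.

	// Illustrative sketch, not part of this patch: an unprivileged
	// load through a user pointer in x1. On a fault, the exception
	// handler finds the faulting PC in __ex_table and resumes at
	// local label 2 instead of taking the fault as fatal.
USER(2f, ldtr w0, [x1])			// w0 = *x1; may fault
	ret
2:	mov	x0, #-14		// -EFAULT (illustrative)
	ret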
@@ -232,17 +244,25 @@ lr .req x30 // link register
* @dst: destination register
*/
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
- .macro this_cpu_offset, dst
+ .macro get_this_cpu_offset, dst
mrs \dst, tpidr_el2
.endm
#else
- .macro this_cpu_offset, dst
+ .macro get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \dst, tpidr_el1
alternative_else
mrs \dst, tpidr_el2
alternative_endif
.endm
+
+ .macro set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ msr tpidr_el1, \src
+alternative_else
+ msr tpidr_el2, \src
+alternative_endif
+ .endm
#endif
/*
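
The rename from this_cpu_offset to get_this_cpu_offset makes room for its new counterpart. A hedged sketch of the intended pairing (the register and the source of the offset are illustrative): set_this_cpu_offset stashes the CPU's per-cpu offset in TPIDR_EL1 (or TPIDR_EL2 on VHE systems, where the kernel runs at EL2), and get_this_cpu_offset reads it back.

	// Illustrative: during CPU bring-up, x0 holds this CPU's
	// per-cpu offset (e.g. computed from __per_cpu_offset[]).
	set_this_cpu_offset x0		// msr tpidr_el1 (tpidr_el2 on VHE)

	// Any later reader recovers the same value:
	get_this_cpu_offset x1		// x1 == x0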
@@ -253,7 +273,7 @@ alternative_endif
.macro adr_this_cpu, dst, sym, tmp
adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym
- this_cpu_offset \tmp
+ get_this_cpu_offset \tmp
add \dst, \dst, \tmp
.endm
@@ -264,7 +284,7 @@ alternative_endif
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
- this_cpu_offset \tmp
+ get_this_cpu_offset \tmp
ldr \dst, [\dst, \tmp]
.endm
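
Both helpers now route through get_this_cpu_offset: resolve the symbol's link-time address, then add this CPU's offset. A short usage sketch, with a hypothetical per-cpu symbol:

	// my_counter is a hypothetical 64-bit per-cpu variable.
	adr_this_cpu	x0, my_counter, x1	// x0 = this CPU's &my_counter
	ldr_this_cpu	x2, my_counter, x3	// x2 = this CPU's my_counter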
@@ -375,51 +395,53 @@ alternative_cb_end
bfi \tcr, \tmp0, \pos, #3
.endm
+ .macro __dcache_op_workaround_clean_cache, op, addr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+ dc \op, \addr
+alternative_else
+ dc civac, \addr
+alternative_endif
+ .endm
+
/*
* Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
+ * [start, end)
*
* op: operation passed to dc instruction
* domain: domain used in dsb instruction
- * kaddr: starting virtual address of the region
- * size: size of the region
- * Corrupts: kaddr, size, tmp1, tmp2
+ * start: starting virtual address of the region
+ * end: end virtual address of the region
+ * fixup: optional label to branch to on user fault
+ * Corrupts: start, end, tmp1, tmp2
*/
- .macro __dcache_op_workaround_clean_cache, op, kaddr
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
- dc \op, \kaddr
-alternative_else
- dc civac, \kaddr
-alternative_endif
- .endm
-
- .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+ .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
dcache_line_size \tmp1, \tmp2
- add \size, \kaddr, \size
sub \tmp2, \tmp1, #1
- bic \kaddr, \kaddr, \tmp2
-9998:
+ bic \start, \start, \tmp2
+.Ldcache_op\@:
.ifc \op, cvau
- __dcache_op_workaround_clean_cache \op, \kaddr
+ __dcache_op_workaround_clean_cache \op, \start
.else
.ifc \op, cvac
- __dcache_op_workaround_clean_cache \op, \kaddr
+ __dcache_op_workaround_clean_cache \op, \start
.else
.ifc \op, cvap
- sys 3, c7, c12, 1, \kaddr // dc cvap
+ sys 3, c7, c12, 1, \start // dc cvap
.else
.ifc \op, cvadp
- sys 3, c7, c13, 1, \kaddr // dc cvadp
+ sys 3, c7, c13, 1, \start // dc cvadp
.else
- dc \op, \kaddr
+ dc \op, \start
.endif
.endif
.endif
.endif
- add \kaddr, \kaddr, \tmp1
- cmp \kaddr, \size
- b.lo 9998b
+ add \start, \start, \tmp1
+ cmp \start, \end
+ b.lo .Ldcache_op\@
dsb \domain
+
+ _cond_extable .Ldcache_op\@, \fixup
.endm
/*
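
A caller-side sketch of the reworked interface (registers and label are illustrative, not from this patch): the range is now passed as [start, end) rather than (kaddr, size), and the trailing fixup argument, routed through _cond_extable, makes the `dc` loop recoverable when the buffer may be a user address. Omitting the argument leaves the macro fault-fatal, exactly as before.

	// Illustrative: clean+invalidate [x0, x1) to the PoC; a fault
	// on the `dc` in the loop resumes at local label 9. Note the
	// macro corrupts x0, x1 and the two scratch registers.
	dcache_by_line_op civac, sy, x0, x1, x2, x3, 9f
	mov	x0, #0			// success
	ret
9:	mov	x0, #-14		// -EFAULT (illustrative)
	ret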
@@ -427,20 +449,22 @@ alternative_endif
* [start, end)
*
* start, end: virtual addresses describing the region
- * label: A label to branch to on user fault.
+ * fixup: optional label to branch to on user fault
* Corrupts: tmp1, tmp2
*/
- .macro invalidate_icache_by_line start, end, tmp1, tmp2, label
+ .macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
icache_line_size \tmp1, \tmp2
sub \tmp2, \tmp1, #1
bic \tmp2, \start, \tmp2
-9997:
-USER(\label, ic ivau, \tmp2) // invalidate I line PoU
+.Licache_op\@:
+ ic ivau, \tmp2 // invalidate I line PoU
add \tmp2, \tmp2, \tmp1
cmp \tmp2, \end
- b.lo 9997b
+ b.lo .Licache_op\@
dsb ish
isb
+
+ _cond_extable .Licache_op\@, \fixup
.endm
/*
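
The same pattern as the D-cache macro above: the USER() wrapper on the `ic` instruction is gone, and the now-optional fixup, again via _cond_extable, decides whether a fault during the invalidate is recoverable. A sketch (illustrative label and registers):

	// Illustrative: invalidate the I-cache for user range [x0, x1);
	// a fault on `ic ivau` resumes at local label 8.
	invalidate_icache_by_line x0, x1, x2, x3, 8f
	mov	x0, #0			// success
	ret
8:	mov	x0, #-14		// -EFAULT (illustrative)
	ret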
@@ -745,7 +769,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
cbz \tmp, \lbl
#endif
adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
- this_cpu_offset \tmp2
+ get_this_cpu_offset \tmp2
ldr w\tmp, [\tmp, \tmp2]
cbnz w\tmp, \lbl // yield on pending softirq in task context
.Lnoyield_\@: