author	Nicholas Piggin <npiggin@gmail.com>	2023-04-08 12:17:48 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2023-04-20 12:23:14 +1000
commit	b270bebd34e36fb69363d65e24b00a9d148903e8 (patch)
tree	34eeba359270e2e27ad77752b291793451ad2205 /arch
parent	4f18b9e6ca58440394e86a53bf1be0d8a1920bcd (diff)
powerpc/64s: Run at the kernel virtual address earlier in boot
This mostly consolidates the Book3E and Book3S behaviour in boot WRT executing from the physical or virtual address.

Book3E sets up kernel virtual linear map in start_initialization_book3e and runs from the virtual linear alias after that. This change makes Book3S begin to execute from the virtual alias at the same point. Book3S can not use its MMU for that at this point, but when the MMU is disabled, the virtual linear address correctly aliases to physical memory because the top bits of the address are ignored with MMU disabled.

Secondaries execute from the virtual address similarly early.

This reduces the differences between subarchs, but the main motivation was to enable the PC-relative addressing ABI for Book3S, where pointer calculations must execute from the virtual address or the top bits of the pointer will be lost. This is similar to the requirement the TOC relative addressing already has that the TOC pointer use its virtual address.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230408021752.862660-3-npiggin@gmail.com
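For context (not part of the commit): the trick relied on above is that the linear-map (PAGE_OFFSET) alias of a physical address differs only in the top address bits, which the CPU ignores while the MMU is off. Below is a minimal C sketch of that aliasing arithmetic, roughly what the kernel's tovirt()/tophys() helpers compute on 64-bit; the base value and helper names here are illustrative assumptions, not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative: the usual 64-bit linear-map base (top two address bits set). */
    #define LINEAR_MAP_BASE 0xc000000000000000ULL

    /* Physical address -> linear-map alias, in the spirit of tovirt(). */
    static uint64_t to_virt(uint64_t phys)
    {
            return phys | LINEAR_MAP_BASE;
    }

    /* Linear-map alias -> physical address, in the spirit of tophys(). */
    static uint64_t to_phys(uint64_t virt)
    {
            return virt & ~LINEAR_MAP_BASE;
    }

    int main(void)
    {
            uint64_t phys = 0x4000000ULL;   /* example early-boot load address */
            uint64_t virt = to_virt(phys);

            /*
             * With the MMU off the top bits are ignored, so executing at 'virt'
             * reaches the same memory as 'phys'; once the MMU and linear map
             * are up, the same 'virt' address stays valid, so code that
             * branched to the alias early never has to re-derive its pointers.
             */
            printf("phys %#llx -> virt %#llx -> phys %#llx\n",
                   (unsigned long long)phys,
                   (unsigned long long)virt,
                   (unsigned long long)to_phys(virt));
            return 0;
    }

This is also why the PC-relative and TOC-relative addressing mentioned above must run at the virtual address: pointers derived from the program counter inherit whichever alias the code is currently executing from.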
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/head_64.S	82
1 file changed, 44 insertions(+), 38 deletions(-)
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 00ac5920e84b..bac1dfe52ae5 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -76,6 +76,13 @@
* 2. The kernel is entered at __start
*/
+/*
+ * boot_from_prom and prom_init run at the physical address. Everything
+ * after prom and kexec entry run at the virtual address (PAGE_OFFSET).
+ * Secondaries run at the virtual address from generic_secondary_common_init
+ * onward.
+ */
+
OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
USE_FIXED_SECTION(first_256B)
/*
@@ -303,13 +310,11 @@ _GLOBAL(fsl_secondary_thread_init)
/* turn on 64-bit mode */
bl enable_64b_mode
- /* get a valid TOC pointer, wherever we're mapped at */
- bl relative_toc
- tovirt(r2,r2)
-
/* Book3E initialization */
mr r3,r24
bl book3e_secondary_thread_init
+ bl relative_toc
+
b generic_secondary_common_init
#endif /* CONFIG_PPC_BOOK3E_64 */
@@ -331,16 +336,12 @@ _GLOBAL(generic_secondary_smp_init)
/* turn on 64-bit mode */
bl enable_64b_mode
- /* get a valid TOC pointer, wherever we're mapped at */
- bl relative_toc
- tovirt(r2,r2)
-
#ifdef CONFIG_PPC_BOOK3E_64
/* Book3E initialization */
mr r3,r24
mr r4,r25
bl book3e_secondary_core_init
-
+ /* Now NIA and r2 are relocated to PAGE_OFFSET if not already */
/*
* After common core init has finished, check if the current thread is the
* one we wanted to boot. If not, start the specified thread and stop the
@@ -378,6 +379,16 @@ _GLOBAL(generic_secondary_smp_init)
10:
b 10b
20:
+#else
+ /* Now the MMU is off, can branch to our PAGE_OFFSET address */
+ bcl 20,31,$+4
+1: mflr r11
+ addi r11,r11,(2f - 1b)
+ tovirt(r11, r11)
+ mtctr r11
+ bctr
+2:
+ bl relative_toc
#endif
generic_secondary_common_init:
@@ -492,6 +503,8 @@ SYM_FUNC_START_LOCAL(start_initialization_book3s)
/* Switch off MMU if not already off */
bl __mmu_off
+ /* Now the MMU is off, can return to our PAGE_OFFSET address */
+ tovirt(r25,r25)
mtlr r25
blr
SYM_FUNC_END(start_initialization_book3s)
@@ -534,16 +547,19 @@ __start_initialization_multiplatform:
/* Get TOC pointer (current runtime address) */
bl relative_toc
+ /* These functions return to the virtual (PAGE_OFFSET) address */
#ifdef CONFIG_PPC_BOOK3E_64
bl start_initialization_book3e
#else
bl start_initialization_book3s
#endif /* CONFIG_PPC_BOOK3E_64 */
- /* Get TOC pointer */
+ /* Get TOC pointer, virtual */
bl relative_toc
/* find out where we are now */
+
+ /* OPAL doesn't pass base address in r4, have to derive it. */
bcl 20,31,$+4
0: mflr r26 /* r26 = runtime addr here */
addis r26,r26,(_stext - 0b)@ha
@@ -554,7 +570,7 @@ __start_initialization_multiplatform:
__REF
__boot_from_prom:
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
- /* Get TOC pointer */
+ /* Get TOC pointer, non-virtual */
bl relative_toc
/* find out where we are now */
@@ -603,18 +619,11 @@ __boot_from_prom:
__after_prom_start:
#ifdef CONFIG_RELOCATABLE
/* process relocations for the final address of the kernel */
- lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
- sldi r25,r25,32
-#if defined(CONFIG_PPC_BOOK3E_64)
- tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
-#endif
lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
-#if defined(CONFIG_PPC_BOOK3E_64)
- tophys(r26,r26)
-#endif
cmplwi cr0,r7,1 /* flagged to stay where we are ? */
- bne 1f
- add r25,r25,r26
+ mr r25,r26 /* then use current kernel base */
+ beq 1f
+ LOAD_REG_IMMEDIATE(r25, PAGE_OFFSET) /* else use static kernel base */
1: mr r3,r25
bl relocate
#if defined(CONFIG_PPC_BOOK3E_64)
@@ -630,14 +639,8 @@ __after_prom_start:
*
* Note: This process overwrites the OF exception vectors.
*/
- li r3,0 /* target addr */
-#ifdef CONFIG_PPC_BOOK3E_64
- tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */
-#endif
+ LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
mr. r4,r26 /* In some cases the loader may */
-#if defined(CONFIG_PPC_BOOK3E_64)
- tovirt(r4,r4)
-#endif
beq 9f /* have already put us at zero */
li r6,0x100 /* Start offset, the first 0x100 */
/* bytes were copied earlier. */
@@ -648,9 +651,6 @@ __after_prom_start:
* variable __run_at_load, if it is set the kernel is treated as relocatable
* kernel, otherwise it will be moved to PHYSICAL_START
*/
-#if defined(CONFIG_PPC_BOOK3E_64)
- tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
-#endif
lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
cmplwi cr0,r7,1
bne 3f
@@ -769,9 +769,15 @@ _GLOBAL(pmac_secondary_start)
sync
slbia
- /* get TOC pointer (real address) */
+ /* Branch to our PAGE_OFFSET address */
+ bcl 20,31,$+4
+1: mflr r11
+ addi r11,r11,(2f - 1b)
+ tovirt(r11, r11)
+ mtctr r11
+ bctr
+2:
bl relative_toc
- tovirt(r2,r2)
/* Copy some CPU settings from CPU 0 */
bl __restore_cpu_ppc970
@@ -910,8 +916,9 @@ SYM_FUNC_END(enable_64b_mode)
* TOC in -mcmodel=medium mode. After we relocate to 0 but before
* the MMU is on we need our TOC to be a virtual address otherwise
* these pointers will be real addresses which may get stored and
- * accessed later with the MMU on. We use tovirt() at the call
- * sites to handle this.
+ * accessed later with the MMU on. We branch to the virtual address
+ * while still in real mode then call relative_toc again to handle
+ * this.
*/
_GLOBAL(relative_toc)
mflr r0
@@ -930,9 +937,8 @@ p_toc: .8byte .TOC. - 0b
*/
__REF
start_here_multiplatform:
- /* set up the TOC */
- bl relative_toc
- tovirt(r2,r2)
+ /* Adjust TOC for moved kernel. Could adjust when moving it instead. */
+ bl relative_toc
/* Clear out the BSS. It may have been done in prom_init,
* already but that's irrelevant since prom_init will soon