Diffstat (limited to 'arch/arm64/kernel/head.S')
-rw-r--r--	arch/arm64/kernel/head.S	86
1 file changed, 46 insertions, 40 deletions
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 989b1944cb71..57a91032b4c2 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -105,7 +105,7 @@ pe_header:
  *  x24        __primary_switch() .. relocate_kernel()
  *                                        current RELR displacement
  */
-ENTRY(stext)
+SYM_CODE_START(stext)
 	bl	preserve_boot_args
 	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
 	adrp	x23, __PHYS_OFFSET
@@ -118,14 +118,15 @@ ENTRY(stext)
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
+	mov	x0, #ARM64_CPU_BOOT_PRIMARY
 	bl	__cpu_setup			// initialise processor
 	b	__primary_switch
-ENDPROC(stext)
+SYM_CODE_END(stext)
 
 /*
  * Preserve the arguments passed by the bootloader in x0 .. x3
  */
-preserve_boot_args:
+SYM_CODE_START_LOCAL(preserve_boot_args)
 	mov	x21, x0				// x21=FDT
 
 	adr_l	x0, boot_args			// record the contents of
@@ -137,7 +138,7 @@ preserve_boot_args:
 
 	mov	x1, #0x20			// 4 x 8 bytes
 	b	__inval_dcache_area		// tail call
-ENDPROC(preserve_boot_args)
+SYM_CODE_END(preserve_boot_args)
 
 /*
  * Macro to create a table entry to the next page.
@@ -275,7 +276,7 @@ ENDPROC(preserve_boot_args)
  *   - first few MB of the kernel linear mapping to jump to once the MMU has
  *     been enabled
  */
-__create_page_tables:
+SYM_FUNC_START_LOCAL(__create_page_tables)
 	mov	x28, lr
 
 	/*
@@ -403,15 +404,14 @@ __create_page_tables:
 	bl	__inval_dcache_area
 
 	ret	x28
-ENDPROC(__create_page_tables)
-	.ltorg
+SYM_FUNC_END(__create_page_tables)
 
 /*
  * The following fragment of code is executed with the MMU enabled.
  *
  *   x0 = __PHYS_OFFSET
  */
-__primary_switched:
+SYM_FUNC_START_LOCAL(__primary_switched)
 	adrp	x4, init_thread_union
 	add	sp, x4, #THREAD_SIZE
 	adr_l	x5, init_task
@@ -456,7 +456,14 @@ __primary_switched:
 	mov	x29, #0
 	mov	x30, #0
 	b	start_kernel
-ENDPROC(__primary_switched)
+SYM_FUNC_END(__primary_switched)
+
+	.pushsection ".rodata", "a"
+SYM_DATA_START(kimage_vaddr)
+	.quad		_text - TEXT_OFFSET
+SYM_DATA_END(kimage_vaddr)
+EXPORT_SYMBOL(kimage_vaddr)
+	.popsection
 
 /*
  * end early head section, begin head code that is also used for
@@ -464,10 +471,6 @@ ENDPROC(__primary_switched)
  */
 	.section ".idmap.text","awx"
 
-ENTRY(kimage_vaddr)
-	.quad		_text - TEXT_OFFSET
-EXPORT_SYMBOL(kimage_vaddr)
-
 /*
  * If we're fortunate enough to boot at EL2, ensure that the world is
  * sane before dropping to EL1.
@@ -475,7 +478,7 @@ EXPORT_SYMBOL(kimage_vaddr)
  * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
  * booted in EL1 or EL2 respectively.
  */
-ENTRY(el2_setup)
+SYM_FUNC_START(el2_setup)
 	msr	SPsel, #1			// We want to use SP_EL{1,2}
 	mrs	x0, CurrentEL
 	cmp	x0, #CurrentEL_EL2
@@ -599,7 +602,7 @@ set_hcr:
 	isb
 	ret
 
-install_el2_stub:
+SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
 	/*
 	 * When VHE is not in use, early init of EL2 and EL1 needs to be
 	 * done here.
@@ -636,13 +639,13 @@ install_el2_stub:
 	msr	elr_el2, lr
 	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
 	eret
-ENDPROC(el2_setup)
+SYM_FUNC_END(el2_setup)
 
 /*
  * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
  * in w0. See arch/arm64/include/asm/virt.h for more info.
  */
-set_cpu_boot_mode_flag:
+SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
 	adr_l	x1, __boot_cpu_mode
 	cmp	w0, #BOOT_CPU_MODE_EL2
 	b.ne	1f
@@ -651,7 +654,7 @@ set_cpu_boot_mode_flag:
 	dmb	sy
 	dc	ivac, x1			// Invalidate potentially stale cache line
 	ret
-ENDPROC(set_cpu_boot_mode_flag)
+SYM_FUNC_END(set_cpu_boot_mode_flag)
 
 /*
  * These values are written with the MMU off, but read with the MMU on.
@@ -667,15 +670,17 @@ ENDPROC(set_cpu_boot_mode_flag)
  * This is not in .bss, because we set it sufficiently early that the boot-time
  * zeroing of .bss would clobber it.
  */
-ENTRY(__boot_cpu_mode)
+SYM_DATA_START(__boot_cpu_mode)
 	.long	BOOT_CPU_MODE_EL2
 	.long	BOOT_CPU_MODE_EL1
+SYM_DATA_END(__boot_cpu_mode)
 
 /*
  * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
-ENTRY(__early_cpu_boot_status)
+SYM_DATA_START(__early_cpu_boot_status)
 	.quad	0
+SYM_DATA_END(__early_cpu_boot_status)
 
 	.popsection
 
@@ -683,7 +688,7 @@ ENTRY(__early_cpu_boot_status)
  * This provides a "holding pen" for platforms to hold all secondary
 * cores are held until we're ready for them to initialise.
 */
-ENTRY(secondary_holding_pen)
+SYM_FUNC_START(secondary_holding_pen)
 	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
 	bl	set_cpu_boot_mode_flag
 	mrs	x0, mpidr_el1
@@ -695,31 +700,32 @@ pen:	ldr	x4, [x3]
 	b.eq	secondary_startup
 	wfe
 	b	pen
-ENDPROC(secondary_holding_pen)
+SYM_FUNC_END(secondary_holding_pen)
 
 /*
  * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
-ENTRY(secondary_entry)
+SYM_FUNC_START(secondary_entry)
 	bl	el2_setup			// Drop to EL1
 	bl	set_cpu_boot_mode_flag
 	b	secondary_startup
-ENDPROC(secondary_entry)
+SYM_FUNC_END(secondary_entry)
 
-secondary_startup:
+SYM_FUNC_START_LOCAL(secondary_startup)
 	/*
 	 * Common entry point for secondary CPUs.
 	 */
 	bl	__cpu_secondary_check52bitva
+	mov	x0, #ARM64_CPU_BOOT_SECONDARY
 	bl	__cpu_setup			// initialise processor
 	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
 	ldr	x8, =__secondary_switched
 	br	x8
-ENDPROC(secondary_startup)
+SYM_FUNC_END(secondary_startup)
 
-__secondary_switched:
+SYM_FUNC_START_LOCAL(__secondary_switched)
 	adr_l	x5, vectors
 	msr	vbar_el1, x5
 	isb
@@ -734,13 +740,13 @@ __secondary_switched:
 	mov	x29, #0
 	mov	x30, #0
 	b	secondary_start_kernel
-ENDPROC(__secondary_switched)
+SYM_FUNC_END(__secondary_switched)
 
-__secondary_too_slow:
+SYM_FUNC_START_LOCAL(__secondary_too_slow)
 	wfe
 	wfi
 	b	__secondary_too_slow
-ENDPROC(__secondary_too_slow)
+SYM_FUNC_END(__secondary_too_slow)
 
 /*
  * The booting CPU updates the failed status @__early_cpu_boot_status,
@@ -772,7 +778,7 @@ ENDPROC(__secondary_too_slow)
  * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU
 */
-ENTRY(__enable_mmu)
+SYM_FUNC_START(__enable_mmu)
 	mrs	x2, ID_AA64MMFR0_EL1
 	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
 	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
@@ -796,9 +802,9 @@ ENTRY(__enable_mmu)
 	dsb	nsh
 	isb
 	ret
-ENDPROC(__enable_mmu)
+SYM_FUNC_END(__enable_mmu)
 
-ENTRY(__cpu_secondary_check52bitva)
+SYM_FUNC_START(__cpu_secondary_check52bitva)
 #ifdef CONFIG_ARM64_VA_BITS_52
 	ldr_l	x0, vabits_actual
 	cmp	x0, #52
@@ -816,9 +822,9 @@ ENTRY(__cpu_secondary_check52bitva)
 
 #endif
 2:	ret
-ENDPROC(__cpu_secondary_check52bitva)
+SYM_FUNC_END(__cpu_secondary_check52bitva)
 
-__no_granule_support:
+SYM_FUNC_START_LOCAL(__no_granule_support)
 	/* Indicate that this CPU can't boot and is stuck in the kernel */
 	update_early_cpu_boot_status \
 		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
@@ -826,10 +832,10 @@ __no_granule_support:
 1:
 	wfe
 	wfi
 	b	1b
-ENDPROC(__no_granule_support)
+SYM_FUNC_END(__no_granule_support)
 
 #ifdef CONFIG_RELOCATABLE
-__relocate_kernel:
+SYM_FUNC_START_LOCAL(__relocate_kernel)
 	/*
 	 * Iterate over each entry in the relocation table, and apply the
 	 * relocations in place.
@@ -931,10 +937,10 @@ __relocate_kernel:
 #endif
 	ret
 
-ENDPROC(__relocate_kernel)
+SYM_FUNC_END(__relocate_kernel)
 #endif
 
-__primary_switch:
+SYM_FUNC_START_LOCAL(__primary_switch)
 #ifdef CONFIG_RANDOMIZE_BASE
 	mov	x19, x0				// preserve new SCTLR_EL1 value
 	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
@@ -977,4 +983,4 @@ __primary_switch:
 	ldr	x8, =__primary_switched
 	adrp	x0, __PHYS_OFFSET
 	br	x8
-ENDPROC(__primary_switch)
+SYM_FUNC_END(__primary_switch)
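
Note: the bulk of this diff is a mechanical conversion of head.S from the old ENTRY()/ENDPROC() annotations to the SYM_*() macros from include/linux/linkage.h, which make a symbol's linkage (global vs. file-local) and kind (C-callable function vs. raw code) explicit. A minimal sketch of the pattern, using hypothetical symbols example_func and example_stub that are not part of this patch:

/* Old style: the same annotation regardless of linkage or calling convention */
ENTRY(example_func)
	ret
ENDPROC(example_func)

/* New style: linkage and kind are spelled out */
SYM_FUNC_START(example_func)		// global, obeys the C calling convention
	ret
SYM_FUNC_END(example_func)

SYM_CODE_START_LOCAL(example_stub)	// file-local, non-standard conventions
	b	.			// e.g. a parking loop that never returns
SYM_CODE_END(example_stub)

The *_END macros also emit a .size directive, giving debuggers and tracers an accurate symbol size.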
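Data symbols get the same treatment: ENTRY() on __boot_cpu_mode, __early_cpu_boot_status and kimage_vaddr becomes SYM_DATA_START/SYM_DATA_END, and kimage_vaddr additionally moves out of .idmap.text into .rodata. A sketch of that pattern, again with a hypothetical symbol example_value:

	.pushsection ".rodata", "a"	// allocatable, read-only
SYM_DATA_START(example_value)
	.quad	0
SYM_DATA_END(example_value)		// records the symbol's size
	.popsection

Separately, the primary and secondary boot paths now pass ARM64_CPU_BOOT_PRIMARY or ARM64_CPU_BOOT_SECONDARY in x0, so __cpu_setup can tell which kind of CPU it is initialising.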