author     Jiri Slaby <jslaby@suse.cz>      2019-10-11 13:50:47 +0200
committer  Borislav Petkov <bp@suse.de>     2019-10-18 10:29:34 +0200
commit     deff8a24e1021fb39dddf5f6bc5832e0e3a632ea (patch)
tree       1162565298647e88883eb06be4e3cd421b5f0a77 /arch/x86/boot
parent     74d8b90a889022e306b543ff2147a6941c99b354 (diff)
x86/boot: Annotate local functions
.Lrelocated, .Lpaging_enabled, .Lno_longmode, and .Lin_pm32 are
self-standing local functions. Annotate them as such and preserve
"no alignment".
The annotations do not generate anything yet.
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Cao jin <caoj.fnst@cn.fujitsu.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: linux-arch@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wei Huang <wei@redhat.com>
Cc: x86-ml <x86@kernel.org>
Cc: Xiaoyao Li <xiaoyao.li@linux.intel.com>
Link: https://lkml.kernel.org/r/20191011115108.12392-8-jslaby@suse.cz
Diffstat (limited to 'arch/x86/boot')
 arch/x86/boot/compressed/head_32.S | 3 ++-
 arch/x86/boot/compressed/head_64.S | 9 ++++++---
 arch/x86/boot/pmjump.S             | 4 ++--
 3 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 5e30eaaf8576..f9e2a80bd699 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -209,7 +209,7 @@ ENDPROC(efi32_stub_entry)
 #endif
 
 	.text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 
 /*
  * Clear BSS (stack is currently empty)
@@ -260,6 +260,7 @@ ENDPROC(efi32_stub_entry)
  */
 	xorl	%ebx, %ebx
 	jmp	*%eax
+SYM_FUNC_END(.Lrelocated)
 
 #ifdef CONFIG_EFI_STUB
 	.data
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index d98cd483377e..7afe6e067066 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -511,7 +511,7 @@ ENDPROC(efi64_stub_entry)
 #endif
 
 	.text
-.Lrelocated:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 
 /*
  * Clear BSS (stack is currently empty)
@@ -540,6 +540,7 @@ ENDPROC(efi64_stub_entry)
  * Jump to the decompressed kernel.
  */
 	jmp	*%rax
+SYM_FUNC_END(.Lrelocated)
 
 /*
  * Adjust the global offset table
@@ -635,9 +636,10 @@ ENTRY(trampoline_32bit_src)
 	lret
 
 	.code64
-.Lpaging_enabled:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
 	/* Return from the trampoline */
 	jmp	*%rdi
+SYM_FUNC_END(.Lpaging_enabled)
 
 /*
  * The trampoline code has a size limit.
@@ -647,11 +649,12 @@ ENTRY(trampoline_32bit_src)
 	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
 
 	.code32
-.Lno_longmode:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
 	/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
 1:
 	hlt
 	jmp     1b
+SYM_FUNC_END(.Lno_longmode)
 
 #include "../../kernel/verify_cpu.S"
diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S
index ea88d52eeac7..81658fe35380 100644
--- a/arch/x86/boot/pmjump.S
+++ b/arch/x86/boot/pmjump.S
@@ -46,7 +46,7 @@ ENDPROC(protected_mode_jump)
 
 	.code32
 	.section ".text32","ax"
-.Lin_pm32:
+SYM_FUNC_START_LOCAL_NOALIGN(.Lin_pm32)
 	# Set up data segments for flat 32-bit mode
 	movl	%ecx, %ds
 	movl	%ecx, %es
@@ -72,4 +72,4 @@ ENDPROC(protected_mode_jump)
 	lldt	%cx
 	jmpl	*%eax			# Jump to the 32-bit entrypoint
 
-ENDPROC(.Lin_pm32)
+SYM_FUNC_END(.Lin_pm32)
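
For reference, every hunk above applies the same conversion pattern. The snippet
below is an illustrative sketch, not part of the commit: the .Lexample label and
its body are made up, and the SYM_FUNC_* macros are assumed to be the new linkage
annotations introduced earlier in this series; per the commit message, the
annotations do not generate anything yet, so the emitted code is unchanged.

	# Before: a bare local label, with no begin/end annotation.
	.text
.Lexample:
	movl	%ecx, %eax
	ret

	# After: the same code marked as a self-standing local function.
	# The _NOALIGN variant preserves "no alignment" (no padding is
	# emitted before the symbol), and SYM_FUNC_END closes the region
	# opened by SYM_FUNC_START_LOCAL_NOALIGN.
	.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lexample)
	movl	%ecx, %eax
	ret
SYM_FUNC_END(.Lexample)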