author	Ard Biesheuvel <ard.biesheuvel@linaro.org>	2018-09-18 23:51:38 -0700
committer	Thomas Gleixner <tglx@linutronix.de>	2018-09-27 17:56:47 +0200
commit	c296146c058c87e9ed53001b6a3988519dbbb6a5 (patch)
tree	2b09a08e8e36f4360324624ec10e501f1d06d77e /arch
parent	50ff18ab497aa22f6a59444625df7508c8918237 (diff)
arm64/kernel: jump_label: Switch to relative references
On a randomly chosen distro kernel build for arm64, vmlinux.o contains the
following sections, holding the jump label entries and the associated RELA
relocation records, respectively:
...
[38088] __jump_table PROGBITS 0000000000000000 00e19f30
000000000002ea10 0000000000000000 WA 0 0 8
[38089] .rela__jump_table RELA 0000000000000000 01fd8bb0
000000000008be30 0000000000000018 I 38178 38088 8
...
In other words, we have 190 KB worth of 'struct jump_entry' instances,
and 573 KB worth of RELA entries to relocate each entry's code, target
and key members. This means the RELA section occupies 10% of the .init
segment, and the two sections combined represent 5% of vmlinux's entire
memory footprint.
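Those numbers are internally consistent: with the old layout of three u64
members, each jump_entry is 24 bytes and needs three 24-byte Elf64_Rela
records. A quick standalone sanity check (a sketch, not kernel code; the
sizes are taken from the section headers above):

	#include <stdio.h>

	int main(void)
	{
		unsigned long table_size = 0x2ea10;	/* __jump_table size */
		unsigned long rela_size  = 0x8be30;	/* .rela__jump_table size */

		/* Old layout: three u64 members, i.e. 24 bytes per jump_entry */
		unsigned long entries = table_size / 24;

		printf("entries: %lu\n", entries);	/* 7958 */
		/* One 24-byte Elf64_Rela record per member, three members */
		printf("expected RELA bytes: %lu (actual: %lu)\n",
		       entries * 3 * 24, rela_size);	/* 572976 == 0x8be30 */
		return 0;
	}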
So let's switch from 64-bit absolute references to 32-bit relative
references for the code and target fields, and a 64-bit relative
reference for the 'key' field (which may reside in another module or the
core kernel, which may be more than 4 GB away on arm64 when running with
KASLR enabled): this reduces the size of the __jump_table section by 33%,
and gets rid of the RELA section entirely.
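For reference, the relative layout selected by HAVE_ARCH_JUMP_LABEL_RELATIVE
looks roughly like this (paraphrasing the generic support added earlier in
this series): 16 bytes per entry instead of 24, with each field stored as an
offset from its own address, so the linker can resolve everything at build
time:

	struct jump_entry {
		s32 code;	/* branch site, as an offset from &entry->code */
		s32 target;	/* branch target, as an offset from &entry->target */
		long key;	/* static_key, as an offset from &entry->key;
				 * full-width because the key may live in another
				 * module or the core kernel, > 4 GB away */
	};

	static inline unsigned long jump_entry_code(const struct jump_entry *entry)
	{
		return (unsigned long)&entry->code + entry->code;
	}

	static inline unsigned long jump_entry_target(const struct jump_entry *entry)
	{
		return (unsigned long)&entry->target + entry->target;
	}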
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-s390@vger.kernel.org
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Jessica Yu <jeyu@kernel.org>
Link: https://lkml.kernel.org/r/20180919065144.25010-4-ard.biesheuvel@linaro.org
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/Kconfig	1
-rw-r--r--	arch/arm64/include/asm/jump_label.h	38
-rw-r--r--	arch/arm64/kernel/jump_label.c	6
3 files changed, 22 insertions(+), 23 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b1a0e95c751..16e1e2c1e77b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -104,6 +104,7 @@ config ARM64
 	select HAVE_ARCH_BITREVERSE
 	select HAVE_ARCH_HUGE_VMAP
 	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 7e2b3e360086..472023498d71 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -26,13 +26,16 @@
 
 #define JUMP_LABEL_NOP_SIZE		AARCH64_INSN_SIZE
 
-static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch(struct static_key *key,
+					       bool branch)
 {
-	asm_volatile_goto("1: nop\n\t"
-			 ".pushsection __jump_table, \"aw\"\n\t"
-			 ".align 3\n\t"
-			 ".quad 1b, %l[l_yes], %c0\n\t"
-			 ".popsection\n\t"
+	asm_volatile_goto(
+		"1:	nop					\n\t"
+		"	.pushsection	__jump_table, \"aw\"	\n\t"
+		"	.align		3			\n\t"
+		"	.long		1b - ., %l[l_yes] - .	\n\t"
+		"	.quad		%c0 - .			\n\t"
+		"	.popsection				\n\t"
 		 :  :  "i"(&((char *)key)[branch]) :  : l_yes);
 
 	return false;
@@ -40,13 +43,16 @@ l_yes:
 	return true;
 }
 
-static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+						    bool branch)
 {
-	asm_volatile_goto("1: b %l[l_yes]\n\t"
-			 ".pushsection __jump_table, \"aw\"\n\t"
-			 ".align 3\n\t"
-			 ".quad 1b, %l[l_yes], %c0\n\t"
-			 ".popsection\n\t"
+	asm_volatile_goto(
+		"1:	b		%l[l_yes]		\n\t"
+		"	.pushsection	__jump_table, \"aw\"	\n\t"
+		"	.align		3			\n\t"
+		"	.long		1b - ., %l[l_yes] - .	\n\t"
+		"	.quad		%c0 - .			\n\t"
+		"	.popsection				\n\t"
 		 :  :  "i"(&((char *)key)[branch]) :  : l_yes);
 
 	return false;
@@ -54,13 +60,5 @@ l_yes:
 	return true;
 }
 
-typedef u64 jump_label_t;
-
-struct jump_entry {
-	jump_label_t code;
-	jump_label_t target;
-	jump_label_t key;
-};
-
 #endif	/* __ASSEMBLY__ */
 #endif	/* __ASM_JUMP_LABEL_H */
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index e0756416e567..646b9562ee64 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -25,12 +25,12 @@
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
-	void *addr = (void *)entry->code;
+	void *addr = (void *)jump_entry_code(entry);
 	u32 insn;
 
 	if (type == JUMP_LABEL_JMP) {
-		insn = aarch64_insn_gen_branch_imm(entry->code,
-						   entry->target,
+		insn = aarch64_insn_gen_branch_imm(jump_entry_code(entry),
+						   jump_entry_target(entry),
 						   AARCH64_INSN_BRANCH_NOLINK);
 	} else {
 		insn = aarch64_insn_gen_nop();
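The reason the RELA section disappears outright is that a place-relative
reference ('.long 1b - .') is fully resolved when vmlinux is linked, whereas
an absolute '.quad 1b' under KASLR must be fixed up at boot time through a
RELA record. A minimal user-space analogue of the pattern (a hypothetical
sketch, not kernel code; the symbol name is made up):

	#include <stdio.h>
	#include <stdint.h>

	/* A code location recorded as a place-relative 32-bit offset, the
	 * same trick the new __jump_table entries use. */
	extern const int32_t slot;

	asm("	.text				\n"
	    "1:	nop				\n"
	    "	.pushsection .rodata, \"a\"	\n"
	    "	.balign 4			\n"
	    "	.globl slot			\n"
	    "slot:	.long 1b - .		\n" /* resolved at link time */
	    "	.popsection			\n");

	int main(void)
	{
		/* Recover the absolute address the same way jump_entry_code()
		 * does: add the slot's own address back to the stored offset. */
		uintptr_t code = (uintptr_t)&slot + slot;

		printf("nop lives at %#lx\n", (unsigned long)code);
		return 0;
	}

Running 'readelf -r' on the linked binary shows no relocation left against
'slot', which is exactly why .rela__jump_table vanishes from vmlinux.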