From 5fc52248559802f0f1ac10b3c8729a1568216715 Mon Sep 17 00:00:00 2001
From: Petr Pavlu
Date: Tue, 11 Jul 2023 14:50:54 +0200
Subject: vmlinux.lds.h: Remove a reference to no longer used sections .text..refcount

Sections .text..refcount were previously used to hold error path code for the
fast refcount overflow protection on x86, see commit 7a46ec0e2f48
("locking/refcounts, x86/asm: Implement fast refcount overflow protection")
and commit 564c9cc84e2a ("locking/refcounts, x86/asm: Use unique .text section
for refcount exceptions"). The code was replaced and removed in commit
fb041bb7c0a9 ("locking/refcount: Consolidate implementations of refcount_t"),
and no .text..refcount sections have been present since then.

Remove the leftover reference to these sections from TEXT_TEXT to avoid
confusing people, like me. This is a non-functional change.

Signed-off-by: Petr Pavlu
Link: https://lore.kernel.org/r/20230711125054.9000-1-petr.pavlu@suse.com
Signed-off-by: Kees Cook
---
 include/asm-generic/vmlinux.lds.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 0587354ba678..9c59409104f6 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -578,7 +578,6 @@
 	*(.text.unlikely .text.unlikely.*)	\
 	*(.text.unknown .text.unknown.*)	\
 	NOINSTR_TEXT				\
-	*(.text..refcount)			\
 	*(.ref.text)				\
 	*(.text.asan.* .text.tsan.*)		\
 	MEM_KEEP(init.text*)			\
--
cgit v1.2.3

From 8cc32a9bbf2934d90762d9de0187adcb5ad46a11 Mon Sep 17 00:00:00 2001
From: Yonghong Song
Date: Wed, 28 Jun 2023 11:19:26 -0700
Subject: kallsyms: strip LTO-only suffixes from promoted global functions

Commit 6eb4bd92c1ce ("kallsyms: strip LTO suffixes from static functions")
stripped all function/variable suffixes starting with '.', regardless of
whether those suffixes are generated in LTO mode or not. In fact, as far as
I know, in LTO mode, when a static function/variable is promoted to the
global scope, a '.llvm.<...>' suffix is added.

The existing mechanism breaks live patching for an LTO kernel even if no
.llvm.<...> symbols are involved. For example, take the following kernel
symbols:

  $ grep bpf_verifier_vlog /proc/kallsyms
  ffffffff81549f60 t bpf_verifier_vlog
  ffffffff8268b430 d bpf_verifier_vlog._entry
  ffffffff8282a958 d bpf_verifier_vlog._entry_ptr
  ffffffff82e12a1f d bpf_verifier_vlog.__already_done

'bpf_verifier_vlog' is a static function. '_entry', '_entry_ptr' and
'__already_done' are static variables used inside 'bpf_verifier_vlog', so
llvm promotes them to file-level statics with the prefix 'bpf_verifier_vlog.'.
Note that this function-level to file-level promotion of statics also happens
without LTO.

Given the symbol name 'bpf_verifier_vlog', the current mechanism on an LTO
kernel returns all four symbols to the live patching subsystem, which it
cannot handle. On a non-LTO kernel, only one symbol is returned.

In [1] we had a lengthy discussion; the suggestion was to separate two cases:
(1) new symbols with a suffix that are generated regardless of whether LTO is
enabled, and (2) new symbols with a suffix that are generated only when LTO
is enabled. cleanup_symbol_name() should only remove suffixes for case (2);
case (1) should not be changed, so that it works uniformly with and without
LTO.

This patch removes only the LTO-only suffix '.llvm.<...>', so live patching
and tracing work the same way as on a non-LTO kernel.
The cleanup_symbol_name() in scripts/kallsyms.c is also changed to use the
same filtering pattern, so that both the kernel and the kallsyms tool have
the same expectation on the order of symbols.

[1] https://lore.kernel.org/live-patching/20230615170048.2382735-1-song@kernel.org/T/#u

Fixes: 6eb4bd92c1ce ("kallsyms: strip LTO suffixes from static functions")
Reported-by: Song Liu
Signed-off-by: Yonghong Song
Reviewed-by: Zhen Lei
Reviewed-by: Nick Desaulniers
Acked-by: Song Liu
Link: https://lore.kernel.org/r/20230628181926.4102448-1-yhs@fb.com
Signed-off-by: Kees Cook
---
 kernel/kallsyms.c  | 5 ++---
 scripts/kallsyms.c | 6 +++---
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 7982cc9d497c..016d997131d4 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -174,11 +174,10 @@ static bool cleanup_symbol_name(char *s)
 	 * LLVM appends various suffixes for local functions and variables that
 	 * must be promoted to global scope as part of LTO. This can break
 	 * hooking of static functions with kprobes. '.' is not a valid
-	 * character in an identifier in C. Suffixes observed:
+	 * character in an identifier in C. Suffixes only in LLVM LTO observed:
 	 * - foo.llvm.[0-9a-f]+
-	 * - foo.[0-9a-f]+
 	 */
-	res = strchr(s, '.');
+	res = strstr(s, ".llvm.");
 	if (res) {
 		*res = '\0';
 		return true;
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index d387c9381650..16c87938b316 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -349,10 +349,10 @@ static void cleanup_symbol_name(char *s)
 	 * ASCII[_] = 5f
 	 * ASCII[a-z] = 61,7a
 	 *
-	 * As above, replacing '.' with '\0' does not affect the main sorting,
-	 * but it helps us with subsorting.
+	 * As above, replacing the first '.' in ".llvm." with '\0' does not
+	 * affect the main sorting, but it helps us with subsorting.
 	 */
-	p = strchr(s, '.');
+	p = strstr(s, ".llvm.");
 	if (p)
 		*p = '\0';
 }
--
cgit v1.2.3
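
As a stand-alone illustration of the behavioral difference above, the sketch
below contrasts the old first-'.' truncation with the new ".llvm."-only
truncation. This is ordinary user-space C, not kernel code; the helper names
and the 'some_func.llvm.123456789' symbol are invented for the example, while
'bpf_verifier_vlog.__already_done' is taken from the commit message.

/* Sketch of old vs. new kallsyms suffix stripping; not kernel code. */
#include <stdio.h>
#include <string.h>

/* Old behavior: truncate at the first '.', whatever follows it. */
static void strip_any_dot_suffix(char *s)
{
	char *p = strchr(s, '.');

	if (p)
		*p = '\0';
}

/* New behavior: truncate only at an LTO-generated ".llvm." suffix. */
static void strip_llvm_suffix(char *s)
{
	char *p = strstr(s, ".llvm.");

	if (p)
		*p = '\0';
}

int main(void)
{
	char old_way[]  = "bpf_verifier_vlog.__already_done";
	char new_way[]  = "bpf_verifier_vlog.__already_done";
	char lto_name[] = "some_func.llvm.123456789";

	strip_any_dot_suffix(old_way);	/* -> "bpf_verifier_vlog": suffix lost even without LTO */
	strip_llvm_suffix(new_way);	/* -> unchanged: non-LTO suffix is preserved */
	strip_llvm_suffix(lto_name);	/* -> "some_func": only the LTO-only suffix is removed */

	printf("%s\n%s\n%s\n", old_way, new_way, lto_name);
	return 0;
}

With the old strchr()-based cleanup, all four /proc/kallsyms entries shown in
the commit message collapse to the name 'bpf_verifier_vlog', which is why the
lookup returned four matches on an LTO kernel; with the strstr(".llvm.")-based
cleanup, only genuine LTO suffixes are folded away.
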
From e0b7b2081233ac7fe55838ff68cbc7ca9887a91f Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Wed, 12 Jul 2023 12:46:29 -0700
Subject: MAINTAINERS: Foolishly claim maintainership of string routines

Since the string API is tightly coupled with FORTIFY_SOURCE, I am offering
myself up as maintainer for it. Thankfully Andy is already a reviewer and
can keep me on the straight and narrow.

Acked-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20230712194625.never.252-kees@kernel.org
Signed-off-by: Kees Cook
---
 MAINTAINERS | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 3be1bdfe8ecc..b639d6123856 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8672,8 +8672,11 @@ S:	Maintained
 F:	drivers/input/touchscreen/resistive-adc-touch.c
 
 GENERIC STRING LIBRARY
+M:	Kees Cook
 R:	Andy Shevchenko
-S:	Maintained
+L:	linux-hardening@vger.kernel.org
+S:	Supported
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
 F:	include/linux/string.h
 F:	include/linux/string_choices.h
 F:	include/linux/string_helpers.h
--
cgit v1.2.3

From ec7633de404e7ce704d8f79081b97bca5b616c23 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Wed, 28 Jun 2023 11:49:18 +0200
Subject: sparc: mark __arch_xchg() as __always_inline

An otherwise correct change to the atomic operations uncovered an existing
bug in the sparc __arch_xchg() function, which calls
__xchg_called_with_bad_pointer() when its arguments are unknown at compile
time:

  ERROR: modpost: "__xchg_called_with_bad_pointer" [lib/atomic64_test.ko] undefined!

This now happens because gcc determines that it is better not to inline the
function. Avoid this by marking the function as __always_inline to force the
compiler to do the right thing here.

Reported-by: Guenter Roeck
Link: https://lore.kernel.org/all/c525adc9-6623-4660-8718-e0c9311563b8@roeck-us.net/
Fixes: d12157efc8e08 ("locking/atomic: make atomic*_{cmp,}xchg optional")
Signed-off-by: Arnd Bergmann
Acked-by: Palmer Dabbelt
Acked-by: Mark Rutland
Reviewed-by: Sam Ravnborg
Acked-by: Guenter Roeck
Acked-by: Andi Shyti
Link: https://lore.kernel.org/r/20230628094938.2318171-1-arnd@kernel.org
Signed-off-by: Kees Cook
---
 arch/sparc/include/asm/cmpxchg_32.h | 2 +-
 arch/sparc/include/asm/cmpxchg_64.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 7a1339533d1d..d0af82c240b7 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -15,7 +15,7 @@
 unsigned long __xchg_u32(volatile u32 *m, u32 new);
 void __xchg_called_with_bad_pointer(void);
 
-static inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
+static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
 	case 4:
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 66cd61dde9ec..3de25262c411 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -87,7 +87,7 @@ xchg16(__volatile__ unsigned short *m, unsigned short val)
 	return (load32 & mask) >> bit_shift;
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
--
cgit v1.2.3
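
For reference, here is a minimal user-space sketch of the link-time trick the
sparc commit relies on, assuming an optimizing build (e.g. gcc -O2); the names
__bad_xchg_size() and my_xchg() are invented for this illustration and do not
exist in the kernel.

/* Sketch of the "call to an undefined function as a build-time check" trick.
 * Build with optimization (e.g. gcc -O2); without it the dead call may
 * survive and the link will fail even in the good case. */
#define __always_inline inline __attribute__((__always_inline__))

void __bad_xchg_size(void);	/* deliberately declared but never defined */

static __always_inline unsigned long my_xchg(unsigned long x, int size)
{
	switch (size) {
	case 4:
		return x;		/* stand-in for a real 4-byte exchange */
	default:
		__bad_xchg_size();	/* must be eliminated as dead code */
		return x;
	}
}

int main(void)
{
	/*
	 * size is a compile-time constant, so once my_xchg() is inlined the
	 * default: branch is dead and the reference to __bad_xchg_size()
	 * disappears before the linker runs. If the compiler decides not to
	 * inline (as gcc did for the sparc __arch_xchg()), the out-of-line
	 * copy keeps the call and the build fails with an undefined
	 * reference, just like the modpost error quoted in the commit.
	 * __always_inline takes that choice away from the compiler.
	 */
	return (int)my_xchg(42, 4);
}

The kernel uses the same idea in __arch_xchg(): an unsupported operand size
must be rejected at build time rather than at run time, and that only works
if the size check is guaranteed to be inlined and constant-folded.
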