author     Christophe Leroy <christophe.leroy@c-s.fr>    2019-04-26 16:23:26 +0000
committer  Michael Ellerman <mpe@ellerman.id.au>         2019-05-03 01:20:25 +1000
commit     26deb04342e343ac58ab05bc7d2345ff0be9b667
tree       39040b7c409757661ecd6c7d82dce9e919072625 /arch/powerpc/lib
parent     d69ca6bab39e84a84781535b977c7e62c8f84d37
powerpc: prepare string/mem functions for KASAN
CONFIG_KASAN implements wrappers for memcpy(), memmove() and memset().
Those wrappers perform the verification and then call __memcpy(),
__memmove() and __memset() respectively. Architectures are therefore
expected to rename their optimised functions accordingly.
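As an illustration of that pattern (a sketch only, not the verbatim
mm/kasan code; it assumes the kasan_check_read()/kasan_check_write()
helpers from <linux/kasan-checks.h>), the exported memcpy() under
KASAN is essentially:

    #include <linux/kasan-checks.h>
    #include <linux/string.h>

    /* Sketch: validate both buffers against the shadow memory,
     * then defer to the arch's renamed optimised routine.
     */
    void *memcpy(void *dest, const void *src, size_t len)
    {
            kasan_check_write(dest, len);
            kasan_check_read(src, len);

            return __memcpy(dest, src, len);
    }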
For files on which KASAN is inhibited, #defines are used to let them
call the optimised versions of the functions directly, without going
through the KASAN wrappers.
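A sketch of that #define approach (following the x86 version referenced
below; its exact placement in the powerpc headers is an assumption here):

    /* Sketch: in a file compiled without KASAN instrumentation,
     * __SANITIZE_ADDRESS__ is not defined, so redirect the plain
     * names straight to the uninstrumented implementations.
     */
    #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
    #define memcpy(dst, src, len)   __memcpy(dst, src, len)
    #define memmove(dst, src, len)  __memmove(dst, src, len)
    #define memset(dst, c, len)     __memset(dst, c, len)
    #endif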
See commit 393f203f5fd5 ("x86_64: kasan: add interceptors for
memset/memmove/memcpy functions") for details.
Other string/mem functions do not (yet) have KASAN wrappers, so we
have to fall back to the generic versions when KASAN is active;
otherwise the KASAN checks would be skipped.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[mpe: Fixups to keep selftests working]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/lib')
 arch/powerpc/lib/Makefile    | 11
 arch/powerpc/lib/copy_32.S   | 12
 arch/powerpc/lib/mem_64.S    |  9
 arch/powerpc/lib/memcpy_64.S |  4
4 files changed, 27 insertions, 9 deletions
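Note: the diff below relies on _GLOBAL_KASAN(), _GLOBAL_TOC_KASAN() and
EXPORT_SYMBOL_KASAN() helpers defined outside this diffstat (in the
powerpc asm headers added by the same series). A rough sketch of their
intended expansion, shown here only for readability:

    #ifdef CONFIG_KASAN
    /* KASAN owns the plain names; emit the renamed optimised symbol. */
    #define _GLOBAL_KASAN(fn)        _GLOBAL(__##fn)
    #define _GLOBAL_TOC_KASAN(fn)    _GLOBAL_TOC(__##fn)
    #define EXPORT_SYMBOL_KASAN(fn)  EXPORT_SYMBOL(__##fn)
    #else
    /* No KASAN: keep defining and exporting the usual names. */
    #define _GLOBAL_KASAN(fn)        _GLOBAL(fn)
    #define _GLOBAL_TOC_KASAN(fn)    _GLOBAL_TOC(fn)
    #define EXPORT_SYMBOL_KASAN(fn)
    #endif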
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 79396e184bca..47a4de434c22 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -8,9 +8,14 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
 CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
 
-obj-y += string.o alloc.o code-patching.o feature-fixups.o
+obj-y += alloc.o code-patching.o feature-fixups.o
 
-obj-$(CONFIG_PPC32)	+= div64.o copy_32.o crtsavres.o strlen_32.o
+ifndef CONFIG_KASAN
+obj-y	+=	string.o memcmp_$(BITS).o
+obj-$(CONFIG_PPC32)	+= strlen_32.o
+endif
+
+obj-$(CONFIG_PPC32)	+= div64.o copy_32.o crtsavres.o
 
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION)	+= error-inject.o
 
@@ -34,7 +39,7 @@ obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o \
 				   test_emulate_step_exec_instr.o
 
 obj-y			+= checksum_$(BITS).o checksum_wrappers.o \
-			   string_$(BITS).o memcmp_$(BITS).o
+			   string_$(BITS).o
 
 obj-y			+= sstep.o ldstfp.o quad.o
 obj64-y			+= quad.o
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index ba66846fe973..d5642481fb98 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -14,6 +14,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/export.h>
 #include <asm/code-patching-asm.h>
+#include <asm/kasan.h>
 
 #define COPY_16_BYTES		\
 	lwz	r7,4(r4);	\
@@ -68,6 +69,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
 LG_CACHELINE_BYTES = L1_CACHE_SHIFT
 CACHELINE_MASK = (L1_CACHE_BYTES-1)
 
+#ifndef CONFIG_KASAN
 _GLOBAL(memset16)
 	rlwinm.	r0 ,r5, 31, 1, 31
 	addi	r6, r3, -4
@@ -81,6 +83,7 @@ _GLOBAL(memset16)
 	sth	r4, 4(r6)
 	blr
 EXPORT_SYMBOL(memset16)
+#endif
 
 /*
  * Use dcbz on the complete cache lines in the destination
@@ -91,7 +94,7 @@ EXPORT_SYMBOL(memset16)
  * We therefore skip the optimised bloc that uses dcbz. This jump is
  * replaced by a nop once cache is active. This is done in machine_init()
  */
-_GLOBAL(memset)
+_GLOBAL_KASAN(memset)
 	cmplwi	0,r5,4
 	blt	7f
@@ -151,6 +154,7 @@ _GLOBAL(memset)
 	bdnz	9b
 	blr
 EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL_KASAN(memset)
 
 /*
  * This version uses dcbz on the complete cache lines in the
@@ -163,12 +167,12 @@ EXPORT_SYMBOL(memset)
  * We therefore jump to generic_memcpy which doesn't use dcbz. This jump is
  * replaced by a nop once cache is active. This is done in machine_init()
  */
-_GLOBAL(memmove)
+_GLOBAL_KASAN(memmove)
 	cmplw	0,r3,r4
 	bgt	backwards_memcpy
 	/* fall through */
 
-_GLOBAL(memcpy)
+_GLOBAL_KASAN(memcpy)
 1:	b	generic_memcpy
 	patch_site	1b, patch__memcpy_nocache
@@ -244,6 +248,8 @@ _GLOBAL(memcpy)
 65:	blr
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(memmove)
+EXPORT_SYMBOL_KASAN(memcpy)
+EXPORT_SYMBOL_KASAN(memmove)
 
 generic_memcpy:
 	srwi.	r7,r5,3
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 3c3be02f33b7..7f6bd031c306 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -12,7 +12,9 @@
 #include <asm/errno.h>
 #include <asm/ppc_asm.h>
 #include <asm/export.h>
+#include <asm/kasan.h>
 
+#ifndef CONFIG_KASAN
 _GLOBAL(__memset16)
 	rlwimi	r4,r4,16,0,15
 	/* fall through */
@@ -29,8 +31,9 @@ _GLOBAL(__memset64)
 EXPORT_SYMBOL(__memset16)
 EXPORT_SYMBOL(__memset32)
 EXPORT_SYMBOL(__memset64)
+#endif
 
-_GLOBAL(memset)
+_GLOBAL_KASAN(memset)
 	neg	r0,r3
 	rlwimi	r4,r4,8,16,23
 	andi.	r0,r0,7		/* # bytes to be 8-byte aligned */
@@ -96,8 +99,9 @@ _GLOBAL(memset)
 	stb	r4,0(r6)
 	blr
 EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL_KASAN(memset)
 
-_GLOBAL_TOC(memmove)
+_GLOBAL_TOC_KASAN(memmove)
 	cmplw	0,r3,r4
 	bgt	backwards_memcpy
 	b	memcpy
@@ -139,3 +143,4 @@ _GLOBAL(backwards_memcpy)
 	mtctr	r7
 	b	1b
 EXPORT_SYMBOL(memmove)
+EXPORT_SYMBOL_KASAN(memmove)
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 273ea67e60a1..25c3772c1dfb 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -11,6 +11,7 @@
 #include <asm/export.h>
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>
+#include <asm/kasan.h>
 
 #ifndef SELFTEST_CASE
 /* For big-endian, 0 == most CPUs, 1 == POWER6, 2 == Cell */
@@ -18,7 +19,7 @@
 #endif
 
 	.align	7
-_GLOBAL_TOC(memcpy)
+_GLOBAL_TOC_KASAN(memcpy)
 BEGIN_FTR_SECTION
 #ifdef __LITTLE_ENDIAN__
 	cmpdi	cr7,r5,0
@@ -230,3 +231,4 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	blr
 #endif
 EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL_KASAN(memcpy)