Diffstat (limited to 'arch/arm64/lib')
-rw-r--r--   arch/arm64/lib/Makefile          |  13
-rw-r--r--   arch/arm64/lib/atomic_ll_sc.c    |   3
-rw-r--r--   arch/arm64/lib/bitops.S          |  45
-rw-r--r--   arch/arm64/lib/clear_user.S      |   8
-rw-r--r--   arch/arm64/lib/copy_from_user.S  |  25
-rw-r--r--   arch/arm64/lib/copy_in_user.S    |  25
-rw-r--r--   arch/arm64/lib/copy_to_user.S    |  25
7 files changed, 107 insertions(+), 37 deletions(-)
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index d98d3e39879e..1a811ecf71da 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -3,3 +3,16 @@ lib-y		:= bitops.o clear_user.o delay.o copy_from_user.o	\
 		   clear_page.o memchr.o memcpy.o memmove.o memset.o	\
 		   memcmp.o strcmp.o strncmp.o strlen.o strnlen.o	\
 		   strchr.o strrchr.o
+
+# Tell the compiler to treat all general purpose registers as
+# callee-saved, which allows for efficient runtime patching of the bl
+# instruction in the caller with an atomic instruction when supported by
+# the CPU. Result and argument registers are handled correctly, based on
+# the function prototype.
+lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
+CFLAGS_atomic_ll_sc.o	:= -fcall-used-x0 -ffixed-x1 -ffixed-x2		\
+		   -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6		\
+		   -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9		\
+		   -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12	\
+		   -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15	\
+		   -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
diff --git a/arch/arm64/lib/atomic_ll_sc.c b/arch/arm64/lib/atomic_ll_sc.c
new file mode 100644
index 000000000000..b0c538b0da28
--- /dev/null
+++ b/arch/arm64/lib/atomic_ll_sc.c
@@ -0,0 +1,3 @@
+#include <asm/atomic.h>
+#define __ARM64_IN_ATOMIC_IMPL
+#include <asm/atomic_ll_sc.h>
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index 7dac371cc9a2..43ac736baa5b 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -18,52 +18,59 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/lse.h>
 
 /*
  * x0: bits 5:0  bit offset
  *     bits 31:6 word offset
  * x1: address
  */
-	.macro	bitop, name, instr
+	.macro	bitop, name, llsc, lse
 ENTRY(	\name	)
 	and	w3, w0, #63		// Get bit offset
 	eor	w0, w0, w3		// Clear low bits
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
+alt_lse	"	prfm	pstl1strm, [x1]",	"nop"
 	lsl	x3, x2, x3		// Create mask
-1:	ldxr	x2, [x1]
-	\instr	x2, x2, x3
-	stxr	w0, x2, [x1]
-	cbnz	w0, 1b
+
+alt_lse	"1:	ldxr	x2, [x1]",		"\lse	x3, [x1]"
+alt_lse	"	\llsc	x2, x2, x3",		"nop"
+alt_lse	"	stxr	w0, x2, [x1]",		"nop"
+alt_lse	"	cbnz	w0, 1b",		"nop"
+
 	ret
 ENDPROC(\name	)
 	.endm
 
-	.macro	testop, name, instr
+	.macro	testop, name, llsc, lse
 ENTRY(	\name	)
 	and	w3, w0, #63		// Get bit offset
 	eor	w0, w0, w3		// Clear low bits
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
+alt_lse	"	prfm	pstl1strm, [x1]",	"nop"
 	lsl	x4, x2, x3		// Create mask
-1:	ldxr	x2, [x1]
-	lsr	x0, x2, x3		// Save old value of bit
-	\instr	x2, x2, x4		// toggle bit
-	stlxr	w5, x2, [x1]
-	cbnz	w5, 1b
-	dmb	ish
+
+alt_lse	"1:	ldxr	x2, [x1]",		"\lse	x4, x2, [x1]"
+	lsr	x0, x2, x3
+alt_lse	"	\llsc	x2, x2, x4",		"nop"
+alt_lse	"	stlxr	w5, x2, [x1]",		"nop"
+alt_lse	"	cbnz	w5, 1b",		"nop"
+alt_lse	"	dmb	ish",			"nop"
+
 	and	x0, x0, #1
-3:	ret
+	ret
 ENDPROC(\name	)
 	.endm
 
 /*
  * Atomic bit operations.
  */
-	bitop	change_bit, eor
-	bitop	clear_bit, bic
-	bitop	set_bit, orr
+	bitop	change_bit, eor, steor
+	bitop	clear_bit, bic, stclr
+	bitop	set_bit, orr, stset
 
-	testop	test_and_change_bit, eor
-	testop	test_and_clear_bit, bic
-	testop	test_and_set_bit, orr
+	testop	test_and_change_bit, eor, ldeoral
+	testop	test_and_clear_bit, bic, ldclral
+	testop	test_and_set_bit, orr, ldsetal
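Note: the bitops conversion above emits both instruction sequences through alt_lse and lets the boot-time alternatives framework keep exactly one of them: the ARMv8.0 exclusive load/store loop, or a single ARMv8.1 LSE atomic (stset, ldsetal, etc.) on CPUs that advertise it. The stand-alone C sketch below is not kernel code; it only illustrates what the two columns of alt_lse compute for a set_bit-style operation, takes a precomputed mask instead of a bit number, and assumes a GCC/Clang toolchain where the LSE form is built with -march=armv8.1-a. Function names are made up for this example.

#include <stdint.h>

/* ARMv8.0 LL/SC form: exclusive load/modify/store loop, as in the
 * \llsc column above (ldxr/orr/stxr/cbnz). */
static inline void set_bits_llsc(uint64_t *p, uint64_t mask)
{
	uint64_t tmp;
	uint32_t fail;

	asm volatile(
	"1:	ldxr	%0, %2\n"
	"	orr	%0, %0, %3\n"
	"	stxr	%w1, %0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (tmp), "=&r" (fail), "+Q" (*p)
	: "r" (mask)
	: "memory");
}

/* ARMv8.1 LSE form: one far atomic, as in the \lse column (stset).
 * Assumes the compiler is invoked with -march=armv8.1-a. */
static inline void set_bits_lse(uint64_t *p, uint64_t mask)
{
	asm volatile(
	"	stset	%1, %0\n"
	: "+Q" (*p)
	: "r" (mask)
	: "memory");
}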
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index c17967fdf5f6..a9723c71c52b 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -16,7 +16,11 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 	.text
 
@@ -29,6 +33,8 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -48,6 +54,8 @@ USER(9f, strh	wzr, [x0], #2	)
 	b.mi	5f
 USER(9f, strb	wzr, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__clear_user)
 
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 5e27add9d362..1be9ef27be97 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -28,14 +32,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_from_user)
-	add	x4, x1, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x1, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+USER(9f, ldp	x3, x4, [x1], #16)
+	subs	x2, x2, #16
+	stp	x3, x4, [x0], #16
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 USER(9f, ldr	x3, [x1], #8	)
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 	str	x3, [x0], #8
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 USER(9f, ldr	w3, [x1], #4	)
@@ -51,12 +62,14 @@ USER(9f, ldrh	w3, [x1], #2	)
 USER(9f, ldrb	w3, [x1]	)
 	strb	w3, [x0]
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x2, x4, x1
+9:	sub	x2, x5, x1
 	mov	x3, x2
 10:	strb	wzr, [x0], #1			// zero remaining buffer space
 	subs	x3, x3, #1
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 84b6c9bb9b93..1b94661e22b3 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -17,7 +17,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
@@ -30,14 +34,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_in_user)
-	add	x4, x0, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x0, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+USER(9f, ldp	x3, x4, [x1], #16)
+	subs	x2, x2, #16
+USER(9f, stp	x3, x4, [x0], #16)
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 USER(9f, ldr	x3, [x1], #8	)
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 USER(9f, str	x3, [x0], #8	)
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 USER(9f, ldr	w3, [x1], #4	)
@@ -53,11 +64,13 @@ USER(9f, strh	w3, [x0], #2	)
 USER(9f, ldrb	w3, [x1]	)
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x0, x4, x0			// bytes not copied
+9:	sub	x0, x5, x0			// bytes not copied
 	ret
 	.previous
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index a0aeeb9b7a28..a257b47e2dc4 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -28,14 +32,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_to_user)
-	add	x4, x0, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x0, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+	ldp	x3, x4, [x1], #16
+	subs	x2, x2, #16
+USER(9f, stp	x3, x4, [x0], #16)
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 	ldr	x3, [x1], #8
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 USER(9f, str	x3, [x0], #8	)
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 	ldr	w3, [x1], #4
@@ -51,12 +62,14 @@ USER(9f, strh	w3, [x0], #2	)
 	ldrb	w3, [x1]
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_to_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x0, x4, x0			// bytes not copied
+9:	sub	x0, x5, x0			// bytes not copied
 	ret
 	.previous
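Note: besides the PSTATE.PAN toggling at entry and exit, the copy_*_user changes above switch the main loop to load/store-pair (ldp/stp) accesses, 16 bytes per iteration, with the 8-, 4-, 2- and 1-byte tails handled at labels 1: through 4:. A minimal C sketch of that loop structure only (it deliberately ignores the PAN alternatives and the 9:/10: fault fixups, and the function name is made up for this example):

#include <stddef.h>
#include <string.h>

static void copy_descending(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	while (n >= 16) {		/* main loop: one ldp/stp pair per pass */
		memcpy(d, s, 16);
		d += 16; s += 16; n -= 16;
	}
	if (n >= 8) { memcpy(d, s, 8); d += 8; s += 8; n -= 8; }	/* label 1: */
	if (n >= 4) { memcpy(d, s, 4); d += 4; s += 4; n -= 4; }	/* label 2: */
	if (n >= 2) { memcpy(d, s, 2); d += 2; s += 2; n -= 2; }	/* label 3: */
	if (n >= 1) { memcpy(d, s, 1); }				/* label 4: */
}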