Diffstat (limited to 'arch/s390/lib')
-rw-r--r--   arch/s390/lib/Makefile                                                    5
-rw-r--r--   arch/s390/lib/crc32-glue.c                                               92
-rw-r--r--   arch/s390/lib/crc32-vx.h                                                 12
-rw-r--r--   arch/s390/lib/crc32be-vx.c                                              174
-rw-r--r--   arch/s390/lib/crc32le-vx.c                                              240
-rw-r--r--   arch/s390/lib/expoline.S (renamed from arch/s390/lib/expoline/expoline.S)  0
-rw-r--r--   arch/s390/lib/expoline/Makefile                                           3
-rw-r--r--   arch/s390/lib/mem.S                                                      15
-rw-r--r--   arch/s390/lib/spinlock.c                                                 85
-rw-r--r--   arch/s390/lib/string.c                                                   10
-rw-r--r--   arch/s390/lib/test_kprobes.c                                              1
-rw-r--r--   arch/s390/lib/test_modules.c                                              1
-rw-r--r--   arch/s390/lib/test_unwind.c                                               7
-rw-r--r--   arch/s390/lib/uaccess.c                                                 176
-rw-r--r--   arch/s390/lib/xor.c                                                      61
15 files changed, 697 insertions(+), 185 deletions(-)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 90eac15ea62a..14bbfe50033c 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -23,4 +23,7 @@ obj-$(CONFIG_S390_MODULES_SANITY_TEST_HELPERS) += test_modules_helpers.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
-obj-$(CONFIG_EXPOLINE_EXTERN) += expoline/
+obj-$(CONFIG_EXPOLINE_EXTERN) += expoline.o
+
+obj-$(CONFIG_CRC32_ARCH) += crc32-s390.o
+crc32-s390-y := crc32-glue.o crc32le-vx.o crc32be-vx.o
diff --git a/arch/s390/lib/crc32-glue.c b/arch/s390/lib/crc32-glue.c
new file mode 100644
index 000000000000..124214a27340
--- /dev/null
+++ b/arch/s390/lib/crc32-glue.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CRC-32 implemented with the z/Architecture Vector Extension Facility.
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+#define KMSG_COMPONENT "crc32-vx"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <linux/crc32.h>
+#include <asm/fpu.h>
+#include "crc32-vx.h"
+
+#define VX_MIN_LEN 64
+#define VX_ALIGNMENT 16L
+#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
+
+static DEFINE_STATIC_KEY_FALSE(have_vxrs);
+
+/*
+ * DEFINE_CRC32_VX() - Define a CRC-32 function using the vector extension
+ *
+ * Creates a function to perform a particular CRC-32 computation. Depending
+ * on the message buffer, the hardware-accelerated or software implementation
+ * is used. Note that the message buffer is aligned to improve fetch
+ * operations of VECTOR LOAD MULTIPLE instructions.
+ */
+#define DEFINE_CRC32_VX(___fname, ___crc32_vx, ___crc32_sw) \
+ u32 ___fname(u32 crc, const u8 *data, size_t datalen) \
+ { \
+ unsigned long prealign, aligned, remaining; \
+ DECLARE_KERNEL_FPU_ONSTACK16(vxstate); \
+ \
+ if (datalen < VX_MIN_LEN + VX_ALIGN_MASK || \
+ !static_branch_likely(&have_vxrs)) \
+ return ___crc32_sw(crc, data, datalen); \
+ \
+ if ((unsigned long)data & VX_ALIGN_MASK) { \
+ prealign = VX_ALIGNMENT - \
+ ((unsigned long)data & VX_ALIGN_MASK); \
+ datalen -= prealign; \
+ crc = ___crc32_sw(crc, data, prealign); \
+ data = (void *)((unsigned long)data + prealign); \
+ } \
+ \
+ aligned = datalen & ~VX_ALIGN_MASK; \
+ remaining = datalen & VX_ALIGN_MASK; \
+ \
+ kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW); \
+ crc = ___crc32_vx(crc, data, aligned); \
+ kernel_fpu_end(&vxstate, KERNEL_VXR_LOW); \
+ \
+ if (remaining) \
+ crc = ___crc32_sw(crc, data + aligned, remaining); \
+ \
+ return crc; \
+ } \
+ EXPORT_SYMBOL(___fname);
+
+DEFINE_CRC32_VX(crc32_le_arch, crc32_le_vgfm_16, crc32_le_base)
+DEFINE_CRC32_VX(crc32_be_arch, crc32_be_vgfm_16, crc32_be_base)
+DEFINE_CRC32_VX(crc32c_arch, crc32c_le_vgfm_16, crc32c_base)
+
+static int __init crc32_s390_init(void)
+{
+ if (cpu_have_feature(S390_CPU_FEATURE_VXRS))
+ static_branch_enable(&have_vxrs);
+ return 0;
+}
+arch_initcall(crc32_s390_init);
+
+static void __exit crc32_s390_exit(void)
+{
+}
+module_exit(crc32_s390_exit);
+
+u32 crc32_optimizations(void)
+{
+ if (static_key_enabled(&have_vxrs))
+ return CRC32_LE_OPTIMIZATION |
+ CRC32_BE_OPTIMIZATION |
+ CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("CRC-32 algorithms using z/Architecture Vector Extension Facility");
+MODULE_LICENSE("GPL");
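
For illustration only (not part of the patch): the buffer handling that DEFINE_CRC32_VX() generates above, written as plain C. Assuming datalen is at least VX_MIN_LEN + VX_ALIGN_MASK, the buffer is split into an unaligned head and a short tail (both fed to the software fallback) and a 16-byte aligned middle that goes to the vector routine between kernel_fpu_begin()/kernel_fpu_end(). split_buffer() is a hypothetical helper, not a kernel function.

#include <stdint.h>
#include <stddef.h>

struct crc_split {
	size_t prealign;	/* unaligned head, software CRC */
	size_t aligned;		/* 16-byte aligned middle, vector CRC */
	size_t remaining;	/* tail, software CRC */
};

static struct crc_split split_buffer(const unsigned char *data, size_t datalen)
{
	struct crc_split s = { 0, 0, 0 };
	size_t misalign = (uintptr_t)data & 15;		/* VX_ALIGN_MASK */

	if (misalign) {
		s.prealign = 16 - misalign;		/* VX_ALIGNMENT - misalign */
		datalen -= s.prealign;
	}
	s.aligned = datalen & ~(size_t)15;
	s.remaining = datalen & 15;
	return s;
}
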
diff --git a/arch/s390/lib/crc32-vx.h b/arch/s390/lib/crc32-vx.h
new file mode 100644
index 000000000000..652c96e1a822
--- /dev/null
+++ b/arch/s390/lib/crc32-vx.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CRC32_VX_S390_H
+#define _CRC32_VX_S390_H
+
+#include <linux/types.h>
+
+u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+
+#endif /* _CRC32_VX_S390_H */
diff --git a/arch/s390/lib/crc32be-vx.c b/arch/s390/lib/crc32be-vx.c
new file mode 100644
index 000000000000..fed7c9c70d05
--- /dev/null
+++ b/arch/s390/lib/crc32be-vx.c
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hardware-accelerated CRC-32 variants for Linux on z Systems
+ *
+ * Use the z/Architecture Vector Extension Facility to accelerate the
+ * computing of CRC-32 checksums.
+ *
+ * This CRC-32 implementation algorithm processes the most-significant
+ * bit first (BE).
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <asm/fpu.h>
+#include "crc32-vx.h"
+
+/* Vector register range containing CRC-32 constants */
+#define CONST_R1R2 9
+#define CONST_R3R4 10
+#define CONST_R5 11
+#define CONST_R6 12
+#define CONST_RU_POLY 13
+#define CONST_CRC_POLY 14
+
+/*
+ * The CRC-32 constant block contains reduction constants to fold and
+ * process particular chunks of the input data stream in parallel.
+ *
+ * For the CRC-32 variants, the constants are precomputed according to
+ * these definitions:
+ *
+ * R1 = x4*128+64 mod P(x)
+ * R2 = x4*128 mod P(x)
+ * R3 = x128+64 mod P(x)
+ * R4 = x128 mod P(x)
+ * R5 = x96 mod P(x)
+ * R6 = x64 mod P(x)
+ *
+ * Barrett reduction constant, u, is defined as floor(x**64 / P(x)).
+ *
+ * where P(x) is the polynomial in the normal domain and P'(x) is the
+ * polynomial in the reversed (bitreflected) domain.
+ *
+ * Note that the constant definitions below are extended in order to compute
+ * intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
+ * The rightmost doubleword can be 0 to prevent contribution to the result or
+ * can be multiplied by 1 to perform an XOR without the need for a separate
+ * VECTOR EXCLUSIVE OR instruction.
+ *
+ * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
+ *
+ * P(x) = 0x04C11DB7
+ * P'(x) = 0xEDB88320
+ */
+
+static unsigned long constants_CRC_32_BE[] = {
+ 0x08833794c, 0x0e6228b11, /* R1, R2 */
+ 0x0c5b9cd4c, 0x0e8a45605, /* R3, R4 */
+ 0x0f200aa66, 1UL << 32, /* R5, x32 */
+ 0x0490d678d, 1, /* R6, 1 */
+ 0x104d101df, 0, /* u */
+ 0x104C11DB7, 0, /* P(x) */
+};
+
+/**
+ * crc32_be_vgfm_16 - Compute CRC-32 (BE variant) with vector registers
+ * @crc: Initial CRC value, typically ~0.
+ * @buf: Input buffer pointer, performance might be improved if the
+ * buffer is on a doubleword boundary.
+ * @size: Size of the buffer, must be 64 bytes or greater.
+ *
+ * Register usage:
+ * V0: Initial CRC value and intermediate constants and results.
+ * V1..V4: Data for CRC computation.
+ * V5..V8: Next data chunks that are fetched from the input buffer.
+ * V9..V14: CRC-32 constants.
+ */
+u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
+{
+ /* Load CRC-32 constants */
+ fpu_vlm(CONST_R1R2, CONST_CRC_POLY, &constants_CRC_32_BE);
+ fpu_vzero(0);
+
+ /* Load the initial CRC value into the leftmost word of V0. */
+ fpu_vlvgf(0, crc, 0);
+
+ /* Load a 64-byte data chunk and XOR with CRC */
+ fpu_vlm(1, 4, buf);
+ fpu_vx(1, 0, 1);
+ buf += 64;
+ size -= 64;
+
+ while (size >= 64) {
+ /* Load the next 64-byte data chunk into V5 to V8 */
+ fpu_vlm(5, 8, buf);
+
+ /*
+ * Perform a GF(2) multiplication of the doublewords in V1 with
+ * the reduction constants in V0. The intermediate result is
+ * then folded (accumulated) with the next data chunk in V5 and
+ * stored in V1. Repeat this step for the register contents
+ * in V2, V3, and V4 respectively.
+ */
+ fpu_vgfmag(1, CONST_R1R2, 1, 5);
+ fpu_vgfmag(2, CONST_R1R2, 2, 6);
+ fpu_vgfmag(3, CONST_R1R2, 3, 7);
+ fpu_vgfmag(4, CONST_R1R2, 4, 8);
+ buf += 64;
+ size -= 64;
+ }
+
+ /* Fold V1 to V4 into a single 128-bit value in V1 */
+ fpu_vgfmag(1, CONST_R3R4, 1, 2);
+ fpu_vgfmag(1, CONST_R3R4, 1, 3);
+ fpu_vgfmag(1, CONST_R3R4, 1, 4);
+
+ while (size >= 16) {
+ fpu_vl(2, buf);
+ fpu_vgfmag(1, CONST_R3R4, 1, 2);
+ buf += 16;
+ size -= 16;
+ }
+
+ /*
+ * The R5 constant is used to fold a 128-bit value into a 96-bit value
+ * that is XORed with the next 96-bit input data chunk. To use a single
+ * VGFMG instruction, multiply the rightmost 64 bits with x^32 (1<<32) to
+ * form an intermediate 96-bit value (with appended zeros) which is then
+ * XORed with the intermediate reduction result.
+ */
+ fpu_vgfmg(1, CONST_R5, 1);
+
+ /*
+ * Further reduce the remaining 96-bit value to a 64-bit value using a
+ * single VGFMG; the rightmost doubleword is multiplied with 0x1. The
+ * intermediate result is then XORed with the product of the leftmost
+ * doubleword with R6. The result is a 64-bit value and is subject to
+ * the Barrett reduction.
+ */
+ fpu_vgfmg(1, CONST_R6, 1);
+
+ /*
+ * The input values to the Barrett reduction are the degree-63 polynomial
+ * in V1 (R(x)), the degree-32 generator polynomial, and the reduction
+ * constant u. The Barrett reduction result is the CRC value of R(x) mod
+ * P(x).
+ *
+ * The Barrett reduction algorithm is defined as:
+ *
+ * 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
+ * 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
+ * 3. C(x) = R(x) XOR T2(x) mod x^32
+ *
+ * Note: To compensate for the division by x^32, use the vector unpack
+ * instruction to move the leftmost word into the leftmost doubleword
+ * of the vector register. The rightmost doubleword is multiplied
+ * with zero to not contribute to the intermediate results.
+ */
+
+ /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
+ fpu_vupllf(2, 1);
+ fpu_vgfmg(2, CONST_RU_POLY, 2);
+
+ /*
+ * Compute the GF(2) product of the CRC polynomial (held in CONST_CRC_POLY)
+ * with T1(x) in V2 and XOR the intermediate result, T2(x), with the value
+ * in V1. The final result is in the rightmost word of V2.
+ */
+ fpu_vupllf(2, 2);
+ fpu_vgfmag(2, CONST_CRC_POLY, 2, 1);
+ return fpu_vlgvf(2, 3);
+}
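
The Barrett reduction spelled out in the comments above can also be written as scalar C. The sketch below is illustrative only: clmul64() is a naive carry-less (GF(2)) multiply, not a kernel helper, and the u and P(x) values are the ones listed in constants_CRC_32_BE; the vector code reaches the same result with VGFMG on CONST_RU_POLY and CONST_CRC_POLY.

static unsigned long long clmul64(unsigned long long a, unsigned long long b)
{
	unsigned long long r = 0;

	while (b) {			/* carry-less (GF(2)) multiplication */
		if (b & 1)
			r ^= a;
		a <<= 1;
		b >>= 1;
	}
	return r;
}

static unsigned int crc32_be_barrett(unsigned long long rx)
{
	const unsigned long long u = 0x104d101dfULL;	/* floor(x^64 / P(x)) */
	const unsigned long long p = 0x104c11db7ULL;	/* P(x) */
	unsigned long long t1, t2;

	t1 = clmul64(u, rx >> 32);	/* 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u     */
	t2 = clmul64(p, t1 >> 32);	/* 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) */
	return (unsigned int)(rx ^ t2);	/* 3. C(x)  = R(x) XOR T2(x) mod x^32           */
}
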
diff --git a/arch/s390/lib/crc32le-vx.c b/arch/s390/lib/crc32le-vx.c
new file mode 100644
index 000000000000..2f629f394df7
--- /dev/null
+++ b/arch/s390/lib/crc32le-vx.c
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hardware-accelerated CRC-32 variants for Linux on z Systems
+ *
+ * Use the z/Architecture Vector Extension Facility to accelerate the
+ * computing of bitreflected CRC-32 checksums for IEEE 802.3 Ethernet
+ * and Castagnoli.
+ *
+ * This CRC-32 implementation algorithm is bitreflected and processes
+ * the least-significant bit first (Little-Endian).
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <asm/fpu.h>
+#include "crc32-vx.h"
+
+/* Vector register range containing CRC-32 constants */
+#define CONST_PERM_LE2BE 9
+#define CONST_R2R1 10
+#define CONST_R4R3 11
+#define CONST_R5 12
+#define CONST_RU_POLY 13
+#define CONST_CRC_POLY 14
+
+/*
+ * The CRC-32 constant block contains reduction constants to fold and
+ * process particular chunks of the input data stream in parallel.
+ *
+ * For the CRC-32 variants, the constants are precomputed according to
+ * these definitions:
+ *
+ * R1 = [(x4*128+32 mod P'(x) << 32)]' << 1
+ * R2 = [(x4*128-32 mod P'(x) << 32)]' << 1
+ * R3 = [(x128+32 mod P'(x) << 32)]' << 1
+ * R4 = [(x128-32 mod P'(x) << 32)]' << 1
+ * R5 = [(x64 mod P'(x) << 32)]' << 1
+ * R6 = [(x32 mod P'(x) << 32)]' << 1
+ *
+ * The bitreflected Barrett reduction constant, u', is defined as
+ * the bit reversal of floor(x**64 / P(x)).
+ *
+ * where P(x) is the polynomial in the normal domain and P'(x) is the
+ * polynomial in the reversed (bitreflected) domain.
+ *
+ * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
+ *
+ * P(x) = 0x04C11DB7
+ * P'(x) = 0xEDB88320
+ *
+ * CRC-32C (Castagnoli) polynomials:
+ *
+ * P(x) = 0x1EDC6F41
+ * P'(x) = 0x82F63B78
+ */
+
+static unsigned long constants_CRC_32_LE[] = {
+ 0x0f0e0d0c0b0a0908, 0x0706050403020100, /* BE->LE mask */
+ 0x1c6e41596, 0x154442bd4, /* R2, R1 */
+ 0x0ccaa009e, 0x1751997d0, /* R4, R3 */
+ 0x0, 0x163cd6124, /* R5 */
+ 0x0, 0x1f7011641, /* u' */
+ 0x0, 0x1db710641 /* P'(x) << 1 */
+};
+
+static unsigned long constants_CRC_32C_LE[] = {
+ 0x0f0e0d0c0b0a0908, 0x0706050403020100, /* BE->LE mask */
+ 0x09e4addf8, 0x740eef02, /* R2, R1 */
+ 0x14cd00bd6, 0xf20c0dfe, /* R4, R3 */
+ 0x0, 0x0dd45aab8, /* R5 */
+ 0x0, 0x0dea713f1, /* u' */
+ 0x0, 0x105ec76f0 /* P'(x) << 1 */
+};
+
+/**
+ * crc32_le_vgfm_generic - Compute CRC-32 (LE variant) with vector registers
+ * @crc: Initial CRC value, typically ~0.
+ * @buf: Input buffer pointer, performance might be improved if the
+ * buffer is on a doubleword boundary.
+ * @size: Size of the buffer, must be 64 bytes or greater.
+ * @constants: CRC-32 constant pool base pointer.
+ *
+ * Register usage:
+ * V0: Initial CRC value and intermediate constants and results.
+ * V1..V4: Data for CRC computation.
+ * V5..V8: Next data chunks that are fetched from the input buffer.
+ * V9: Constant for BE->LE conversion and shift operations
+ * V10..V14: CRC-32 constants.
+ */
+static u32 crc32_le_vgfm_generic(u32 crc, unsigned char const *buf, size_t size, unsigned long *constants)
+{
+ /* Load CRC-32 constants */
+ fpu_vlm(CONST_PERM_LE2BE, CONST_CRC_POLY, constants);
+
+ /*
+ * Load the initial CRC value.
+ *
+ * The CRC value is loaded into the rightmost word of the
+ * vector register and is later XORed with the LSB portion
+ * of the loaded input data.
+ */
+ fpu_vzero(0); /* Clear V0 */
+ fpu_vlvgf(0, crc, 3); /* Load CRC into rightmost word */
+
+ /* Load a 64-byte data chunk and XOR with CRC */
+ fpu_vlm(1, 4, buf);
+ fpu_vperm(1, 1, 1, CONST_PERM_LE2BE);
+ fpu_vperm(2, 2, 2, CONST_PERM_LE2BE);
+ fpu_vperm(3, 3, 3, CONST_PERM_LE2BE);
+ fpu_vperm(4, 4, 4, CONST_PERM_LE2BE);
+
+ fpu_vx(1, 0, 1); /* V1 ^= CRC */
+ buf += 64;
+ size -= 64;
+
+ while (size >= 64) {
+ fpu_vlm(5, 8, buf);
+ fpu_vperm(5, 5, 5, CONST_PERM_LE2BE);
+ fpu_vperm(6, 6, 6, CONST_PERM_LE2BE);
+ fpu_vperm(7, 7, 7, CONST_PERM_LE2BE);
+ fpu_vperm(8, 8, 8, CONST_PERM_LE2BE);
+ /*
+ * Perform a GF(2) multiplication of the doublewords in V1 with
+ * the R1 and R2 reduction constants in V0. The intermediate
+ * result is then folded (accumulated) with the next data chunk
+ * in V5 and stored in V1. Repeat this step for the register
+ * contents in V2, V3, and V4 respectively.
+ */
+ fpu_vgfmag(1, CONST_R2R1, 1, 5);
+ fpu_vgfmag(2, CONST_R2R1, 2, 6);
+ fpu_vgfmag(3, CONST_R2R1, 3, 7);
+ fpu_vgfmag(4, CONST_R2R1, 4, 8);
+ buf += 64;
+ size -= 64;
+ }
+
+ /*
+ * Fold V1 to V4 into a single 128-bit value in V1. Multiply V1 with R3
+ * and R4, accumulating the next 128-bit chunk, until a single 128-bit
+ * value remains.
+ */
+ fpu_vgfmag(1, CONST_R4R3, 1, 2);
+ fpu_vgfmag(1, CONST_R4R3, 1, 3);
+ fpu_vgfmag(1, CONST_R4R3, 1, 4);
+
+ while (size >= 16) {
+ fpu_vl(2, buf);
+ fpu_vperm(2, 2, 2, CONST_PERM_LE2BE);
+ fpu_vgfmag(1, CONST_R4R3, 1, 2);
+ buf += 16;
+ size -= 16;
+ }
+
+ /*
+ * Set up a vector register for byte shifts. The shift value must
+ * be loaded in bits 1-4 in byte element 7 of a vector register.
+ * Shift by 8 bytes: 0x40
+ * Shift by 4 bytes: 0x20
+ */
+ fpu_vleib(9, 0x40, 7);
+
+ /*
+ * Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes
+ * to move R4 into the rightmost doubleword and set the leftmost
+ * doubleword to 0x1.
+ */
+ fpu_vsrlb(0, CONST_R4R3, 9);
+ fpu_vleig(0, 1, 0);
+
+ /*
+ * Compute GF(2) product of V1 and V0. The rightmost doubleword
+ * of V1 is multiplied with R4. The leftmost doubleword of V1 is
+ * multiplied by 0x1 and is then XORed with the rightmost product.
+ * Implicitly, the intermediate leftmost product becomes padded with zeros.
+ */
+ fpu_vgfmg(1, 0, 1);
+
+ /*
+ * Now do the final 32-bit fold by multiplying the rightmost word
+ * in V1 with R5 and XORing the result with the remaining bits in V1.
+ *
+ * To achieve this by a single VGFMAG, right shift V1 by a word
+ * and store the result in V2 which is then accumulated. Use the
+ * vector unpack instruction to load the rightmost half of the
+ * doubleword into the rightmost doubleword element of V1; the other
+ * half is loaded in the leftmost doubleword.
+ * The vector register with CONST_R5 contains the R5 constant in the
+ * rightmost doubleword and the leftmost doubleword is zero to ignore
+ * the leftmost product of V1.
+ */
+ fpu_vleib(9, 0x20, 7); /* Shift by words */
+ fpu_vsrlb(2, 1, 9); /* Store remaining bits in V2 */
+ fpu_vupllf(1, 1); /* Split rightmost doubleword */
+ fpu_vgfmag(1, CONST_R5, 1, 2); /* V1 = (V1 * R5) XOR V2 */
+
+ /*
+ * Apply a Barrett reduction to compute the final 32-bit CRC value.
+ *
+ * The input values to the Barrett reduction are the degree-63 polynomial
+ * in V1 (R(x)), the degree-32 generator polynomial, and the reduction
+ * constant u. The Barrett reduction result is the CRC value of R(x) mod
+ * P(x).
+ *
+ * The Barrett reduction algorithm is defined as:
+ *
+ * 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
+ * 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
+ * 3. C(x) = R(x) XOR T2(x) mod x^32
+ *
+ * Note: The leftmost doubleword of the vector register containing
+ * CONST_RU_POLY is zero and, thus, the intermediate GF(2) product
+ * is zero and does not contribute to the final result.
+ */
+
+ /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
+ fpu_vupllf(2, 1);
+ fpu_vgfmg(2, CONST_RU_POLY, 2);
+
+ /*
+ * Compute the GF(2) product of the CRC polynomial with T1(x) in
+ * V2 and XOR the intermediate result, T2(x), with the value in V1.
+ * The final result is stored in word element 2 of V2.
+ */
+ fpu_vupllf(2, 2);
+ fpu_vgfmag(2, CONST_CRC_POLY, 2, 1);
+
+ return fpu_vlgvf(2, 2);
+}
+
+u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
+{
+ return crc32_le_vgfm_generic(crc, buf, size, &constants_CRC_32_LE[0]);
+}
+
+u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
+{
+ return crc32_le_vgfm_generic(crc, buf, size, &constants_CRC_32C_LE[0]);
+}
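
A note on the first table entry above: it is not a reduction constant but the permute mask for the fpu_vperm() calls. VECTOR PERMUTE selects each result byte from the 32-byte concatenation of its two source operands according to the corresponding mask byte; with both operands naming the same register, the mask bytes 0x0f, 0x0e, ..., 0x01, 0x00 simply reverse the 16 bytes of each loaded vector before folding. A stand-alone sketch of that reordering (vperm_same_source() is hypothetical, not a kernel function):

static void vperm_same_source(unsigned char dst[16], const unsigned char src[16],
			      const unsigned char mask[16])
{
	int i;

	for (i = 0; i < 16; i++)
		dst[i] = src[mask[i] & 0x0f];	/* both source operands alias 'src' here */
}
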
diff --git a/arch/s390/lib/expoline/expoline.S b/arch/s390/lib/expoline.S
index 92ed8409a7a4..92ed8409a7a4 100644
--- a/arch/s390/lib/expoline/expoline.S
+++ b/arch/s390/lib/expoline.S
diff --git a/arch/s390/lib/expoline/Makefile b/arch/s390/lib/expoline/Makefile
deleted file mode 100644
index 854631d9cb03..000000000000
--- a/arch/s390/lib/expoline/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-y += expoline.o
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index 08f60a42b9a6..d026debf250c 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -34,8 +34,7 @@ SYM_FUNC_START(__memmove)
la %r3,256(%r3)
brctg %r0,.Lmemmove_forward_loop
.Lmemmove_forward_remainder:
- larl %r5,.Lmemmove_mvc
- ex %r4,0(%r5)
+ exrl %r4,.Lmemmove_mvc
.Lmemmove_exit:
BR_EX %r14
.Lmemmove_reverse:
@@ -83,8 +82,7 @@ SYM_FUNC_START(__memset)
la %r1,256(%r1)
brctg %r3,.Lmemset_clear_loop
.Lmemset_clear_remainder:
- larl %r3,.Lmemset_xc
- ex %r4,0(%r3)
+ exrl %r4,.Lmemset_xc
.Lmemset_exit:
BR_EX %r14
.Lmemset_fill:
@@ -102,8 +100,7 @@ SYM_FUNC_START(__memset)
brctg %r5,.Lmemset_fill_loop
.Lmemset_fill_remainder:
stc %r3,0(%r1)
- larl %r5,.Lmemset_mvc
- ex %r4,0(%r5)
+ exrl %r4,.Lmemset_mvc
BR_EX %r14
.Lmemset_fill_exit:
stc %r3,0(%r1)
@@ -132,8 +129,7 @@ SYM_FUNC_START(__memcpy)
lgr %r1,%r2
jnz .Lmemcpy_loop
.Lmemcpy_remainder:
- larl %r5,.Lmemcpy_mvc
- ex %r4,0(%r5)
+ exrl %r4,.Lmemcpy_mvc
.Lmemcpy_exit:
BR_EX %r14
.Lmemcpy_loop:
@@ -175,8 +171,7 @@ SYM_FUNC_START(__memset\bits)
brctg %r5,.L__memset_loop\bits
.L__memset_remainder\bits:
\insn %r3,0(%r1)
- larl %r5,.L__memset_mvc\bits
- ex %r4,0(%r5)
+ exrl %r4,.L__memset_mvc\bits
BR_EX %r14
.L__memset_store\bits:
\insn %r3,0(%r2)
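
The mem.S hunks above (and the xor.c hunks at the end of this diff) all apply the same transformation: EXECUTE RELATIVE LONG addresses its target instruction relative to the program counter, so the LARL that previously materialized the target address, and the scratch register it needed, can be dropped. A reduced sketch of the pattern as C inline assembly (xc_tail() is a hypothetical helper, not part of the patch):

/* Execute the XC at label 0 with its length field taken from len_m1 (0..255). */
static inline void xc_tail(char *dst, const char *src, unsigned long len_m1)
{
	asm volatile(
		"	exrl	%[len],0f\n"
		"	j	1f\n"
		"0:	xc	0(1,%[dst]),0(%[src])\n"
		"1:\n"
		: "+m" (*(char (*)[256])dst)
		: [len] "d" (len_m1), [dst] "a" (dst), [src] "a" (src),
		  "m" (*(const char (*)[256])src)
		: "cc");
}
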
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 81c53440b3e6..ad9da4038511 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -10,11 +10,14 @@
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
+#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/alternative.h>
+#include <asm/machine.h>
+#include <asm/asm.h>
int spin_retry = -1;
@@ -36,6 +39,23 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
+static const struct ctl_table s390_spin_sysctl_table[] = {
+ {
+ .procname = "spin_retry",
+ .data = &spin_retry,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+};
+
+static int __init init_s390_spin_sysctls(void)
+{
+ register_sysctl_init("kernel", s390_spin_sysctl_table);
+ return 0;
+}
+arch_initcall(init_s390_spin_sysctls);
+
struct spin_wait {
struct spin_wait *next, *prev;
int node_id;
@@ -75,25 +95,44 @@ static inline int arch_load_niai4(int *lock)
int owner;
asm_inline volatile(
- ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", 49) /* NIAI 4 */
- " l %0,%1\n"
- : "=d" (owner) : "Q" (*lock) : "memory");
+ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,4,0", ALT_FACILITY(49)) /* NIAI 4 */
+ " l %[owner],%[lock]\n"
+ : [owner] "=d" (owner) : [lock] "R" (*lock) : "memory");
return owner;
}
-static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
+#ifdef __HAVE_ASM_FLAG_OUTPUTS__
+
+static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new)
+{
+ int cc;
+
+ asm_inline volatile(
+ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
+ " cs %[old],%[new],%[lock]\n"
+ : [old] "+d" (old), [lock] "+Q" (*lock), "=@cc" (cc)
+ : [new] "d" (new)
+ : "memory");
+ return cc == 0;
+}
+
+#else /* __HAVE_ASM_FLAG_OUTPUTS__ */
+
+static inline int arch_try_cmpxchg_niai8(int *lock, int old, int new)
{
int expected = old;
asm_inline volatile(
- ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", 49) /* NIAI 8 */
- " cs %0,%3,%1\n"
- : "=d" (old), "=Q" (*lock)
- : "0" (old), "d" (new), "Q" (*lock)
+ ALTERNATIVE("nop", ".insn rre,0xb2fa0000,8,0", ALT_FACILITY(49)) /* NIAI 8 */
+ " cs %[old],%[new],%[lock]\n"
+ : [old] "+d" (old), [lock] "+Q" (*lock)
+ : [new] "d" (new)
: "cc", "memory");
return expected == old;
}
+#endif /* __HAVE_ASM_FLAG_OUTPUTS__ */
+
static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
int ix, cpu;
@@ -119,16 +158,16 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
struct spin_wait *node, *next;
int lockval, ix, node_id, tail_id, old, new, owner, count;
- ix = S390_lowcore.spinlock_index++;
+ ix = get_lowcore()->spinlock_index++;
barrier();
- lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
+ lockval = spinlock_lockval(); /* cpu + 1 */
node = this_cpu_ptr(&spin_wait[ix]);
node->prev = node->next = NULL;
node_id = node->node_id;
/* Enqueue the node for this CPU in the spinlock wait queue */
+ old = READ_ONCE(lp->lock);
while (1) {
- old = READ_ONCE(lp->lock);
if ((old & _Q_LOCK_CPU_MASK) == 0 &&
(old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
/*
@@ -139,7 +178,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
* waiter will get the lock.
*/
new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
- if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ if (arch_try_cmpxchg(&lp->lock, &old, new))
/* Got the lock */
goto out;
/* lock passing in progress */
@@ -147,7 +186,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
}
/* Make the node of this CPU the new tail. */
new = node_id | (old & _Q_LOCK_MASK);
- if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ if (arch_try_cmpxchg(&lp->lock, &old, new))
break;
}
/* Set the 'next' pointer of the tail node in the queue */
@@ -184,7 +223,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
if (!owner) {
tail_id = old & _Q_TAIL_MASK;
new = ((tail_id != node_id) ? tail_id : 0) | lockval;
- if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+ if (arch_try_cmpxchg(&lp->lock, &old, new))
/* Got the lock */
break;
continue;
@@ -192,7 +231,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
if (count-- >= 0)
continue;
count = spin_retry;
- if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
+ if (!machine_is_lpar() || arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
}
@@ -205,14 +244,14 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
}
out:
- S390_lowcore.spinlock_index--;
+ get_lowcore()->spinlock_index--;
}
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
int lockval, old, new, owner, count;
- lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
+ lockval = spinlock_lockval(); /* cpu + 1 */
/* Pass the virtual CPU to the lock holder if it is not running */
owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
@@ -226,7 +265,7 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
/* Try to get the lock if it is free. */
if (!owner) {
new = (old & _Q_TAIL_MASK) | lockval;
- if (arch_cmpxchg_niai8(&lp->lock, old, new)) {
+ if (arch_try_cmpxchg_niai8(&lp->lock, old, new)) {
/* Got the lock */
return;
}
@@ -235,7 +274,7 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
if (count-- >= 0)
continue;
count = spin_retry;
- if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
+ if (!machine_is_lpar() || arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
}
}
@@ -251,14 +290,14 @@ EXPORT_SYMBOL(arch_spin_lock_wait);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
- int cpu = SPINLOCK_LOCKVAL;
+ int cpu = spinlock_lockval();
int owner, count;
for (count = spin_retry; count > 0; count--) {
owner = READ_ONCE(lp->lock);
/* Try to get the lock if it is free. */
if (!owner) {
- if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
+ if (arch_try_cmpxchg(&lp->lock, &owner, cpu))
return 1;
}
}
@@ -300,7 +339,7 @@ void arch_write_lock_wait(arch_rwlock_t *rw)
while (1) {
old = READ_ONCE(rw->cnts);
if ((old & 0x1ffff) == 0 &&
- __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
+ arch_try_cmpxchg(&rw->cnts, &old, old | 0x10000))
/* Got the lock */
break;
barrier();
@@ -317,7 +356,7 @@ void arch_spin_relax(arch_spinlock_t *lp)
cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
if (!cpu)
return;
- if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
+ if (machine_is_lpar() && !arch_vcpu_is_preempted(cpu - 1))
return;
smp_yield_cpu(cpu - 1);
}
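
The two arch_try_cmpxchg_niai8() variants above differ only in how the COMPARE AND SWAP condition code reaches C: with flag-output support the compiler reads it directly through the "=@cc" operand, otherwise the old value has to be compared after the fact. Stripped of the NIAI alternative, the pattern looks like this (sketch only, cs_try() is a hypothetical helper, not part of the patch):

static inline int cs_try(int *ptr, int old, int new)
{
#ifdef __HAVE_ASM_FLAG_OUTPUTS__
	int cc;

	asm volatile(
		"	cs	%[old],%[new],%[ptr]\n"
		: [old] "+d" (old), [ptr] "+Q" (*ptr), "=@cc" (cc)
		: [new] "d" (new)
		: "memory");
	return cc == 0;			/* cc 0: swap performed */
#else
	int expected = old;

	asm volatile(
		"	cs	%[old],%[new],%[ptr]\n"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return expected == old;		/* %[old] unchanged: swap performed */
#endif
}
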
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 7d8741818239..373fa1f01937 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/export.h>
+#include <asm/asm.h>
/*
* Helper functions to find the end of a string
@@ -238,12 +239,11 @@ static inline int clcle(const char *s1, unsigned long l1,
asm volatile(
"0: clcle %[r1],%[r3],0\n"
" jo 0b\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (cc), [r1] "+&d" (r1.pair), [r3] "+&d" (r3.pair)
+ CC_IPM(cc)
+ : CC_OUT(cc, cc), [r1] "+d" (r1.pair), [r3] "+d" (r3.pair)
:
- : "cc", "memory");
- return cc;
+ : CC_CLOBBER_LIST("memory"));
+ return CC_TRANSFORM(cc);
}
/**
diff --git a/arch/s390/lib/test_kprobes.c b/arch/s390/lib/test_kprobes.c
index 9e62d62812e5..9021298c3e8a 100644
--- a/arch/s390/lib/test_kprobes.c
+++ b/arch/s390/lib/test_kprobes.c
@@ -72,4 +72,5 @@ static struct kunit_suite kprobes_test_suite = {
kunit_test_suites(&kprobes_test_suite);
+MODULE_DESCRIPTION("KUnit tests for kprobes");
MODULE_LICENSE("GPL");
diff --git a/arch/s390/lib/test_modules.c b/arch/s390/lib/test_modules.c
index 9894009fc1f2..f96b6a3737e7 100644
--- a/arch/s390/lib/test_modules.c
+++ b/arch/s390/lib/test_modules.c
@@ -29,4 +29,5 @@ static struct kunit_suite modules_test_suite = {
kunit_test_suites(&modules_test_suite);
+MODULE_DESCRIPTION("KUnit test that modules with many relocations are loaded properly");
MODULE_LICENSE("GPL");
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index 2848e3fb2ff5..6e42100875e7 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -270,9 +270,9 @@ static void notrace __used test_unwind_ftrace_handler(unsigned long ip,
struct ftrace_ops *fops,
struct ftrace_regs *fregs)
{
- struct unwindme *u = (struct unwindme *)fregs->regs.gprs[2];
+ struct unwindme *u = (struct unwindme *)arch_ftrace_regs(fregs)->regs.gprs[2];
- u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? &fregs->regs : NULL,
+ u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? &arch_ftrace_regs(fregs)->regs : NULL,
(u->flags & UWM_SP) ? u->sp : 0);
}
@@ -356,7 +356,7 @@ static noinline int unwindme_func2(struct unwindme *u)
if (u->flags & UWM_SWITCH_STACK) {
local_irq_save(flags);
local_mcck_save(mflags);
- rc = call_on_stack(1, S390_lowcore.nodat_stack,
+ rc = call_on_stack(1, get_lowcore()->nodat_stack,
int, unwindme_func3, struct unwindme *, u);
local_mcck_restore(mflags);
local_irq_restore(flags);
@@ -519,4 +519,5 @@ static struct kunit_suite test_unwind_suite = {
kunit_test_suites(&test_unwind_suite);
+MODULE_DESCRIPTION("KUnit test for unwind_for_each_frame");
MODULE_LICENSE("GPL");
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 61d8dcd95bbc..cec20db88479 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -21,60 +21,69 @@ void debug_user_asce(int exit)
local_ctl_store(1, &cr1);
local_ctl_store(7, &cr7);
- if (cr1.val == S390_lowcore.kernel_asce.val && cr7.val == S390_lowcore.user_asce.val)
+ if (cr1.val == get_lowcore()->kernel_asce.val && cr7.val == get_lowcore()->user_asce.val)
return;
panic("incorrect ASCE on kernel %s\n"
"cr1: %016lx cr7: %016lx\n"
"kernel: %016lx user: %016lx\n",
exit ? "exit" : "entry", cr1.val, cr7.val,
- S390_lowcore.kernel_asce.val, S390_lowcore.user_asce.val);
+ get_lowcore()->kernel_asce.val, get_lowcore()->user_asce.val);
}
#endif /*CONFIG_DEBUG_ENTRY */
-static unsigned long raw_copy_from_user_key(void *to, const void __user *from,
- unsigned long size, unsigned long key)
+union oac {
+ unsigned int val;
+ struct {
+ struct {
+ unsigned short key : 4;
+ unsigned short : 4;
+ unsigned short as : 2;
+ unsigned short : 4;
+ unsigned short k : 1;
+ unsigned short a : 1;
+ } oac1;
+ struct {
+ unsigned short key : 4;
+ unsigned short : 4;
+ unsigned short as : 2;
+ unsigned short : 4;
+ unsigned short k : 1;
+ unsigned short a : 1;
+ } oac2;
+ };
+};
+
+static uaccess_kmsan_or_inline __must_check unsigned long
+raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key)
{
- unsigned long rem;
+ unsigned long osize;
union oac spec = {
.oac2.key = key,
.oac2.as = PSW_BITS_AS_SECONDARY,
.oac2.k = 1,
.oac2.a = 1,
};
+ int cc;
- asm volatile(
- " lr 0,%[spec]\n"
- "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
- "1: jz 5f\n"
- " algr %[size],%[val]\n"
- " slgr %[from],%[val]\n"
- " slgr %[to],%[val]\n"
- " j 0b\n"
- "2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */
- " nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */
- " slgr %[rem],%[from]\n"
- " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
- "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
- "4: slgr %[size],%[rem]\n"
- " j 6f\n"
- "5: slgr %[size],%[size]\n"
- "6:\n"
- EX_TABLE(0b, 2b)
- EX_TABLE(1b, 2b)
- EX_TABLE(3b, 6b)
- EX_TABLE(4b, 6b)
- : [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
- : [val] "a" (-4096UL), [spec] "d" (spec.val)
- : "cc", "memory", "0");
- return size;
-}
-
-unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- return raw_copy_from_user_key(to, from, n, 0);
+ while (1) {
+ osize = size;
+ asm_inline volatile(
+ " lr %%r0,%[spec]\n"
+ "0: mvcos %[to],%[from],%[size]\n"
+ "1: nopr %%r7\n"
+ CC_IPM(cc)
+ EX_TABLE_UA_MVCOS_FROM(0b, 0b)
+ EX_TABLE_UA_MVCOS_FROM(1b, 0b)
+ : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to)
+ : [spec] "d" (spec.val), [from] "Q" (*(const char __user *)from)
+ : CC_CLOBBER_LIST("memory", "0"));
+ if (CC_TRANSFORM(cc) == 0)
+ return osize - size;
+ size -= 4096;
+ to += 4096;
+ from += 4096;
+ }
}
-EXPORT_SYMBOL(raw_copy_from_user);
unsigned long _copy_from_user_key(void *to, const void __user *from,
unsigned long n, unsigned long key)
@@ -93,50 +102,37 @@ unsigned long _copy_from_user_key(void *to, const void __user *from,
}
EXPORT_SYMBOL(_copy_from_user_key);
-static unsigned long raw_copy_to_user_key(void __user *to, const void *from,
- unsigned long size, unsigned long key)
+static uaccess_kmsan_or_inline __must_check unsigned long
+raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key)
{
- unsigned long rem;
+ unsigned long osize;
union oac spec = {
.oac1.key = key,
.oac1.as = PSW_BITS_AS_SECONDARY,
.oac1.k = 1,
.oac1.a = 1,
};
+ int cc;
- asm volatile(
- " lr 0,%[spec]\n"
- "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
- "1: jz 5f\n"
- " algr %[size],%[val]\n"
- " slgr %[to],%[val]\n"
- " slgr %[from],%[val]\n"
- " j 0b\n"
- "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
- " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
- " slgr %[rem],%[to]\n"
- " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
- "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
- "4: slgr %[size],%[rem]\n"
- " j 6f\n"
- "5: slgr %[size],%[size]\n"
- "6:\n"
- EX_TABLE(0b, 2b)
- EX_TABLE(1b, 2b)
- EX_TABLE(3b, 6b)
- EX_TABLE(4b, 6b)
- : [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
- : [val] "a" (-4096UL), [spec] "d" (spec.val)
- : "cc", "memory", "0");
- return size;
-}
-
-unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- return raw_copy_to_user_key(to, from, n, 0);
+ while (1) {
+ osize = size;
+ asm_inline volatile(
+ " lr %%r0,%[spec]\n"
+ "0: mvcos %[to],%[from],%[size]\n"
+ "1: nopr %%r7\n"
+ CC_IPM(cc)
+ EX_TABLE_UA_MVCOS_TO(0b, 0b)
+ EX_TABLE_UA_MVCOS_TO(1b, 0b)
+ : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
+ : [spec] "d" (spec.val), [from] "Q" (*(const char *)from)
+ : CC_CLOBBER_LIST("memory", "0"));
+ if (CC_TRANSFORM(cc) == 0)
+ return osize - size;
+ size -= 4096;
+ to += 4096;
+ from += 4096;
+ }
}
-EXPORT_SYMBOL(raw_copy_to_user);
unsigned long _copy_to_user_key(void __user *to, const void *from,
unsigned long n, unsigned long key)
@@ -148,39 +144,3 @@ unsigned long _copy_to_user_key(void __user *to, const void *from,
return raw_copy_to_user_key(to, from, n, key);
}
EXPORT_SYMBOL(_copy_to_user_key);
-
-unsigned long __clear_user(void __user *to, unsigned long size)
-{
- unsigned long rem;
- union oac spec = {
- .oac1.as = PSW_BITS_AS_SECONDARY,
- .oac1.a = 1,
- };
-
- asm volatile(
- " lr 0,%[spec]\n"
- "0: mvcos 0(%[to]),0(%[zeropg]),%[size]\n"
- "1: jz 5f\n"
- " algr %[size],%[val]\n"
- " slgr %[to],%[val]\n"
- " j 0b\n"
- "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
- " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
- " slgr %[rem],%[to]\n"
- " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
- "3: mvcos 0(%[to]),0(%[zeropg]),%[rem]\n"
- "4: slgr %[size],%[rem]\n"
- " j 6f\n"
- "5: slgr %[size],%[size]\n"
- "6:\n"
- EX_TABLE(0b, 2b)
- EX_TABLE(1b, 2b)
- EX_TABLE(3b, 6b)
- EX_TABLE(4b, 6b)
- : [size] "+&a" (size), [to] "+&a" (to), [rem] "=&a" (rem)
- : [val] "a" (-4096UL), [zeropg] "a" (empty_zero_page), [spec] "d" (spec.val)
- : "cc", "memory", "0");
- return size;
-}
-EXPORT_SYMBOL(__clear_user);
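
The reworked copy routines above keep only the MVCOS itself in assembly and move the page-stepping loop into C: MVCOS moves at most 4 KiB per execution and leaves a nonzero condition code while data remains, so the loop advances by 4 KiB and re-drives it; faults are resolved through the EX_TABLE_UA_MVCOS_* fixups. A rough sketch of that control flow (mvcos_chunk() is a hypothetical stand-in for the inline asm, and the fixup behaviour is not modelled):

static int mvcos_chunk(void *to, const void *from, unsigned long *size);

static unsigned long copy_loop_sketch(void *to, const void *from, unsigned long size)
{
	unsigned long osize;

	while (1) {
		osize = size;
		if (mvcos_chunk(to, from, &size) == 0)	/* cc 0: request satisfied */
			return osize - size;
		/* cc != 0: one more 4 KiB frame moved, continue with the rest */
		size -= 4096;
		to += 4096;
		from += 4096;
	}
}
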
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
index fb924a8041dc..ce7bcf7c0032 100644
--- a/arch/s390/lib/xor.c
+++ b/arch/s390/lib/xor.c
@@ -15,7 +15,6 @@ static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2)
{
asm volatile(
- " larl 1,2f\n"
" aghi %0,-1\n"
" jm 3f\n"
" srlg 0,%0,8\n"
@@ -25,12 +24,12 @@ static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
" la %1,256(%1)\n"
" la %2,256(%2)\n"
" brctg 0,0b\n"
- "1: ex %0,0(1)\n"
+ "1: exrl %0,2f\n"
" j 3f\n"
"2: xc 0(1,%1),0(%2)\n"
"3:\n"
: : "d" (bytes), "a" (p1), "a" (p2)
- : "0", "1", "cc", "memory");
+ : "0", "cc", "memory");
}
static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
@@ -38,9 +37,8 @@ static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p3)
{
asm volatile(
- " larl 1,2f\n"
" aghi %0,-1\n"
- " jm 3f\n"
+ " jm 4f\n"
" srlg 0,%0,8\n"
" ltgr 0,0\n"
" jz 1f\n"
@@ -50,14 +48,14 @@ static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
" la %2,256(%2)\n"
" la %3,256(%3)\n"
" brctg 0,0b\n"
- "1: ex %0,0(1)\n"
- " ex %0,6(1)\n"
- " j 3f\n"
+ "1: exrl %0,2f\n"
+ " exrl %0,3f\n"
+ " j 4f\n"
"2: xc 0(1,%1),0(%2)\n"
- " xc 0(1,%1),0(%3)\n"
- "3:\n"
+ "3: xc 0(1,%1),0(%3)\n"
+ "4:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3)
- : : "0", "1", "cc", "memory");
+ : : "0", "cc", "memory");
}
static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
@@ -66,9 +64,8 @@ static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p4)
{
asm volatile(
- " larl 1,2f\n"
" aghi %0,-1\n"
- " jm 3f\n"
+ " jm 5f\n"
" srlg 0,%0,8\n"
" ltgr 0,0\n"
" jz 1f\n"
@@ -80,16 +77,16 @@ static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
" la %3,256(%3)\n"
" la %4,256(%4)\n"
" brctg 0,0b\n"
- "1: ex %0,0(1)\n"
- " ex %0,6(1)\n"
- " ex %0,12(1)\n"
- " j 3f\n"
+ "1: exrl %0,2f\n"
+ " exrl %0,3f\n"
+ " exrl %0,4f\n"
+ " j 5f\n"
"2: xc 0(1,%1),0(%2)\n"
- " xc 0(1,%1),0(%3)\n"
- " xc 0(1,%1),0(%4)\n"
- "3:\n"
+ "3: xc 0(1,%1),0(%3)\n"
+ "4: xc 0(1,%1),0(%4)\n"
+ "5:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4)
- : : "0", "1", "cc", "memory");
+ : : "0", "cc", "memory");
}
static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
@@ -101,7 +98,7 @@ static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
asm volatile(
" larl 1,2f\n"
" aghi %0,-1\n"
- " jm 3f\n"
+ " jm 6f\n"
" srlg 0,%0,8\n"
" ltgr 0,0\n"
" jz 1f\n"
@@ -115,19 +112,19 @@ static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
" la %4,256(%4)\n"
" la %5,256(%5)\n"
" brctg 0,0b\n"
- "1: ex %0,0(1)\n"
- " ex %0,6(1)\n"
- " ex %0,12(1)\n"
- " ex %0,18(1)\n"
- " j 3f\n"
+ "1: exrl %0,2f\n"
+ " exrl %0,3f\n"
+ " exrl %0,4f\n"
+ " exrl %0,5f\n"
+ " j 6f\n"
"2: xc 0(1,%1),0(%2)\n"
- " xc 0(1,%1),0(%3)\n"
- " xc 0(1,%1),0(%4)\n"
- " xc 0(1,%1),0(%5)\n"
- "3:\n"
+ "3: xc 0(1,%1),0(%3)\n"
+ "4: xc 0(1,%1),0(%4)\n"
+ "5: xc 0(1,%1),0(%5)\n"
+ "6:\n"
: "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
"+a" (p5)
- : : "0", "1", "cc", "memory");
+ : : "0", "cc", "memory");
}
struct xor_block_template xor_block_xc = {