Diffstat (limited to 'arch/arm/crypto')
-rw-r--r--  arch/arm/crypto/Kconfig              |  49
-rw-r--r--  arch/arm/crypto/Makefile             |   4
-rw-r--r--  arch/arm/crypto/aes-ce-glue.c        |   6
-rw-r--r--  arch/arm/crypto/aes-cipher-glue.c    |   5
-rw-r--r--  arch/arm/crypto/aes-cipher.h         |  13
-rw-r--r--  arch/arm/crypto/aes-neonbs-glue.c    | 134
-rw-r--r--  arch/arm/crypto/chacha-glue.c        |  10
-rw-r--r--  arch/arm/crypto/crc32-ce-core.S      | 306
-rw-r--r--  arch/arm/crypto/crc32-ce-glue.c      | 246
-rw-r--r--  arch/arm/crypto/crct10dif-ce-core.S  | 381
-rw-r--r--  arch/arm/crypto/crct10dif-ce-glue.c  |  88
-rw-r--r--  arch/arm/crypto/curve25519-glue.c    |   1
-rw-r--r--  arch/arm/crypto/ghash-ce-glue.c      | 215
-rw-r--r--  arch/arm/crypto/poly1305-glue.c      |   3
-rw-r--r--  arch/arm/crypto/sha2-ce-glue.c       |   2
15 files changed, 98 insertions(+), 1365 deletions(-)
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 847b7a003356..23e4ea067ddb 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -3,10 +3,12 @@
menu "Accelerated Cryptographic Algorithms for CPU (arm)"
config CRYPTO_CURVE25519_NEON
- tristate "Public key crypto: Curve25519 (NEON)"
+ tristate
depends on KERNEL_MODE_NEON
+ select CRYPTO_KPP
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
+ default CRYPTO_LIB_CURVE25519_INTERNAL
help
Curve25519 algorithm
@@ -45,9 +47,10 @@ config CRYPTO_NHPOLY1305_NEON
- NEON (Advanced SIMD) extensions
config CRYPTO_POLY1305_ARM
- tristate "Hash functions: Poly1305 (NEON)"
+ tristate
select CRYPTO_HASH
select CRYPTO_ARCH_HAVE_LIB_POLY1305
+ default CRYPTO_LIB_POLY1305_INTERNAL
help
Poly1305 authenticator algorithm (RFC7539)
@@ -166,10 +169,9 @@ config CRYPTO_AES_ARM
config CRYPTO_AES_ARM_BS
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)"
depends on KERNEL_MODE_NEON
+ select CRYPTO_AES_ARM
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
- select CRYPTO_AES
- select CRYPTO_CBC
select CRYPTO_SIMD
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
@@ -183,8 +185,15 @@ config CRYPTO_AES_ARM_BS
Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
and for XTS mode encryption, CBC and XTS mode decryption speedup is
around 25%. (CBC encryption speed is not affected by this driver.)
- This implementation does not rely on any lookup tables so it is
- believed to be invulnerable to cache timing attacks.
+
+ The bit sliced AES code does not use lookup tables, so it is believed
+ to be invulnerable to cache timing attacks. However, since the bit
+ sliced AES code cannot process single blocks efficiently, in certain
+ cases table-based code with some countermeasures against cache timing
+ attacks will still be used as a fallback method; specifically CBC
+ encryption (not CBC decryption), the encryption of XTS tweaks, XTS
+ ciphertext stealing when the message isn't a multiple of 16 bytes, and
+ CTR when invoked in a context in which NEON instructions are unusable.
config CRYPTO_AES_ARM_CE
tristate "Ciphers: AES, modes: ECB/CBC/CTS/CTR/XTS (ARMv8 Crypto Extensions)"
@@ -206,9 +215,10 @@ config CRYPTO_AES_ARM_CE
- ARMv8 Crypto Extensions
config CRYPTO_CHACHA20_NEON
- tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (NEON)"
+ tristate
select CRYPTO_SKCIPHER
select CRYPTO_ARCH_HAVE_LIB_CHACHA
+ default CRYPTO_LIB_CHACHA_INTERNAL
help
Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
stream cipher algorithms
@@ -216,30 +226,5 @@ config CRYPTO_CHACHA20_NEON
Architecture: arm using:
- NEON (Advanced SIMD) extensions
-config CRYPTO_CRC32_ARM_CE
- tristate "CRC32C and CRC32"
- depends on KERNEL_MODE_NEON
- depends on CRC32
- select CRYPTO_HASH
- help
- CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
- and CRC32 CRC algorithm (IEEE 802.3)
-
- Architecture: arm using:
- - CRC and/or PMULL instructions
-
- Drivers: crc32-arm-ce and crc32c-arm-ce
-
-config CRYPTO_CRCT10DIF_ARM_CE
- tristate "CRCT10DIF"
- depends on KERNEL_MODE_NEON
- depends on CRC_T10DIF
- select CRYPTO_HASH
- help
- CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-
- Architecture: arm using:
- - PMULL (Polynomial Multiply Long) instructions
-
endmenu
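
The new CRYPTO_AES_ARM_BS help text lists the single-block cases that still go through the scalar, table-based AES code, because the bit-sliced implementation only pays off when it can process several blocks in parallel. CBC encryption is the clearest of those cases: each block depends on the previous ciphertext, so it is inherently serial. Below is a minimal sketch of that per-block fallback over a contiguous buffer, assuming <crypto/aes.h>, <crypto/algapi.h> and the new aes-cipher.h from this series; the helper name is illustrative, and the real driver walks a scatterlist instead (see the cbc_encrypt() hunk in aes-neonbs-glue.c further down).

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/string.h>
#include "aes-cipher.h"

/* Illustrative helper: CBC-encrypt 'blocks' full blocks in place,
 * chaining through 'iv', using the scalar AES key schedule. */
static void cbc_encrypt_scalar_sketch(const struct crypto_aes_ctx *key, u8 *buf,
                                      unsigned int blocks, u8 iv[AES_BLOCK_SIZE])
{
    int rounds = 6 + key->key_length / 4;
    const u8 *prev = iv;

    while (blocks--) {
        crypto_xor(buf, prev, AES_BLOCK_SIZE);            /* P_i ^ C_{i-1} */
        __aes_arm_encrypt(key->key_enc, rounds, buf, buf);
        prev = buf;                                       /* C_i chains forward */
        buf += AES_BLOCK_SIZE;
    }
    memcpy(iv, prev, AES_BLOCK_SIZE);                     /* hand back the final IV */
}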
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 13e62c7c25dc..3d0e23ff9e74 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -20,8 +20,6 @@ obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
-obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM_CE) += crct10dif-arm-ce.o
-obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o
aes-arm-y := aes-cipher-core.o aes-cipher-glue.o
aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
@@ -37,8 +35,6 @@ sha1-arm-ce-y := sha1-ce-core.o sha1-ce-glue.o
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
-crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
-crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
chacha-neon-y := chacha-scalar-core.o chacha-glue.o
chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o
poly1305-arm-y := poly1305-core.o poly1305-glue.o
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index b668c97663ec..1cf61f51e766 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -8,7 +8,7 @@
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
@@ -399,9 +399,9 @@ static int ctr_encrypt(struct skcipher_request *req)
}
if (walk.nbytes) {
u8 __aligned(8) tail[AES_BLOCK_SIZE];
+ const u8 *tsrc = walk.src.virt.addr;
unsigned int nbytes = walk.nbytes;
u8 *tdst = walk.dst.virt.addr;
- u8 *tsrc = walk.src.virt.addr;
/*
* Tell aes_ctr_encrypt() to process a tail block.
@@ -711,7 +711,7 @@ static int __init aes_init(void)
algname = aes_algs[i].base.cra_name + 2;
drvname = aes_algs[i].base.cra_driver_name + 2;
basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
+ simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto unregister_simds;
diff --git a/arch/arm/crypto/aes-cipher-glue.c b/arch/arm/crypto/aes-cipher-glue.c
index 6dfaef2d8f91..29efb7833960 100644
--- a/arch/arm/crypto/aes-cipher-glue.c
+++ b/arch/arm/crypto/aes-cipher-glue.c
@@ -9,9 +9,10 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
+#include "aes-cipher.h"
-asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
-asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
+EXPORT_SYMBOL_GPL(__aes_arm_encrypt);
+EXPORT_SYMBOL_GPL(__aes_arm_decrypt);
static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
diff --git a/arch/arm/crypto/aes-cipher.h b/arch/arm/crypto/aes-cipher.h
new file mode 100644
index 000000000000..d5db2b87eb69
--- /dev/null
+++ b/arch/arm/crypto/aes-cipher.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef ARM_CRYPTO_AES_CIPHER_H
+#define ARM_CRYPTO_AES_CIPHER_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds,
+ const u8 *in, u8 *out);
+asmlinkage void __aes_arm_decrypt(const u32 rk[], int rounds,
+ const u8 *in, u8 *out);
+
+#endif /* ARM_CRYPTO_AES_CIPHER_H */
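
This new header is what lets the bit-sliced driver call the scalar cipher directly instead of allocating fallback tfms. A minimal sketch of a one-off single-block encryption through it, assuming <crypto/aes.h> and <linux/string.h>; the function name is illustrative:

#include <crypto/aes.h>
#include <linux/string.h>
#include "aes-cipher.h"

static int encrypt_one_block_sketch(const u8 *key, unsigned int key_len,
                                    const u8 in[AES_BLOCK_SIZE],
                                    u8 out[AES_BLOCK_SIZE])
{
    struct crypto_aes_ctx ctx;
    int err;

    err = aes_expandkey(&ctx, key, key_len);    /* also validates key_len */
    if (err)
        return err;

    /* 10/12/14 rounds for 128/192/256-bit keys */
    __aes_arm_encrypt(ctx.key_enc, 6 + key_len / 4, in, out);

    memzero_explicit(&ctx, sizeof(ctx));
    return 0;
}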
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index f00f042ef357..f6be80b5938b 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -9,23 +9,22 @@
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/module.h>
+#include "aes-cipher.h"
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ecb(aes)");
-MODULE_ALIAS_CRYPTO("cbc(aes)-all");
+MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
-
asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
@@ -51,13 +50,13 @@ struct aesbs_ctx {
struct aesbs_cbc_ctx {
struct aesbs_ctx key;
- struct crypto_skcipher *enc_tfm;
+ struct crypto_aes_ctx fallback;
};
struct aesbs_xts_ctx {
struct aesbs_ctx key;
- struct crypto_cipher *cts_tfm;
- struct crypto_cipher *tweak_tfm;
+ struct crypto_aes_ctx fallback;
+ struct crypto_aes_ctx tweak_key;
};
struct aesbs_ctr_ctx {
@@ -128,37 +127,49 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_aes_ctx rk;
int err;
- err = aes_expandkey(&rk, in_key, key_len);
+ err = aes_expandkey(&ctx->fallback, in_key, key_len);
if (err)
return err;
ctx->key.rounds = 6 + key_len / 4;
kernel_neon_begin();
- aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
+ aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
kernel_neon_end();
- memzero_explicit(&rk, sizeof(rk));
- return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
+ return 0;
}
static int cbc_encrypt(struct skcipher_request *req)
{
- struct skcipher_request *subreq = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
- skcipher_request_set_tfm(subreq, ctx->enc_tfm);
- skcipher_request_set_callback(subreq,
- skcipher_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ err = skcipher_walk_virt(&walk, req, false);
- return crypto_skcipher_encrypt(subreq);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *prev = walk.iv;
+
+ do {
+ crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE);
+ __aes_arm_encrypt(ctx->fallback.key_enc,
+ ctx->key.rounds, dst, dst);
+ prev = dst;
+ src += AES_BLOCK_SIZE;
+ dst += AES_BLOCK_SIZE;
+ nbytes -= AES_BLOCK_SIZE;
+ } while (nbytes >= AES_BLOCK_SIZE);
+ memcpy(walk.iv, prev, AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+ return err;
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -189,30 +200,6 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
-static int cbc_init(struct crypto_skcipher *tfm)
-{
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned int reqsize;
-
- ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->enc_tfm))
- return PTR_ERR(ctx->enc_tfm);
-
- reqsize = sizeof(struct skcipher_request);
- reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
- crypto_skcipher_set_reqsize(tfm, reqsize);
-
- return 0;
-}
-
-static void cbc_exit(struct crypto_skcipher *tfm)
-{
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_skcipher(ctx->enc_tfm);
-}
-
static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -270,16 +257,8 @@ static int ctr_encrypt(struct skcipher_request *req)
static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned long flags;
-
- /*
- * Temporarily disable interrupts to avoid races where
- * cachelines are evicted when the CPU is interrupted
- * to do something else.
- */
- local_irq_save(flags);
- aes_encrypt(&ctx->fallback, dst, src);
- local_irq_restore(flags);
+
+ __aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst);
}
static int ctr_encrypt_sync(struct skcipher_request *req)
@@ -301,45 +280,23 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
return err;
key_len /= 2;
- err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len);
+ err = aes_expandkey(&ctx->fallback, in_key, key_len);
if (err)
return err;
- err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
+ err = aes_expandkey(&ctx->tweak_key, in_key + key_len, key_len);
if (err)
return err;
return aesbs_setkey(tfm, in_key, key_len);
}
-static int xts_init(struct crypto_skcipher *tfm)
-{
- struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->cts_tfm))
- return PTR_ERR(ctx->cts_tfm);
-
- ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tweak_tfm))
- crypto_free_cipher(ctx->cts_tfm);
-
- return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
-}
-
-static void xts_exit(struct crypto_skcipher *tfm)
-{
- struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_cipher(ctx->tweak_tfm);
- crypto_free_cipher(ctx->cts_tfm);
-}
-
static int __xts_crypt(struct skcipher_request *req, bool encrypt,
void (*fn)(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[], int))
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const int rounds = ctx->key.rounds;
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct skcipher_request subreq;
u8 buf[2 * AES_BLOCK_SIZE];
@@ -363,7 +320,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
if (err)
return err;
- crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
+ __aes_arm_encrypt(ctx->tweak_key.key_enc, rounds, walk.iv, walk.iv);
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -377,7 +334,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
kernel_neon_begin();
fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
- ctx->key.rounds, blocks, walk.iv, reorder_last_tweak);
+ rounds, blocks, walk.iv, reorder_last_tweak);
kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
@@ -395,9 +352,9 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
if (encrypt)
- crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf);
+ __aes_arm_encrypt(ctx->fallback.key_enc, rounds, buf, buf);
else
- crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf);
+ __aes_arm_decrypt(ctx->fallback.key_dec, rounds, buf, buf);
crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
@@ -438,8 +395,7 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -448,8 +404,6 @@ static struct skcipher_alg aes_algs[] = { {
.setkey = aesbs_cbc_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- .init = cbc_init,
- .exit = cbc_exit,
}, {
.base.cra_name = "__ctr(aes)",
.base.cra_driver_name = "__ctr-aes-neonbs",
@@ -499,8 +453,6 @@ static struct skcipher_alg aes_algs[] = { {
.setkey = aesbs_xts_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
- .init = xts_init,
- .exit = xts_exit,
} };
static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
@@ -539,7 +491,7 @@ static int __init aes_init(void)
algname = aes_algs[i].base.cra_name + 2;
drvname = aes_algs[i].base.cra_driver_name + 2;
basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
+ simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto unregister_simds;
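
In the XTS path above, only the first tweak (a single AES block) and the ciphertext-stealing tail use the scalar cipher; the bulk of the data is still processed under kernel_neon_begin()/kernel_neon_end(). A sketch of how that first tweak is derived and then advanced by one block under the standard XTS convention (multiply by x in GF(2^128), little-endian, reduction constant 0x87); the helper name is illustrative, and the NEON assembly performs this stepping for the bulk blocks:

#include <crypto/aes.h>
#include "aes-cipher.h"

static void xts_first_tweaks_sketch(const struct crypto_aes_ctx *tweak_key,
                                    const u8 iv[AES_BLOCK_SIZE],
                                    u8 t0[AES_BLOCK_SIZE], u8 t1[AES_BLOCK_SIZE])
{
    int rounds = 6 + tweak_key->key_length / 4;
    u8 carry = 0;
    int i;

    /* T_0 = E_K2(sector IV): one block, hence the scalar fallback. */
    __aes_arm_encrypt(tweak_key->key_enc, rounds, iv, t0);

    /* T_1 = T_0 * x, treating the block as a little-endian 128-bit value. */
    for (i = 0; i < AES_BLOCK_SIZE; i++) {
        u8 msb = t0[i] >> 7;

        t1[i] = (t0[i] << 1) | carry;
        carry = msb;
    }
    if (carry)
        t1[0] ^= 0x87;
}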
diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c
index cdde8fd01f8f..50e635512046 100644
--- a/arch/arm/crypto/chacha-glue.c
+++ b/arch/arm/crypto/chacha-glue.c
@@ -76,12 +76,6 @@ void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
}
EXPORT_SYMBOL(hchacha_block_arch);
-void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
-{
- chacha_init_generic(state, key, iv);
-}
-EXPORT_SYMBOL(chacha_init_arch);
-
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
int nrounds)
{
@@ -116,7 +110,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
err = skcipher_walk_virt(&walk, req, false);
- chacha_init_generic(state, ctx->key, iv);
+ chacha_init(state, ctx->key, iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
@@ -166,7 +160,7 @@ static int do_xchacha(struct skcipher_request *req, bool neon)
u32 state[16];
u8 real_iv[16];
- chacha_init_generic(state, ctx->key, req->iv);
+ chacha_init(state, ctx->key, req->iv);
if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) {
hchacha_block_arm(state, subctx.key, ctx->nrounds);
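
chacha_init_arch() was a plain wrapper around the generic initialisation, so callers can use chacha_init() directly. For reference, the state it sets up is fixed by RFC 7539; a sketch of that layout, assuming <linux/string.h> and <linux/unaligned.h> (the helper name is illustrative, the word values are the standard "expand 32-byte k" constants):

#include <linux/string.h>
#include <linux/types.h>
#include <linux/unaligned.h>

/*
 * state[0..3]   "expand 32-byte k" constants
 * state[4..11]  256-bit key as eight little-endian words
 * state[12]     initial block counter (iv bytes 0..3)
 * state[13..15] nonce (iv bytes 4..15)
 */
static void chacha_state_sketch(u32 state[16], const u32 key[8], const u8 iv[16])
{
    static const u32 consts[4] = {
        0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
    };
    int i;

    memcpy(state, consts, sizeof(consts));
    for (i = 0; i < 8; i++)
        state[4 + i] = key[i];
    for (i = 0; i < 4; i++)
        state[12 + i] = get_unaligned_le32(iv + 4 * i);
}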
diff --git a/arch/arm/crypto/crc32-ce-core.S b/arch/arm/crypto/crc32-ce-core.S
deleted file mode 100644
index 3f13a76b9066..000000000000
--- a/arch/arm/crypto/crc32-ce-core.S
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
- *
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- *
- * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
- * calculation.
- * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
- * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
- * at:
- * https://www.intel.com/products/processor/manuals/
- * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
- * Volume 2B: Instruction Set Reference, N-Z
- *
- * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
- * Alexander Boyko <Alexander_Boyko@xyratex.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
- .text
- .align 6
- .arch armv8-a
- .arch_extension crc
- .fpu crypto-neon-fp-armv8
-
-.Lcrc32_constants:
- /*
- * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
- * #define CONSTANT_R1 0x154442bd4LL
- *
- * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596
- * #define CONSTANT_R2 0x1c6e41596LL
- */
- .quad 0x0000000154442bd4
- .quad 0x00000001c6e41596
-
- /*
- * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0
- * #define CONSTANT_R3 0x1751997d0LL
- *
- * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e
- * #define CONSTANT_R4 0x0ccaa009eLL
- */
- .quad 0x00000001751997d0
- .quad 0x00000000ccaa009e
-
- /*
- * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124
- * #define CONSTANT_R5 0x163cd6124LL
- */
- .quad 0x0000000163cd6124
- .quad 0x00000000FFFFFFFF
-
- /*
- * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
- *
- * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
- * = 0x1F7011641LL
- * #define CONSTANT_RU 0x1F7011641LL
- */
- .quad 0x00000001DB710641
- .quad 0x00000001F7011641
-
-.Lcrc32c_constants:
- .quad 0x00000000740eef02
- .quad 0x000000009e4addf8
- .quad 0x00000000f20c0dfe
- .quad 0x000000014cd00bd6
- .quad 0x00000000dd45aab8
- .quad 0x00000000FFFFFFFF
- .quad 0x0000000105ec76f0
- .quad 0x00000000dea713f1
-
- dCONSTANTl .req d0
- dCONSTANTh .req d1
- qCONSTANT .req q0
-
- BUF .req r0
- LEN .req r1
- CRC .req r2
-
- qzr .req q9
-
- /**
- * Calculate crc32
- * BUF - buffer
- * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
- * CRC - initial crc32
- * return %eax crc32
- * uint crc32_pmull_le(unsigned char const *buffer,
- * size_t len, uint crc32)
- */
-ENTRY(crc32_pmull_le)
- adr r3, .Lcrc32_constants
- b 0f
-
-ENTRY(crc32c_pmull_le)
- adr r3, .Lcrc32c_constants
-
-0: bic LEN, LEN, #15
- vld1.8 {q1-q2}, [BUF, :128]!
- vld1.8 {q3-q4}, [BUF, :128]!
- vmov.i8 qzr, #0
- vmov.i8 qCONSTANT, #0
- vmov.32 dCONSTANTl[0], CRC
- veor.8 d2, d2, dCONSTANTl
- sub LEN, LEN, #0x40
- cmp LEN, #0x40
- blt less_64
-
- vld1.64 {qCONSTANT}, [r3]
-
-loop_64: /* 64 bytes Full cache line folding */
- sub LEN, LEN, #0x40
-
- vmull.p64 q5, d3, dCONSTANTh
- vmull.p64 q6, d5, dCONSTANTh
- vmull.p64 q7, d7, dCONSTANTh
- vmull.p64 q8, d9, dCONSTANTh
-
- vmull.p64 q1, d2, dCONSTANTl
- vmull.p64 q2, d4, dCONSTANTl
- vmull.p64 q3, d6, dCONSTANTl
- vmull.p64 q4, d8, dCONSTANTl
-
- veor.8 q1, q1, q5
- vld1.8 {q5}, [BUF, :128]!
- veor.8 q2, q2, q6
- vld1.8 {q6}, [BUF, :128]!
- veor.8 q3, q3, q7
- vld1.8 {q7}, [BUF, :128]!
- veor.8 q4, q4, q8
- vld1.8 {q8}, [BUF, :128]!
-
- veor.8 q1, q1, q5
- veor.8 q2, q2, q6
- veor.8 q3, q3, q7
- veor.8 q4, q4, q8
-
- cmp LEN, #0x40
- bge loop_64
-
-less_64: /* Folding cache line into 128bit */
- vldr dCONSTANTl, [r3, #16]
- vldr dCONSTANTh, [r3, #24]
-
- vmull.p64 q5, d3, dCONSTANTh
- vmull.p64 q1, d2, dCONSTANTl
- veor.8 q1, q1, q5
- veor.8 q1, q1, q2
-
- vmull.p64 q5, d3, dCONSTANTh
- vmull.p64 q1, d2, dCONSTANTl
- veor.8 q1, q1, q5
- veor.8 q1, q1, q3
-
- vmull.p64 q5, d3, dCONSTANTh
- vmull.p64 q1, d2, dCONSTANTl
- veor.8 q1, q1, q5
- veor.8 q1, q1, q4
-
- teq LEN, #0
- beq fold_64
-
-loop_16: /* Folding rest buffer into 128bit */
- subs LEN, LEN, #0x10
-
- vld1.8 {q2}, [BUF, :128]!
- vmull.p64 q5, d3, dCONSTANTh
- vmull.p64 q1, d2, dCONSTANTl
- veor.8 q1, q1, q5
- veor.8 q1, q1, q2
-
- bne loop_16
-
-fold_64:
- /* perform the last 64 bit fold, also adds 32 zeroes
- * to the input stream */
- vmull.p64 q2, d2, dCONSTANTh
- vext.8 q1, q1, qzr, #8
- veor.8 q1, q1, q2
-
- /* final 32-bit fold */
- vldr dCONSTANTl, [r3, #32]
- vldr d6, [r3, #40]
- vmov.i8 d7, #0
-
- vext.8 q2, q1, qzr, #4
- vand.8 d2, d2, d6
- vmull.p64 q1, d2, dCONSTANTl
- veor.8 q1, q1, q2
-
- /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
- vldr dCONSTANTl, [r3, #48]
- vldr dCONSTANTh, [r3, #56]
-
- vand.8 q2, q1, q3
- vext.8 q2, qzr, q2, #8
- vmull.p64 q2, d5, dCONSTANTh
- vand.8 q2, q2, q3
- vmull.p64 q2, d4, dCONSTANTl
- veor.8 q1, q1, q2
- vmov r0, s5
-
- bx lr
-ENDPROC(crc32_pmull_le)
-ENDPROC(crc32c_pmull_le)
-
- .macro __crc32, c
- subs ip, r2, #8
- bmi .Ltail\c
-
- tst r1, #3
- bne .Lunaligned\c
-
- teq ip, #0
-.Laligned8\c:
- ldrd r2, r3, [r1], #8
-ARM_BE8(rev r2, r2 )
-ARM_BE8(rev r3, r3 )
- crc32\c\()w r0, r0, r2
- crc32\c\()w r0, r0, r3
- bxeq lr
- subs ip, ip, #8
- bpl .Laligned8\c
-
-.Ltail\c:
- tst ip, #4
- beq 2f
- ldr r3, [r1], #4
-ARM_BE8(rev r3, r3 )
- crc32\c\()w r0, r0, r3
-
-2: tst ip, #2
- beq 1f
- ldrh r3, [r1], #2
-ARM_BE8(rev16 r3, r3 )
- crc32\c\()h r0, r0, r3
-
-1: tst ip, #1
- bxeq lr
- ldrb r3, [r1]
- crc32\c\()b r0, r0, r3
- bx lr
-
-.Lunaligned\c:
- tst r1, #1
- beq 2f
- ldrb r3, [r1], #1
- subs r2, r2, #1
- crc32\c\()b r0, r0, r3
-
- tst r1, #2
- beq 0f
-2: ldrh r3, [r1], #2
- subs r2, r2, #2
-ARM_BE8(rev16 r3, r3 )
- crc32\c\()h r0, r0, r3
-
-0: subs ip, r2, #8
- bpl .Laligned8\c
- b .Ltail\c
- .endm
-
- .align 5
-ENTRY(crc32_armv8_le)
- __crc32
-ENDPROC(crc32_armv8_le)
-
- .align 5
-ENTRY(crc32c_armv8_le)
- __crc32 c
-ENDPROC(crc32c_armv8_le)
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
deleted file mode 100644
index 2208445808d7..000000000000
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ /dev/null
@@ -1,246 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
- *
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#include <linux/cpufeature.h>
-#include <linux/crc32.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <asm/unaligned.h>
-
-#define PMULL_MIN_LEN 64L /* minimum size of buffer
- * for crc32_pmull_le_16 */
-#define SCALE_F 16L /* size of NEON register */
-
-asmlinkage u32 crc32_pmull_le(const u8 buf[], u32 len, u32 init_crc);
-asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], u32 len);
-
-asmlinkage u32 crc32c_pmull_le(const u8 buf[], u32 len, u32 init_crc);
-asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], u32 len);
-
-static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], u32 len);
-static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], u32 len);
-
-static int crc32_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = 0;
- return 0;
-}
-
-static int crc32c_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = ~0;
- return 0;
-}
-
-static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
- unsigned int keylen)
-{
- u32 *mctx = crypto_shash_ctx(hash);
-
- if (keylen != sizeof(u32))
- return -EINVAL;
- *mctx = le32_to_cpup((__le32 *)key);
- return 0;
-}
-
-static int crc32_init(struct shash_desc *desc)
-{
- u32 *mctx = crypto_shash_ctx(desc->tfm);
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = *mctx;
- return 0;
-}
-
-static int crc32_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = crc32_armv8_le(*crc, data, length);
- return 0;
-}
-
-static int crc32c_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = crc32c_armv8_le(*crc, data, length);
- return 0;
-}
-
-static int crc32_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le32(*crc, out);
- return 0;
-}
-
-static int crc32c_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le32(~*crc, out);
- return 0;
-}
-
-static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
- unsigned int l;
-
- if (crypto_simd_usable()) {
- if ((u32)data % SCALE_F) {
- l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
-
- *crc = fallback_crc32(*crc, data, l);
-
- data += l;
- length -= l;
- }
-
- if (length >= PMULL_MIN_LEN) {
- l = round_down(length, SCALE_F);
-
- kernel_neon_begin();
- *crc = crc32_pmull_le(data, l, *crc);
- kernel_neon_end();
-
- data += l;
- length -= l;
- }
- }
-
- if (length > 0)
- *crc = fallback_crc32(*crc, data, length);
-
- return 0;
-}
-
-static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
- unsigned int l;
-
- if (crypto_simd_usable()) {
- if ((u32)data % SCALE_F) {
- l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
-
- *crc = fallback_crc32c(*crc, data, l);
-
- data += l;
- length -= l;
- }
-
- if (length >= PMULL_MIN_LEN) {
- l = round_down(length, SCALE_F);
-
- kernel_neon_begin();
- *crc = crc32c_pmull_le(data, l, *crc);
- kernel_neon_end();
-
- data += l;
- length -= l;
- }
- }
-
- if (length > 0)
- *crc = fallback_crc32c(*crc, data, length);
-
- return 0;
-}
-
-static struct shash_alg crc32_pmull_algs[] = { {
- .setkey = crc32_setkey,
- .init = crc32_init,
- .update = crc32_update,
- .final = crc32_final,
- .descsize = sizeof(u32),
- .digestsize = sizeof(u32),
-
- .base.cra_ctxsize = sizeof(u32),
- .base.cra_init = crc32_cra_init,
- .base.cra_name = "crc32",
- .base.cra_driver_name = "crc32-arm-ce",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = 1,
- .base.cra_module = THIS_MODULE,
-}, {
- .setkey = crc32_setkey,
- .init = crc32_init,
- .update = crc32c_update,
- .final = crc32c_final,
- .descsize = sizeof(u32),
- .digestsize = sizeof(u32),
-
- .base.cra_ctxsize = sizeof(u32),
- .base.cra_init = crc32c_cra_init,
- .base.cra_name = "crc32c",
- .base.cra_driver_name = "crc32c-arm-ce",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = 1,
- .base.cra_module = THIS_MODULE,
-} };
-
-static int __init crc32_pmull_mod_init(void)
-{
- if (elf_hwcap2 & HWCAP2_PMULL) {
- crc32_pmull_algs[0].update = crc32_pmull_update;
- crc32_pmull_algs[1].update = crc32c_pmull_update;
-
- if (elf_hwcap2 & HWCAP2_CRC32) {
- fallback_crc32 = crc32_armv8_le;
- fallback_crc32c = crc32c_armv8_le;
- } else {
- fallback_crc32 = crc32_le;
- fallback_crc32c = __crc32c_le;
- }
- } else if (!(elf_hwcap2 & HWCAP2_CRC32)) {
- return -ENODEV;
- }
-
- return crypto_register_shashes(crc32_pmull_algs,
- ARRAY_SIZE(crc32_pmull_algs));
-}
-
-static void __exit crc32_pmull_mod_exit(void)
-{
- crypto_unregister_shashes(crc32_pmull_algs,
- ARRAY_SIZE(crc32_pmull_algs));
-}
-
-static const struct cpu_feature __maybe_unused crc32_cpu_feature[] = {
- { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { }
-};
-MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature);
-
-module_init(crc32_pmull_mod_init);
-module_exit(crc32_pmull_mod_exit);
-
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("crc32");
-MODULE_ALIAS_CRYPTO("crc32c");
diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S
deleted file mode 100644
index 46c02c518a30..000000000000
--- a/arch/arm/crypto/crct10dif-ce-core.S
+++ /dev/null
@@ -1,381 +0,0 @@
-//
-// Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
-//
-// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
-// Copyright (C) 2019 Google LLC <ebiggers@google.com>
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License version 2 as
-// published by the Free Software Foundation.
-//
-
-// Derived from the x86 version:
-//
-// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
-//
-// Copyright (c) 2013, Intel Corporation
-//
-// Authors:
-// Erdinc Ozturk <erdinc.ozturk@intel.com>
-// Vinodh Gopal <vinodh.gopal@intel.com>
-// James Guilford <james.guilford@intel.com>
-// Tim Chen <tim.c.chen@linux.intel.com>
-//
-// This software is available to you under a choice of one of two
-// licenses. You may choose to be licensed under the terms of the GNU
-// General Public License (GPL) Version 2, available from the file
-// COPYING in the main directory of this source tree, or the
-// OpenIB.org BSD license below:
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// * Neither the name of the Intel Corporation nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-//
-// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Reference paper titled "Fast CRC Computation for Generic
-// Polynomials Using PCLMULQDQ Instruction"
-// URL: http://www.intel.com/content/dam/www/public/us/en/documents
-// /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
-//
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-#ifdef CONFIG_CPU_ENDIAN_BE8
-#define CPU_LE(code...)
-#else
-#define CPU_LE(code...) code
-#endif
-
- .text
- .arch armv8-a
- .fpu crypto-neon-fp-armv8
-
- init_crc .req r0
- buf .req r1
- len .req r2
-
- fold_consts_ptr .req ip
-
- q0l .req d0
- q0h .req d1
- q1l .req d2
- q1h .req d3
- q2l .req d4
- q2h .req d5
- q3l .req d6
- q3h .req d7
- q4l .req d8
- q4h .req d9
- q5l .req d10
- q5h .req d11
- q6l .req d12
- q6h .req d13
- q7l .req d14
- q7h .req d15
- q8l .req d16
- q8h .req d17
- q9l .req d18
- q9h .req d19
- q10l .req d20
- q10h .req d21
- q11l .req d22
- q11h .req d23
- q12l .req d24
- q12h .req d25
-
- FOLD_CONSTS .req q10
- FOLD_CONST_L .req q10l
- FOLD_CONST_H .req q10h
-
- // Fold reg1, reg2 into the next 32 data bytes, storing the result back
- // into reg1, reg2.
- .macro fold_32_bytes, reg1, reg2
- vld1.64 {q11-q12}, [buf]!
-
- vmull.p64 q8, \reg1\()h, FOLD_CONST_H
- vmull.p64 \reg1, \reg1\()l, FOLD_CONST_L
- vmull.p64 q9, \reg2\()h, FOLD_CONST_H
- vmull.p64 \reg2, \reg2\()l, FOLD_CONST_L
-
-CPU_LE( vrev64.8 q11, q11 )
-CPU_LE( vrev64.8 q12, q12 )
- vswp q11l, q11h
- vswp q12l, q12h
-
- veor.8 \reg1, \reg1, q8
- veor.8 \reg2, \reg2, q9
- veor.8 \reg1, \reg1, q11
- veor.8 \reg2, \reg2, q12
- .endm
-
- // Fold src_reg into dst_reg, optionally loading the next fold constants
- .macro fold_16_bytes, src_reg, dst_reg, load_next_consts
- vmull.p64 q8, \src_reg\()l, FOLD_CONST_L
- vmull.p64 \src_reg, \src_reg\()h, FOLD_CONST_H
- .ifnb \load_next_consts
- vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
- .endif
- veor.8 \dst_reg, \dst_reg, q8
- veor.8 \dst_reg, \dst_reg, \src_reg
- .endm
-
- .macro __adrl, out, sym
- movw \out, #:lower16:\sym
- movt \out, #:upper16:\sym
- .endm
-
-//
-// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
-//
-// Assumes len >= 16.
-//
-ENTRY(crc_t10dif_pmull)
-
- // For sizes less than 256 bytes, we can't fold 128 bytes at a time.
- cmp len, #256
- blt .Lless_than_256_bytes
-
- __adrl fold_consts_ptr, .Lfold_across_128_bytes_consts
-
- // Load the first 128 data bytes. Byte swapping is necessary to make
- // the bit order match the polynomial coefficient order.
- vld1.64 {q0-q1}, [buf]!
- vld1.64 {q2-q3}, [buf]!
- vld1.64 {q4-q5}, [buf]!
- vld1.64 {q6-q7}, [buf]!
-CPU_LE( vrev64.8 q0, q0 )
-CPU_LE( vrev64.8 q1, q1 )
-CPU_LE( vrev64.8 q2, q2 )
-CPU_LE( vrev64.8 q3, q3 )
-CPU_LE( vrev64.8 q4, q4 )
-CPU_LE( vrev64.8 q5, q5 )
-CPU_LE( vrev64.8 q6, q6 )
-CPU_LE( vrev64.8 q7, q7 )
- vswp q0l, q0h
- vswp q1l, q1h
- vswp q2l, q2h
- vswp q3l, q3h
- vswp q4l, q4h
- vswp q5l, q5h
- vswp q6l, q6h
- vswp q7l, q7h
-
- // XOR the first 16 data *bits* with the initial CRC value.
- vmov.i8 q8h, #0
- vmov.u16 q8h[3], init_crc
- veor q0h, q0h, q8h
-
- // Load the constants for folding across 128 bytes.
- vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
-
- // Subtract 128 for the 128 data bytes just consumed. Subtract another
- // 128 to simplify the termination condition of the following loop.
- sub len, len, #256
-
- // While >= 128 data bytes remain (not counting q0-q7), fold the 128
- // bytes q0-q7 into them, storing the result back into q0-q7.
-.Lfold_128_bytes_loop:
- fold_32_bytes q0, q1
- fold_32_bytes q2, q3
- fold_32_bytes q4, q5
- fold_32_bytes q6, q7
- subs len, len, #128
- bge .Lfold_128_bytes_loop
-
- // Now fold the 112 bytes in q0-q6 into the 16 bytes in q7.
-
- // Fold across 64 bytes.
- vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
- fold_16_bytes q0, q4
- fold_16_bytes q1, q5
- fold_16_bytes q2, q6
- fold_16_bytes q3, q7, 1
- // Fold across 32 bytes.
- fold_16_bytes q4, q6
- fold_16_bytes q5, q7, 1
- // Fold across 16 bytes.
- fold_16_bytes q6, q7
-
- // Add 128 to get the correct number of data bytes remaining in 0...127
- // (not counting q7), following the previous extra subtraction by 128.
- // Then subtract 16 to simplify the termination condition of the
- // following loop.
- adds len, len, #(128-16)
-
- // While >= 16 data bytes remain (not counting q7), fold the 16 bytes q7
- // into them, storing the result back into q7.
- blt .Lfold_16_bytes_loop_done
-.Lfold_16_bytes_loop:
- vmull.p64 q8, q7l, FOLD_CONST_L
- vmull.p64 q7, q7h, FOLD_CONST_H
- veor.8 q7, q7, q8
- vld1.64 {q0}, [buf]!
-CPU_LE( vrev64.8 q0, q0 )
- vswp q0l, q0h
- veor.8 q7, q7, q0
- subs len, len, #16
- bge .Lfold_16_bytes_loop
-
-.Lfold_16_bytes_loop_done:
- // Add 16 to get the correct number of data bytes remaining in 0...15
- // (not counting q7), following the previous extra subtraction by 16.
- adds len, len, #16
- beq .Lreduce_final_16_bytes
-
-.Lhandle_partial_segment:
- // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
- // 16 bytes are in q7 and the rest are the remaining data in 'buf'. To
- // do this without needing a fold constant for each possible 'len',
- // redivide the bytes into a first chunk of 'len' bytes and a second
- // chunk of 16 bytes, then fold the first chunk into the second.
-
- // q0 = last 16 original data bytes
- add buf, buf, len
- sub buf, buf, #16
- vld1.64 {q0}, [buf]
-CPU_LE( vrev64.8 q0, q0 )
- vswp q0l, q0h
-
- // q1 = high order part of second chunk: q7 left-shifted by 'len' bytes.
- __adrl r3, .Lbyteshift_table + 16
- sub r3, r3, len
- vld1.8 {q2}, [r3]
- vtbl.8 q1l, {q7l-q7h}, q2l
- vtbl.8 q1h, {q7l-q7h}, q2h
-
- // q3 = first chunk: q7 right-shifted by '16-len' bytes.
- vmov.i8 q3, #0x80
- veor.8 q2, q2, q3
- vtbl.8 q3l, {q7l-q7h}, q2l
- vtbl.8 q3h, {q7l-q7h}, q2h
-
- // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
- vshr.s8 q2, q2, #7
-
- // q2 = second chunk: 'len' bytes from q0 (low-order bytes),
- // then '16-len' bytes from q1 (high-order bytes).
- vbsl.8 q2, q1, q0
-
- // Fold the first chunk into the second chunk, storing the result in q7.
- vmull.p64 q0, q3l, FOLD_CONST_L
- vmull.p64 q7, q3h, FOLD_CONST_H
- veor.8 q7, q7, q0
- veor.8 q7, q7, q2
-
-.Lreduce_final_16_bytes:
- // Reduce the 128-bit value M(x), stored in q7, to the final 16-bit CRC.
-
- // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
- vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
-
- // Fold the high 64 bits into the low 64 bits, while also multiplying by
- // x^64. This produces a 128-bit value congruent to x^64 * M(x) and
- // whose low 48 bits are 0.
- vmull.p64 q0, q7h, FOLD_CONST_H // high bits * x^48 * (x^80 mod G(x))
- veor.8 q0h, q0h, q7l // + low bits * x^64
-
- // Fold the high 32 bits into the low 96 bits. This produces a 96-bit
- // value congruent to x^64 * M(x) and whose low 48 bits are 0.
- vmov.i8 q1, #0
- vmov s4, s3 // extract high 32 bits
- vmov s3, s5 // zero high 32 bits
- vmull.p64 q1, q1l, FOLD_CONST_L // high 32 bits * x^48 * (x^48 mod G(x))
- veor.8 q0, q0, q1 // + low bits
-
- // Load G(x) and floor(x^48 / G(x)).
- vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]
-
- // Use Barrett reduction to compute the final CRC value.
- vmull.p64 q1, q0h, FOLD_CONST_H // high 32 bits * floor(x^48 / G(x))
- vshr.u64 q1l, q1l, #32 // /= x^32
- vmull.p64 q1, q1l, FOLD_CONST_L // *= G(x)
- vshr.u64 q0l, q0l, #48
- veor.8 q0l, q0l, q1l // + low 16 nonzero bits
- // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of q0.
-
- vmov.u16 r0, q0l[0]
- bx lr
-
-.Lless_than_256_bytes:
- // Checksumming a buffer of length 16...255 bytes
-
- __adrl fold_consts_ptr, .Lfold_across_16_bytes_consts
-
- // Load the first 16 data bytes.
- vld1.64 {q7}, [buf]!
-CPU_LE( vrev64.8 q7, q7 )
- vswp q7l, q7h
-
- // XOR the first 16 data *bits* with the initial CRC value.
- vmov.i8 q0h, #0
- vmov.u16 q0h[3], init_crc
- veor.8 q7h, q7h, q0h
-
- // Load the fold-across-16-bytes constants.
- vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]!
-
- cmp len, #16
- beq .Lreduce_final_16_bytes // len == 16
- subs len, len, #32
- addlt len, len, #16
- blt .Lhandle_partial_segment // 17 <= len <= 31
- b .Lfold_16_bytes_loop // 32 <= len <= 255
-ENDPROC(crc_t10dif_pmull)
-
- .section ".rodata", "a"
- .align 4
-
-// Fold constants precomputed from the polynomial 0x18bb7
-// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
-.Lfold_across_128_bytes_consts:
- .quad 0x0000000000006123 // x^(8*128) mod G(x)
- .quad 0x0000000000002295 // x^(8*128+64) mod G(x)
-// .Lfold_across_64_bytes_consts:
- .quad 0x0000000000001069 // x^(4*128) mod G(x)
- .quad 0x000000000000dd31 // x^(4*128+64) mod G(x)
-// .Lfold_across_32_bytes_consts:
- .quad 0x000000000000857d // x^(2*128) mod G(x)
- .quad 0x0000000000007acc // x^(2*128+64) mod G(x)
-.Lfold_across_16_bytes_consts:
- .quad 0x000000000000a010 // x^(1*128) mod G(x)
- .quad 0x0000000000001faa // x^(1*128+64) mod G(x)
-// .Lfinal_fold_consts:
- .quad 0x1368000000000000 // x^48 * (x^48 mod G(x))
- .quad 0x2d56000000000000 // x^48 * (x^80 mod G(x))
-// .Lbarrett_reduction_consts:
- .quad 0x0000000000018bb7 // G(x)
- .quad 0x00000001f65a57f8 // floor(x^48 / G(x))
-
-// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
-// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
-// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
-.Lbyteshift_table:
- .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
- .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
- .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
- .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
deleted file mode 100644
index e9191a8c87b9..000000000000
--- a/arch/arm/crypto/crct10dif-ce-glue.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
- *
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#include <linux/crc-t10dif.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/internal/simd.h>
-
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
-
-asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
-
-static int crct10dif_init(struct shash_desc *desc)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- *crc = 0;
- return 0;
-}
-
-static int crct10dif_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
- kernel_neon_begin();
- *crc = crc_t10dif_pmull(*crc, data, length);
- kernel_neon_end();
- } else {
- *crc = crc_t10dif_generic(*crc, data, length);
- }
-
- return 0;
-}
-
-static int crct10dif_final(struct shash_desc *desc, u8 *out)
-{
- u16 *crc = shash_desc_ctx(desc);
-
- *(u16 *)out = *crc;
- return 0;
-}
-
-static struct shash_alg crc_t10dif_alg = {
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = crct10dif_init,
- .update = crct10dif_update,
- .final = crct10dif_final,
- .descsize = CRC_T10DIF_DIGEST_SIZE,
-
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-arm-ce",
- .base.cra_priority = 200,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-};
-
-static int __init crc_t10dif_mod_init(void)
-{
- if (!(elf_hwcap2 & HWCAP2_PMULL))
- return -ENODEV;
-
- return crypto_register_shash(&crc_t10dif_alg);
-}
-
-static void __exit crc_t10dif_mod_exit(void)
-{
- crypto_unregister_shash(&crc_t10dif_alg);
-}
-
-module_init(crc_t10dif_mod_init);
-module_exit(crc_t10dif_mod_exit);
-
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("crct10dif");
diff --git a/arch/arm/crypto/curve25519-glue.c b/arch/arm/crypto/curve25519-glue.c
index 9bdafd57888c..e7b87e09dd99 100644
--- a/arch/arm/crypto/curve25519-glue.c
+++ b/arch/arm/crypto/curve25519-glue.c
@@ -133,4 +133,5 @@ module_exit(arm_curve25519_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-neon");
+MODULE_DESCRIPTION("Public key crypto: Curve25519 (NEON-accelerated)");
MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 3ddf05b4234d..aabfcf522a2c 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -9,7 +9,7 @@
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/b128ops.h>
@@ -55,10 +55,6 @@ struct ghash_desc_ctx {
u32 count;
};
-struct ghash_async_ctx {
- struct cryptd_ahash *cryptd_tfm;
-};
-
asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
u64 const h[][2], const char *head);
@@ -78,34 +74,12 @@ static int ghash_init(struct shash_desc *desc)
static void ghash_do_update(int blocks, u64 dg[], const char *src,
struct ghash_key *key, const char *head)
{
- if (likely(crypto_simd_usable())) {
- kernel_neon_begin();
- if (static_branch_likely(&use_p64))
- pmull_ghash_update_p64(blocks, dg, src, key->h, head);
- else
- pmull_ghash_update_p8(blocks, dg, src, key->h, head);
- kernel_neon_end();
- } else {
- be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
-
- do {
- const u8 *in = src;
-
- if (head) {
- in = head;
- blocks++;
- head = NULL;
- } else {
- src += GHASH_BLOCK_SIZE;
- }
-
- crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
- gf128mul_lle(&dst, &key->k);
- } while (--blocks);
-
- dg[0] = be64_to_cpu(dst.b);
- dg[1] = be64_to_cpu(dst.a);
- }
+ kernel_neon_begin();
+ if (static_branch_likely(&use_p64))
+ pmull_ghash_update_p64(blocks, dg, src, key->h, head);
+ else
+ pmull_ghash_update_p8(blocks, dg, src, key->h, head);
+ kernel_neon_end();
}
static int ghash_update(struct shash_desc *desc, const u8 *src,
@@ -206,162 +180,13 @@ static struct shash_alg ghash_alg = {
.descsize = sizeof(struct ghash_desc_ctx),
.base.cra_name = "ghash",
- .base.cra_driver_name = "ghash-ce-sync",
- .base.cra_priority = 300 - 1,
+ .base.cra_driver_name = "ghash-ce",
+ .base.cra_priority = 300,
.base.cra_blocksize = GHASH_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]),
.base.cra_module = THIS_MODULE,
};
-static int ghash_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
-
- desc->tfm = child;
- return crypto_shash_init(desc);
-}
-
-static int ghash_async_update(struct ahash_request *req)
-{
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
- if (!crypto_simd_usable() ||
- (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
- memcpy(cryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
- return crypto_ahash_update(cryptd_req);
- } else {
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- return shash_ahash_update(req, desc);
- }
-}
-
-static int ghash_async_final(struct ahash_request *req)
-{
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
- if (!crypto_simd_usable() ||
- (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
- memcpy(cryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
- return crypto_ahash_final(cryptd_req);
- } else {
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- return crypto_shash_final(desc, req->result);
- }
-}
-
-static int ghash_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
-
- if (!crypto_simd_usable() ||
- (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
- memcpy(cryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
- return crypto_ahash_digest(cryptd_req);
- } else {
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
- struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
-
- desc->tfm = child;
- return shash_ahash_digest(req, desc);
- }
-}
-
-static int ghash_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
-
- desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
-
- return crypto_shash_import(desc, in);
-}
-
-static int ghash_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *cryptd_req = ahash_request_ctx(req);
- struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
-
- return crypto_shash_export(desc, out);
-}
-
-static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
- struct crypto_ahash *child = &ctx->cryptd_tfm->base;
-
- crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
- & CRYPTO_TFM_REQ_MASK);
- return crypto_ahash_setkey(child, key, keylen);
-}
-
-static int ghash_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct cryptd_ahash *cryptd_tfm;
- struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cryptd_tfm = cryptd_alloc_ahash("ghash-ce-sync", 0, 0);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
- ctx->cryptd_tfm = cryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&cryptd_tfm->base));
-
- return 0;
-}
-
-static void ghash_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cryptd_free_ahash(ctx->cryptd_tfm);
-}
-
-static struct ahash_alg ghash_async_alg = {
- .init = ghash_async_init,
- .update = ghash_async_update,
- .final = ghash_async_final,
- .setkey = ghash_async_setkey,
- .digest = ghash_async_digest,
- .import = ghash_async_import,
- .export = ghash_async_export,
- .halg.digestsize = GHASH_DIGEST_SIZE,
- .halg.statesize = sizeof(struct ghash_desc_ctx),
- .halg.base = {
- .cra_name = "ghash",
- .cra_driver_name = "ghash-ce",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ghash_async_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = ghash_async_init_tfm,
- .cra_exit = ghash_async_exit_tfm,
- },
-};
-
-
void pmull_gcm_encrypt(int blocks, u64 dg[], const char *src,
struct gcm_key const *k, char *dst,
const char *iv, int rounds, u32 counter);
@@ -459,17 +284,11 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len)
scatterwalk_start(&walk, req->src);
do {
- u32 n = scatterwalk_clamp(&walk, len);
- u8 *p;
-
- if (!n) {
- scatterwalk_start(&walk, sg_next(walk.sg));
- n = scatterwalk_clamp(&walk, len);
- }
+ unsigned int n;
- p = scatterwalk_map(&walk);
- gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
- scatterwalk_unmap(p);
+ n = scatterwalk_next(&walk, len);
+ gcm_update_mac(dg, walk.addr, n, buf, &buf_count, ctx);
+ scatterwalk_done_src(&walk, n);
if (unlikely(len / SZ_4K > (len - n) / SZ_4K)) {
kernel_neon_end();
@@ -477,8 +296,6 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len)
}
len -= n;
- scatterwalk_advance(&walk, n);
- scatterwalk_done(&walk, 0, len);
} while (len);
if (buf_count) {
@@ -767,14 +584,9 @@ static int __init ghash_ce_mod_init(void)
err = crypto_register_shash(&ghash_alg);
if (err)
goto err_aead;
- err = crypto_register_ahash(&ghash_async_alg);
- if (err)
- goto err_shash;
return 0;
-err_shash:
- crypto_unregister_shash(&ghash_alg);
err_aead:
if (elf_hwcap2 & HWCAP2_PMULL)
crypto_unregister_aeads(gcm_aes_algs,
@@ -784,7 +596,6 @@ err_aead:
static void __exit ghash_ce_mod_exit(void)
{
- crypto_unregister_ahash(&ghash_async_alg);
crypto_unregister_shash(&ghash_alg);
if (elf_hwcap2 & HWCAP2_PMULL)
crypto_unregister_aeads(gcm_aes_algs,
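
The AAD hashing loop in gcm_calculate_auth_mac() above moves from the scatterwalk_map()/scatterwalk_clamp()/scatterwalk_advance()/scatterwalk_done() sequence to the newer scatterwalk_next()/scatterwalk_done_src() pair, which map a chunk, expose it as walk.addr and advance in one step. A minimal sketch of that source-only walk pattern as the hunk uses it (process() stands in for gcm_update_mac()):

#include <crypto/scatterwalk.h>

static void walk_src_only_sketch(struct scatterlist *sg, unsigned int len,
                                 void (*process)(const u8 *p, unsigned int n))
{
    struct scatter_walk walk;

    scatterwalk_start(&walk, sg);
    while (len) {
        /* Maps the next contiguous chunk; returns n <= len. */
        unsigned int n = scatterwalk_next(&walk, len);

        process(walk.addr, n);
        /* Unmaps and advances; no flush needed for a read-only source. */
        scatterwalk_done_src(&walk, n);
        len -= n;
    }
}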
diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
index c31bd8f7c092..4464ffbf8fd1 100644
--- a/arch/arm/crypto/poly1305-glue.c
+++ b/arch/arm/crypto/poly1305-glue.c
@@ -8,7 +8,7 @@
#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
@@ -267,6 +267,7 @@ static void __exit arm_poly1305_mod_exit(void)
module_init(arm_poly1305_mod_init);
module_exit(arm_poly1305_mod_exit);
+MODULE_DESCRIPTION("Accelerated Poly1305 transform for ARM");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-arm");
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index c62ce89dd3e0..aeac45bfbf9f 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -16,7 +16,7 @@
#include <asm/hwcap.h>
#include <asm/simd.h>
#include <asm/neon.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "sha256_glue.h"