path: root/crypto
author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:10:50 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 18:10:50 -0800
commit     36289a03bcd3aabdf66de75cb6d1b4ee15726438 (patch)
tree       1230c6391678f9255f74d7a4f65e95ea8a39d452 /crypto
parent     69308402ca6f5b80a5a090ade0b13bd146891420 (diff)
parent     8b84475318641c2b89320859332544cf187e1cbd (diff)
Merge tag 'v6.3-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "API:
   - Use kmap_local instead of kmap_atomic
   - Change request callback to take void pointer
   - Print FIPS status in /proc/crypto (when enabled)

  Algorithms:
   - Add rfc4106/gcm support on arm64
   - Add ARIA AVX2/512 support on x86

  Drivers:
   - Add TRNG driver for StarFive SoC
   - Delete ux500/hash driver (subsumed by stm32/hash)
   - Add zlib support in qat
   - Add RSA support in aspeed"

* tag 'v6.3-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (156 commits)
  crypto: x86/aria-avx - Do not use avx2 instructions
  crypto: aspeed - Fix modular aspeed-acry
  crypto: hisilicon/qm - fix coding style issues
  crypto: hisilicon/qm - update comments to match function
  crypto: hisilicon/qm - change function names
  crypto: hisilicon/qm - use min() instead of min_t()
  crypto: hisilicon/qm - remove some unused defines
  crypto: proc - Print fips status
  crypto: crypto4xx - Call dma_unmap_page when done
  crypto: octeontx2 - Fix objects shared between several modules
  crypto: nx - Fix sparse warnings
  crypto: ecc - Silence sparse warning
  tls: Pass rec instead of aead_req into tls_encrypt_done
  crypto: api - Remove completion function scaffolding
  tls: Remove completion function scaffolding
  tipc: Remove completion function scaffolding
  net: ipv6: Remove completion function scaffolding
  net: ipv4: Remove completion function scaffolding
  net: macsec: Remove completion function scaffolding
  dm: Remove completion function scaffolding
  ...
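The change that touches nearly every file below is the new completion callback signature: callbacks now receive their context pointer directly as void *data instead of fishing it out of a struct crypto_async_request. A minimal before/after sketch of the conversion pattern (function names hypothetical):

    #include <crypto/internal/skcipher.h>

    /* Old style: the core passed the async request; the callback
     * recovered the caller's context from its ->data member. */
    static void example_done_old(struct crypto_async_request *areq, int err)
    {
            struct skcipher_request *req = areq->data;

            skcipher_request_complete(req, err);
    }

    /* New style: the core passes ->data directly, so the callback
     * never sees the async request at all. */
    static void example_done_new(void *data, int err)
    {
            struct skcipher_request *req = data;

            skcipher_request_complete(req, err);
    }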
Diffstat (limited to 'crypto')
-rw-r--r--crypto/adiantum.c5
-rw-r--r--crypto/af_alg.c6
-rw-r--r--crypto/ahash.c195
-rw-r--r--crypto/api.c4
-rw-r--r--crypto/aria_generic.c4
-rw-r--r--crypto/authenc.c14
-rw-r--r--crypto/authencesn.c15
-rw-r--r--crypto/ccm.c9
-rw-r--r--crypto/chacha20poly1305.c40
-rw-r--r--crypto/cryptd.c290
-rw-r--r--crypto/crypto_engine.c8
-rw-r--r--crypto/cts.c12
-rw-r--r--crypto/dh.c5
-rw-r--r--crypto/ecc.c6
-rw-r--r--crypto/essiv.c15
-rw-r--r--crypto/gcm.c36
-rw-r--r--crypto/hctr2.c5
-rw-r--r--crypto/lrw.c4
-rw-r--r--crypto/pcrypt.c4
-rw-r--r--crypto/proc.c6
-rw-r--r--crypto/rsa-pkcs1pad.c51
-rw-r--r--crypto/seqiv.c7
-rw-r--r--crypto/shash.c4
-rw-r--r--crypto/skcipher.c22
-rw-r--r--crypto/tcrypt.c8
-rw-r--r--crypto/tcrypt.h2
-rw-r--r--crypto/testmgr.c16
-rw-r--r--crypto/wp512.c2
-rw-r--r--crypto/xts.c20
29 files changed, 400 insertions, 415 deletions
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 84450130cb6b..c33ba22a6638 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -308,10 +308,9 @@ static int adiantum_finish(struct skcipher_request *req)
return 0;
}
-static void adiantum_streamcipher_done(struct crypto_async_request *areq,
- int err)
+static void adiantum_streamcipher_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err)
err = adiantum_finish(req);
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 0a4fa2a429e2..5f7252a5b7b4 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1186,7 +1186,7 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources);
/**
* af_alg_async_cb - AIO callback handler
- * @_req: async request info
+ * @data: async request completion data
* @err: if non-zero, error result to be returned via ki_complete();
* otherwise return the AIO output length via ki_complete().
*
@@ -1196,9 +1196,9 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources);
* The number of bytes to be generated with the AIO operation must be set
* in areq->outlen before the AIO callback handler is invoked.
*/
-void af_alg_async_cb(struct crypto_async_request *_req, int err)
+void af_alg_async_cb(void *data, int err)
{
- struct af_alg_async_req *areq = _req->data;
+ struct af_alg_async_req *areq = data;
struct sock *sk = areq->sk;
struct kiocb *iocb = areq->iocb;
unsigned int resultlen;
diff --git a/crypto/ahash.c b/crypto/ahash.c
index c2ca631a111f..ff8c79d975c1 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -45,7 +45,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
- walk->data = kmap_atomic(walk->pg);
+ walk->data = kmap_local_page(walk->pg);
walk->data += offset;
if (offset & alignmask) {
@@ -95,7 +95,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
}
}
- kunmap_atomic(walk->data);
+ kunmap_local(walk->data);
crypto_yield(walk->flags);
if (err)
@@ -190,133 +190,98 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
-static inline unsigned int ahash_align_buffer_size(unsigned len,
- unsigned long mask)
-{
- return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
-}
-
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
+static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
+ bool has_state)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
unsigned int ds = crypto_ahash_digestsize(tfm);
- struct ahash_request_priv *priv;
+ struct ahash_request *subreq;
+ unsigned int subreq_size;
+ unsigned int reqsize;
+ u8 *result;
+ gfp_t gfp;
+ u32 flags;
- priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
- (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC);
- if (!priv)
+ subreq_size = sizeof(*subreq);
+ reqsize = crypto_ahash_reqsize(tfm);
+ reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
+ subreq_size += reqsize;
+ subreq_size += ds;
+ subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
+
+ flags = ahash_request_flags(req);
+ gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
+ subreq = kmalloc(subreq_size, gfp);
+ if (!subreq)
return -ENOMEM;
- /*
- * WARNING: Voodoo programming below!
- *
- * The code below is obscure and hard to understand, thus explanation
- * is necessary. See include/crypto/hash.h and include/linux/crypto.h
- * to understand the layout of structures used here!
- *
- * The code here will replace portions of the ORIGINAL request with
- * pointers to new code and buffers so the hashing operation can store
- * the result in aligned buffer. We will call the modified request
- * an ADJUSTED request.
- *
- * The newly mangled request will look as such:
- *
- * req {
- * .result = ADJUSTED[new aligned buffer]
- * .base.complete = ADJUSTED[pointer to completion function]
- * .base.data = ADJUSTED[*req (pointer to self)]
- * .priv = ADJUSTED[new priv] {
- * .result = ORIGINAL(result)
- * .complete = ORIGINAL(base.complete)
- * .data = ORIGINAL(base.data)
- * }
- */
-
- priv->result = req->result;
- priv->complete = req->base.complete;
- priv->data = req->base.data;
- priv->flags = req->base.flags;
-
- /*
- * WARNING: We do not backup req->priv here! The req->priv
- * is for internal use of the Crypto API and the
- * user must _NOT_ _EVER_ depend on it's content!
- */
-
- req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
- req->base.complete = cplt;
- req->base.data = req;
- req->priv = priv;
+ ahash_request_set_tfm(subreq, tfm);
+ ahash_request_set_callback(subreq, flags, cplt, req);
+
+ result = (u8 *)(subreq + 1) + reqsize;
+ result = PTR_ALIGN(result, alignmask + 1);
+
+ ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
+
+ if (has_state) {
+ void *state;
+
+ state = kmalloc(crypto_ahash_statesize(tfm), gfp);
+ if (!state) {
+ kfree(subreq);
+ return -ENOMEM;
+ }
+
+ crypto_ahash_export(req, state);
+ crypto_ahash_import(subreq, state);
+ kfree_sensitive(state);
+ }
+
+ req->priv = subreq;
return 0;
}
static void ahash_restore_req(struct ahash_request *req, int err)
{
- struct ahash_request_priv *priv = req->priv;
+ struct ahash_request *subreq = req->priv;
if (!err)
- memcpy(priv->result, req->result,
+ memcpy(req->result, subreq->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
- /* Restore the original crypto request. */
- req->result = priv->result;
-
- ahash_request_set_callback(req, priv->flags,
- priv->complete, priv->data);
req->priv = NULL;
- /* Free the req->priv.priv from the ADJUSTED request. */
- kfree_sensitive(priv);
-}
-
-static void ahash_notify_einprogress(struct ahash_request *req)
-{
- struct ahash_request_priv *priv = req->priv;
- struct crypto_async_request oreq;
-
- oreq.data = priv->data;
-
- priv->complete(&oreq, -EINPROGRESS);
+ kfree_sensitive(subreq);
}
-static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
+static void ahash_op_unaligned_done(void *data, int err)
{
- struct ahash_request *areq = req->data;
-
- if (err == -EINPROGRESS) {
- ahash_notify_einprogress(areq);
- return;
- }
+ struct ahash_request *areq = data;
- /*
- * Restore the original request, see ahash_op_unaligned() for what
- * goes where.
- *
- * The "struct ahash_request *req" here is in fact the "req.base"
- * from the ADJUSTED request from ahash_op_unaligned(), thus as it
- * is a pointer to self, it is also the ADJUSTED "req" .
- */
+ if (err == -EINPROGRESS)
+ goto out;
/* First copy req->result into req->priv.result */
ahash_restore_req(areq, err);
+out:
/* Complete the ORIGINAL request. */
- areq->base.complete(&areq->base, err);
+ ahash_request_complete(areq, err);
}
static int ahash_op_unaligned(struct ahash_request *req,
- int (*op)(struct ahash_request *))
+ int (*op)(struct ahash_request *),
+ bool has_state)
{
int err;
- err = ahash_save_req(req, ahash_op_unaligned_done);
+ err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
if (err)
return err;
- err = op(req);
+ err = op(req->priv);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
@@ -326,13 +291,14 @@ static int ahash_op_unaligned(struct ahash_request *req,
}
static int crypto_ahash_op(struct ahash_request *req,
- int (*op)(struct ahash_request *))
+ int (*op)(struct ahash_request *),
+ bool has_state)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
if ((unsigned long)req->result & alignmask)
- return ahash_op_unaligned(req, op);
+ return ahash_op_unaligned(req, op, has_state);
return op(req);
}
@@ -345,7 +311,7 @@ int crypto_ahash_final(struct ahash_request *req)
int ret;
crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+ ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final, true);
crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
}
@@ -359,7 +325,7 @@ int crypto_ahash_finup(struct ahash_request *req)
int ret;
crypto_stats_get(alg);
- ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+ ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup, true);
crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
}
@@ -376,32 +342,34 @@ int crypto_ahash_digest(struct ahash_request *req)
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_ahash_op(req, tfm->digest);
+ ret = crypto_ahash_op(req, tfm->digest, false);
crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
+static void ahash_def_finup_done2(void *data, int err)
{
- struct ahash_request *areq = req->data;
+ struct ahash_request *areq = data;
if (err == -EINPROGRESS)
return;
ahash_restore_req(areq, err);
- areq->base.complete(&areq->base, err);
+ ahash_request_complete(areq, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
+ struct ahash_request *subreq = req->priv;
+
if (err)
goto out;
- req->base.complete = ahash_def_finup_done2;
+ subreq->base.complete = ahash_def_finup_done2;
- err = crypto_ahash_reqtfm(req)->final(req);
+ err = crypto_ahash_reqtfm(req)->final(subreq);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
@@ -410,22 +378,23 @@ out:
return err;
}
-static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
+static void ahash_def_finup_done1(void *data, int err)
{
- struct ahash_request *areq = req->data;
+ struct ahash_request *areq = data;
+ struct ahash_request *subreq;
- if (err == -EINPROGRESS) {
- ahash_notify_einprogress(areq);
- return;
- }
+ if (err == -EINPROGRESS)
+ goto out;
- areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ subreq = areq->priv;
+ subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
err = ahash_def_finup_finish1(areq, err);
- if (areq->priv)
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
- areq->base.complete(&areq->base, err);
+out:
+ ahash_request_complete(areq, err);
}
static int ahash_def_finup(struct ahash_request *req)
@@ -433,11 +402,11 @@ static int ahash_def_finup(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
int err;
- err = ahash_save_req(req, ahash_def_finup_done1);
+ err = ahash_save_req(req, ahash_def_finup_done1, true);
if (err)
return err;
- err = tfm->update(req);
+ err = tfm->update(req->priv);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
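The largest rewrite above replaces ahash's in-place request mangling (the deleted "voodoo programming" comment) with a self-contained subrequest. A sketch of the single allocation ahash_save_req() now computes, derived from the hunk itself:

    /*
     * Layout of the kmalloc'ed subrequest (illustration only):
     *
     *   +------------------------+  <- subreq
     *   | struct ahash_request   |
     *   +------------------------+  <- (u8 *)(subreq + 1)
     *   | tfm reqsize, rounded   |
     *   | to ctx alignment       |
     *   +------------------------+  <- result = PTR_ALIGN(..., alignmask + 1)
     *   | digest (ds bytes)      |
     *   +------------------------+
     *   | alignment slack        |
     *   +------------------------+
     *
     * The original request is left untouched; req->priv points at the
     * subrequest until ahash_restore_req() copies the digest back and
     * frees it with kfree_sensitive().
     */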
diff --git a/crypto/api.c b/crypto/api.c
index b022702f6436..e67cc63368ed 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -643,9 +643,9 @@ int crypto_has_alg(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
-void crypto_req_done(struct crypto_async_request *req, int err)
+void crypto_req_done(void *data, int err)
{
- struct crypto_wait *wait = req->data;
+ struct crypto_wait *wait = data;
if (err == -EINPROGRESS)
return;
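crypto_req_done() is the standard synchronous-wait callback, and its new signature is what the rest of the series converges on. A minimal usage sketch with the existing crypto_wait helpers (request setup elided, function name hypothetical):

    #include <linux/crypto.h>
    #include <crypto/skcipher.h>

    static int example_encrypt_sync(struct skcipher_request *req)
    {
            DECLARE_CRYPTO_WAIT(wait);

            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                          CRYPTO_TFM_REQ_MAY_SLEEP,
                                          crypto_req_done, &wait);
            /* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY until
             * crypto_req_done() fires, then returns the final status. */
            return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
    }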
diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c
index 4cc29b82b99d..d96dfc4fdde6 100644
--- a/crypto/aria_generic.c
+++ b/crypto/aria_generic.c
@@ -178,6 +178,10 @@ int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;
+ BUILD_BUG_ON(sizeof(ctx->enc_key) != 272);
+ BUILD_BUG_ON(sizeof(ctx->dec_key) != 272);
+ BUILD_BUG_ON(sizeof(int) != sizeof(ctx->rounds));
+
ctx->key_length = key_len;
ctx->rounds = (key_len + 32) / 4;
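A note on the magic number in the new assertions: ARIA expands rounds + 1 sixteen-byte round keys, and the largest (256-bit) key uses 16 rounds, so (16 + 1) * 16 = 272 bytes per key schedule. The BUILD_BUG_ON()s turn any aria_ctx layout drift into a compile-time failure rather than a silent runtime mismatch.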
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 17f674a7cdff..3326c7343e86 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -109,9 +109,9 @@ out:
return err;
}
-static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
+static void authenc_geniv_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
@@ -160,10 +160,9 @@ static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
return 0;
}
-static void crypto_authenc_encrypt_done(struct crypto_async_request *req,
- int err)
+static void crypto_authenc_encrypt_done(void *data, int err)
{
- struct aead_request *areq = req->data;
+ struct aead_request *areq = data;
if (err)
goto out;
@@ -261,10 +260,9 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req,
return crypto_skcipher_decrypt(skreq);
}
-static void authenc_verify_ahash_done(struct crypto_async_request *areq,
- int err)
+static void authenc_verify_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index b60e61b1904c..91424e791d5c 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -107,10 +107,9 @@ static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
return 0;
}
-static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
- int err)
+static void authenc_esn_geniv_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
err = err ?: crypto_authenc_esn_genicv_tail(req, 0);
aead_request_complete(req, err);
@@ -153,10 +152,9 @@ static int crypto_authenc_esn_genicv(struct aead_request *req,
}
-static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
- int err)
+static void crypto_authenc_esn_encrypt_done(void *data, int err)
{
- struct aead_request *areq = req->data;
+ struct aead_request *areq = data;
if (!err)
err = crypto_authenc_esn_genicv(areq, 0);
@@ -258,10 +256,9 @@ decrypt:
return crypto_skcipher_decrypt(skreq);
}
-static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
- int err)
+static void authenc_esn_verify_ahash_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
authenc_esn_request_complete(req, err);
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 30dbae72728f..a9453129c51c 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -224,9 +224,9 @@ out:
return err;
}
-static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
+static void crypto_ccm_encrypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
u8 *odata = pctx->odata;
@@ -320,10 +320,9 @@ static int crypto_ccm_encrypt(struct aead_request *req)
return err;
}
-static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
- int err)
+static void crypto_ccm_decrypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(aead);
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 97bbb135e9a6..3a905c5d8f53 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -115,9 +115,9 @@ static int poly_copy_tag(struct aead_request *req)
return 0;
}
-static void chacha_decrypt_done(struct crypto_async_request *areq, int err)
+static void chacha_decrypt_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_verify_tag);
+ async_done_continue(data, err, poly_verify_tag);
}
static int chacha_decrypt(struct aead_request *req)
@@ -161,9 +161,9 @@ static int poly_tail_continue(struct aead_request *req)
return chacha_decrypt(req);
}
-static void poly_tail_done(struct crypto_async_request *areq, int err)
+static void poly_tail_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_tail_continue);
+ async_done_continue(data, err, poly_tail_continue);
}
static int poly_tail(struct aead_request *req)
@@ -191,9 +191,9 @@ static int poly_tail(struct aead_request *req)
return poly_tail_continue(req);
}
-static void poly_cipherpad_done(struct crypto_async_request *areq, int err)
+static void poly_cipherpad_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_tail);
+ async_done_continue(data, err, poly_tail);
}
static int poly_cipherpad(struct aead_request *req)
@@ -220,9 +220,9 @@ static int poly_cipherpad(struct aead_request *req)
return poly_tail(req);
}
-static void poly_cipher_done(struct crypto_async_request *areq, int err)
+static void poly_cipher_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_cipherpad);
+ async_done_continue(data, err, poly_cipherpad);
}
static int poly_cipher(struct aead_request *req)
@@ -250,9 +250,9 @@ static int poly_cipher(struct aead_request *req)
return poly_cipherpad(req);
}
-static void poly_adpad_done(struct crypto_async_request *areq, int err)
+static void poly_adpad_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_cipher);
+ async_done_continue(data, err, poly_cipher);
}
static int poly_adpad(struct aead_request *req)
@@ -279,9 +279,9 @@ static int poly_adpad(struct aead_request *req)
return poly_cipher(req);
}
-static void poly_ad_done(struct crypto_async_request *areq, int err)
+static void poly_ad_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_adpad);
+ async_done_continue(data, err, poly_adpad);
}
static int poly_ad(struct aead_request *req)
@@ -303,9 +303,9 @@ static int poly_ad(struct aead_request *req)
return poly_adpad(req);
}
-static void poly_setkey_done(struct crypto_async_request *areq, int err)
+static void poly_setkey_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_ad);
+ async_done_continue(data, err, poly_ad);
}
static int poly_setkey(struct aead_request *req)
@@ -329,9 +329,9 @@ static int poly_setkey(struct aead_request *req)
return poly_ad(req);
}
-static void poly_init_done(struct crypto_async_request *areq, int err)
+static void poly_init_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_setkey);
+ async_done_continue(data, err, poly_setkey);
}
static int poly_init(struct aead_request *req)
@@ -352,9 +352,9 @@ static int poly_init(struct aead_request *req)
return poly_setkey(req);
}
-static void poly_genkey_done(struct crypto_async_request *areq, int err)
+static void poly_genkey_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_init);
+ async_done_continue(data, err, poly_init);
}
static int poly_genkey(struct aead_request *req)
@@ -391,9 +391,9 @@ static int poly_genkey(struct aead_request *req)
return poly_init(req);
}
-static void chacha_encrypt_done(struct crypto_async_request *areq, int err)
+static void chacha_encrypt_done(void *data, int err)
{
- async_done_continue(areq->data, err, poly_genkey);
+ async_done_continue(data, err, poly_genkey);
}
static int chacha_encrypt(struct aead_request *req)
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index ca3a40fc7da9..37365ed30b38 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -72,7 +72,6 @@ struct cryptd_skcipher_ctx {
};
struct cryptd_skcipher_request_ctx {
- crypto_completion_t complete;
struct skcipher_request req;
};
@@ -83,6 +82,7 @@ struct cryptd_hash_ctx {
struct cryptd_hash_request_ctx {
crypto_completion_t complete;
+ void *data;
struct shash_desc desc;
};
@@ -92,7 +92,7 @@ struct cryptd_aead_ctx {
};
struct cryptd_aead_request_ctx {
- crypto_completion_t complete;
+ struct aead_request req;
};
static void cryptd_queue_worker(struct work_struct *work);
@@ -177,8 +177,8 @@ static void cryptd_queue_worker(struct work_struct *work)
return;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
- req->complete(req, 0);
+ crypto_request_complete(backlog, -EINPROGRESS);
+ crypto_request_complete(req, 0);
if (cpu_queue->queue.qlen)
queue_work(cryptd_wq, &cpu_queue->work);
@@ -237,33 +237,22 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
return crypto_skcipher_setkey(child, key, keylen);
}
-static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
- int refcnt = refcount_read(&ctx->refcnt);
-
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-
- if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
- crypto_free_skcipher(tfm);
-}
-
-static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
- int err)
+static struct skcipher_request *cryptd_skcipher_prepare(
+ struct skcipher_request *req, int err)
{
- struct skcipher_request *req = skcipher_request_cast(base);
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq = &rctx->req;
- struct crypto_skcipher *child = ctx->child;
+ struct cryptd_skcipher_ctx *ctx;
+ struct crypto_skcipher *child;
+
+ req->base.complete = subreq->base.complete;
+ req->base.data = subreq->base.data;
if (unlikely(err == -EINPROGRESS))
- goto out;
+ return NULL;
+
+ ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ child = ctx->child;
skcipher_request_set_tfm(subreq, child);
skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
@@ -271,41 +260,53 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
- err = crypto_skcipher_encrypt(subreq);
- skcipher_request_zero(subreq);
-
- req->base.complete = rctx->complete;
-
-out:
- cryptd_skcipher_complete(req, err);
+ return subreq;
}
-static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
- int err)
+static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
+ crypto_completion_t complete)
{
- struct skcipher_request *req = skcipher_request_cast(base);
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq = &rctx->req;
- struct crypto_skcipher *child = ctx->child;
+ int refcnt = refcount_read(&ctx->refcnt);
- if (unlikely(err == -EINPROGRESS))
- goto out;
+ local_bh_disable();
+ skcipher_request_complete(req, err);
+ local_bh_enable();
- skcipher_request_set_tfm(subreq, child);
- skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
+ if (unlikely(err == -EINPROGRESS)) {
+ subreq->base.complete = req->base.complete;
+ subreq->base.data = req->base.data;
+ req->base.complete = complete;
+ req->base.data = req;
+ } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
+ crypto_free_skcipher(tfm);
+}
- err = crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
+static void cryptd_skcipher_encrypt(void *data, int err)
+{
+ struct skcipher_request *req = data;
+ struct skcipher_request *subreq;
- req->base.complete = rctx->complete;
+ subreq = cryptd_skcipher_prepare(req, err);
+ if (likely(subreq))
+ err = crypto_skcipher_encrypt(subreq);
-out:
- cryptd_skcipher_complete(req, err);
+ cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
+}
+
+static void cryptd_skcipher_decrypt(void *data, int err)
+{
+ struct skcipher_request *req = data;
+ struct skcipher_request *subreq;
+
+ subreq = cryptd_skcipher_prepare(req, err);
+ if (likely(subreq))
+ err = crypto_skcipher_decrypt(subreq);
+
+ cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
}
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
@@ -313,11 +314,14 @@ static int cryptd_skcipher_enqueue(struct skcipher_request *req,
{
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct skcipher_request *subreq = &rctx->req;
struct cryptd_queue *queue;
queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
- rctx->complete = req->base.complete;
+ subreq->base.complete = req->base.complete;
+ subreq->base.data = req->base.data;
req->base.complete = compl;
+ req->base.data = req;
return cryptd_enqueue_request(queue, &req->base);
}
@@ -470,45 +474,63 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
cryptd_get_queue(crypto_ahash_tfm(tfm));
rctx->complete = req->base.complete;
+ rctx->data = req->base.data;
req->base.complete = compl;
+ req->base.data = req;
return cryptd_enqueue_request(queue, &req->base);
}
-static void cryptd_hash_complete(struct ahash_request *req, int err)
+static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
+ int err)
+{
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ req->base.complete = rctx->complete;
+ req->base.data = rctx->data;
+
+ if (unlikely(err == -EINPROGRESS))
+ return NULL;
+
+ return &rctx->desc;
+}
+
+static void cryptd_hash_complete(struct ahash_request *req, int err,
+ crypto_completion_t complete)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
int refcnt = refcount_read(&ctx->refcnt);
local_bh_disable();
- rctx->complete(&req->base, err);
+ ahash_request_complete(req, err);
local_bh_enable();
- if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
+ if (err == -EINPROGRESS) {
+ req->base.complete = complete;
+ req->base.data = req;
+ } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
crypto_free_ahash(tfm);
}
-static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_init(void *data, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct ahash_request *req = data;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct shash_desc *desc = &rctx->desc;
+ struct shash_desc *desc;
- if (unlikely(err == -EINPROGRESS))
+ desc = cryptd_hash_prepare(req, err);
+ if (unlikely(!desc))
goto out;
desc->tfm = child;
err = crypto_shash_init(desc);
- req->base.complete = rctx->complete;
-
out:
- cryptd_hash_complete(req, err);
+ cryptd_hash_complete(req, err, cryptd_hash_init);
}
static int cryptd_hash_init_enqueue(struct ahash_request *req)
@@ -516,22 +538,16 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_init);
}
-static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_update(void *data, int err)
{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
+ struct ahash_request *req = data;
+ struct shash_desc *desc;
- rctx = ahash_request_ctx(req);
+ desc = cryptd_hash_prepare(req, err);
+ if (likely(desc))
+ err = shash_ahash_update(req, desc);
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- err = shash_ahash_update(req, &rctx->desc);
-
- req->base.complete = rctx->complete;
-
-out:
- cryptd_hash_complete(req, err);
+ cryptd_hash_complete(req, err, cryptd_hash_update);
}
static int cryptd_hash_update_enqueue(struct ahash_request *req)
@@ -539,20 +555,16 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_update);
}
-static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_final(void *data, int err)
{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
+ struct ahash_request *req = data;
+ struct shash_desc *desc;
- err = crypto_shash_final(&rctx->desc, req->result);
+ desc = cryptd_hash_prepare(req, err);
+ if (likely(desc))
+ err = crypto_shash_final(desc, req->result);
- req->base.complete = rctx->complete;
-
-out:
- cryptd_hash_complete(req, err);
+ cryptd_hash_complete(req, err, cryptd_hash_final);
}
static int cryptd_hash_final_enqueue(struct ahash_request *req)
@@ -560,20 +572,16 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_final);
}
-static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_finup(void *data, int err)
{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
+ struct ahash_request *req = data;
+ struct shash_desc *desc;
- err = shash_ahash_finup(req, &rctx->desc);
+ desc = cryptd_hash_prepare(req, err);
+ if (likely(desc))
+ err = shash_ahash_finup(req, desc);
- req->base.complete = rctx->complete;
-
-out:
- cryptd_hash_complete(req, err);
+ cryptd_hash_complete(req, err, cryptd_hash_finup);
}
static int cryptd_hash_finup_enqueue(struct ahash_request *req)
@@ -581,25 +589,24 @@ static int cryptd_hash_finup_enqueue(struct ahash_request *req)
return cryptd_hash_enqueue(req, cryptd_hash_finup);
}
-static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
+static void cryptd_hash_digest(void *data, int err)
{
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
+ struct ahash_request *req = data;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct shash_desc *desc = &rctx->desc;
+ struct shash_desc *desc;
- if (unlikely(err == -EINPROGRESS))
+ desc = cryptd_hash_prepare(req, err);
+ if (unlikely(!desc))
goto out;
desc->tfm = child;
err = shash_ahash_digest(req, desc);
- req->base.complete = rctx->complete;
-
out:
- cryptd_hash_complete(req, err);
+ cryptd_hash_complete(req, err, cryptd_hash_digest);
}
static int cryptd_hash_digest_enqueue(struct ahash_request *req)
@@ -712,56 +719,74 @@ static int cryptd_aead_setauthsize(struct crypto_aead *parent,
}
static void cryptd_aead_crypt(struct aead_request *req,
- struct crypto_aead *child,
- int err,
- int (*crypt)(struct aead_request *req))
+ struct crypto_aead *child, int err,
+ int (*crypt)(struct aead_request *req),
+ crypto_completion_t compl)
{
struct cryptd_aead_request_ctx *rctx;
+ struct aead_request *subreq;
struct cryptd_aead_ctx *ctx;
- crypto_completion_t compl;
struct crypto_aead *tfm;
int refcnt;
rctx = aead_request_ctx(req);
- compl = rctx->complete;
+ subreq = &rctx->req;
+ req->base.complete = subreq->base.complete;
+ req->base.data = subreq->base.data;
tfm = crypto_aead_reqtfm(req);
if (unlikely(err == -EINPROGRESS))
goto out;
- aead_request_set_tfm(req, child);
- err = crypt( req );
+
+ aead_request_set_tfm(subreq, child);
+ aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ err = crypt(subreq);
out:
ctx = crypto_aead_ctx(tfm);
refcnt = refcount_read(&ctx->refcnt);
local_bh_disable();
- compl(&req->base, err);
+ aead_request_complete(req, err);
local_bh_enable();
- if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
+ if (err == -EINPROGRESS) {
+ subreq->base.complete = req->base.complete;
+ subreq->base.data = req->base.data;
+ req->base.complete = compl;
+ req->base.data = req;
+ } else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
crypto_free_aead(tfm);
}
-static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
+static void cryptd_aead_encrypt(void *data, int err)
{
- struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
- struct crypto_aead *child = ctx->child;
- struct aead_request *req;
+ struct aead_request *req = data;
+ struct cryptd_aead_ctx *ctx;
+ struct crypto_aead *child;
- req = container_of(areq, struct aead_request, base);
- cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
+ ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ child = ctx->child;
+ cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
+ cryptd_aead_encrypt);
}
-static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
+static void cryptd_aead_decrypt(void *data, int err)
{
- struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
- struct crypto_aead *child = ctx->child;
- struct aead_request *req;
+ struct aead_request *req = data;
+ struct cryptd_aead_ctx *ctx;
+ struct crypto_aead *child;
- req = container_of(areq, struct aead_request, base);
- cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
+ ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ child = ctx->child;
+ cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
+ cryptd_aead_decrypt);
}
static int cryptd_aead_enqueue(struct aead_request *req,
@@ -770,9 +795,12 @@ static int cryptd_aead_enqueue(struct aead_request *req,
struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
+ struct aead_request *subreq = &rctx->req;
- rctx->complete = req->base.complete;
+ subreq->base.complete = req->base.complete;
+ subreq->base.data = req->base.data;
req->base.complete = compl;
+ req->base.data = req;
return cryptd_enqueue_request(queue, &req->base);
}
@@ -800,8 +828,8 @@ static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
ctx->child = cipher;
crypto_aead_set_reqsize(
- tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
- crypto_aead_reqsize(cipher)));
+ tfm, sizeof(struct cryptd_aead_request_ctx) +
+ crypto_aead_reqsize(cipher));
return 0;
}
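The cryptd conversion is subtler than the mechanical ones because cryptd's callbacks re-enter themselves. A condensed sketch of the completion-pointer dance in the skcipher path above (error paths elided):

    /*
     * enqueue:  stash the caller's (complete, data) in the subrequest,
     *           point req->base at (step_cb, req), queue req.
     * step_cb:  prepare(): swap the caller's (complete, data) back into
     *           req->base, then run the child operation.
     *           complete(): deliver the result under local_bh_disable();
     *           if the child returned -EINPROGRESS, re-arm by swapping
     *           (step_cb, req) back in so the next invocation repeats
     *           the cycle; otherwise drop the tfm refcount.
     */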
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index bb8e77077f02..21f791615114 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -54,7 +54,7 @@ static void crypto_finalize_request(struct crypto_engine *engine,
}
}
lockdep_assert_in_softirq();
- req->complete(req, err);
+ crypto_request_complete(req, err);
kthread_queue_work(engine->kworker, &engine->pump_requests);
}
@@ -130,7 +130,7 @@ start_request:
engine->cur_req = async_req;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
if (engine->busy)
was_busy = true;
@@ -214,7 +214,7 @@ req_err_1:
}
req_err_2:
- async_req->complete(async_req, ret);
+ crypto_request_complete(async_req, ret);
retry:
/* If retry mechanism is supported, send new requests to engine */
@@ -499,7 +499,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
* This has the form:
* callback(struct crypto_engine *engine)
* where:
- * @engine: the crypto engine structure.
+ * engine: the crypto engine structure.
* @rt: whether this queue is set to run as a realtime task
* @qlen: maximum size of the crypto-engine queue
*
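crypto_request_complete() replaces the direct ->complete() calls; with the new callback signature the helper is essentially this one-liner (as introduced earlier in this series):

    static inline void crypto_request_complete(struct crypto_async_request *req,
                                               int err)
    {
            req->complete(req->data, err);
    }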
diff --git a/crypto/cts.c b/crypto/cts.c
index 3766d47ebcc0..8f604f6554b1 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -85,9 +85,9 @@ static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key,
return crypto_skcipher_setkey(child, key, keylen);
}
-static void cts_cbc_crypt_done(struct crypto_async_request *areq, int err)
+static void cts_cbc_crypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (err == -EINPROGRESS)
return;
@@ -125,9 +125,9 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
return crypto_skcipher_encrypt(subreq);
}
-static void crypto_cts_encrypt_done(struct crypto_async_request *areq, int err)
+static void crypto_cts_encrypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (err)
goto out;
@@ -219,9 +219,9 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
return crypto_skcipher_decrypt(subreq);
}
-static void crypto_cts_decrypt_done(struct crypto_async_request *areq, int err)
+static void crypto_cts_decrypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (err)
goto out;
diff --git a/crypto/dh.c b/crypto/dh.c
index e39c1bde1ac0..0fcad279e6fe 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -503,10 +503,9 @@ out:
return err;
}
-static void dh_safe_prime_complete_req(struct crypto_async_request *dh_req,
- int err)
+static void dh_safe_prime_complete_req(void *data, int err)
{
- struct kpp_request *req = dh_req->data;
+ struct kpp_request *req = data;
kpp_request_complete(req, err);
}
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 7315217c8f73..f53fb4d6af99 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1384,7 +1384,8 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
num_bits = max(vli_num_bits(u1, ndigits), vli_num_bits(u2, ndigits));
i = num_bits - 1;
- idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
+ idx = !!vli_test_bit(u1, i);
+ idx |= (!!vli_test_bit(u2, i)) << 1;
point = points[idx];
vli_set(rx, point->x, ndigits);
@@ -1394,7 +1395,8 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
for (--i; i >= 0; i--) {
ecc_point_double_jacobian(rx, ry, z, curve);
- idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
+ idx = !!vli_test_bit(u1, i);
+ idx |= (!!vli_test_bit(u2, i)) << 1;
point = points[idx];
if (point) {
u64 tx[ECC_MAX_DIGITS];
diff --git a/crypto/essiv.c b/crypto/essiv.c
index e33369df9034..f7d4ef4837e5 100644
--- a/crypto/essiv.c
+++ b/crypto/essiv.c
@@ -131,9 +131,9 @@ static int essiv_aead_setauthsize(struct crypto_aead *tfm,
return crypto_aead_setauthsize(tctx->u.aead, authsize);
}
-static void essiv_skcipher_done(struct crypto_async_request *areq, int err)
+static void essiv_skcipher_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
skcipher_request_complete(req, err);
}
@@ -166,12 +166,17 @@ static int essiv_skcipher_decrypt(struct skcipher_request *req)
return essiv_skcipher_crypt(req, false);
}
-static void essiv_aead_done(struct crypto_async_request *areq, int err)
+static void essiv_aead_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
+ if (err == -EINPROGRESS)
+ goto out;
+
kfree(rctx->assoc);
+
+out:
aead_request_complete(req, err);
}
@@ -247,7 +252,7 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
err = enc ? crypto_aead_encrypt(subreq) :
crypto_aead_decrypt(subreq);
- if (rctx->assoc && err != -EINPROGRESS)
+ if (rctx->assoc && err != -EINPROGRESS && err != -EBUSY)
kfree(rctx->assoc);
return err;
}
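The essiv hunks also fix a latent double-free: with CRYPTO_TFM_REQ_MAY_BACKLOG set, -EBUSY means the request was backlogged and the callback will still run, so rctx->assoc must be freed exactly once. The contract being relied on, as a sketch:

    /*
     * child returns     callback runs?            who frees rctx->assoc
     * -EINPROGRESS      yes (later, final err)    essiv_aead_done()
     * -EBUSY            yes (-EINPROGRESS first,  essiv_aead_done()
     *                        then final err)
     * anything else     no                        essiv_aead_crypt()
     */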
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 338ee0769747..4ba624450c3f 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -197,7 +197,7 @@ static inline unsigned int gcm_remain(unsigned int len)
return len ? 16 - len : 0;
}
-static void gcm_hash_len_done(struct crypto_async_request *areq, int err);
+static void gcm_hash_len_done(void *data, int err);
static int gcm_hash_update(struct aead_request *req,
crypto_completion_t compl,
@@ -246,9 +246,9 @@ static int gcm_hash_len_continue(struct aead_request *req, u32 flags)
return gctx->complete(req, flags);
}
-static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
+static void gcm_hash_len_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -267,10 +267,9 @@ static int gcm_hash_crypt_remain_continue(struct aead_request *req, u32 flags)
gcm_hash_len_continue(req, flags);
}
-static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
- int err)
+static void gcm_hash_crypt_remain_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -298,9 +297,9 @@ static int gcm_hash_crypt_continue(struct aead_request *req, u32 flags)
return gcm_hash_crypt_remain_continue(req, flags);
}
-static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
+static void gcm_hash_crypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -326,10 +325,9 @@ static int gcm_hash_assoc_remain_continue(struct aead_request *req, u32 flags)
return gcm_hash_crypt_remain_continue(req, flags);
}
-static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
- int err)
+static void gcm_hash_assoc_remain_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -355,9 +353,9 @@ static int gcm_hash_assoc_continue(struct aead_request *req, u32 flags)
return gcm_hash_assoc_remain_continue(req, flags);
}
-static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
+static void gcm_hash_assoc_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -380,9 +378,9 @@ static int gcm_hash_init_continue(struct aead_request *req, u32 flags)
return gcm_hash_assoc_remain_continue(req, flags);
}
-static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
+static void gcm_hash_init_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -433,9 +431,9 @@ static int gcm_encrypt_continue(struct aead_request *req, u32 flags)
return gcm_hash(req, flags);
}
-static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
+static void gcm_encrypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (err)
goto out;
@@ -477,9 +475,9 @@ static int crypto_gcm_verify(struct aead_request *req)
return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
-static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
+static void gcm_decrypt_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
if (!err)
err = crypto_gcm_verify(req);
diff --git a/crypto/hctr2.c b/crypto/hctr2.c
index 7d00a3bcb667..6f4c1884d0e9 100644
--- a/crypto/hctr2.c
+++ b/crypto/hctr2.c
@@ -252,10 +252,9 @@ static int hctr2_finish(struct skcipher_request *req)
return 0;
}
-static void hctr2_xctr_done(struct crypto_async_request *areq,
- int err)
+static void hctr2_xctr_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err)
err = hctr2_finish(req);
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 8d59a66b6525..1b0f76ba3eb5 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -205,9 +205,9 @@ static int lrw_xor_tweak_post(struct skcipher_request *req)
return lrw_xor_tweak(req, true);
}
-static void lrw_crypt_done(struct crypto_async_request *areq, int err)
+static void lrw_crypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err) {
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 9d10b846ccf7..8c1d0ca41213 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -63,9 +63,9 @@ static void pcrypt_aead_serial(struct padata_priv *padata)
aead_request_complete(req->base.data, padata->info);
}
-static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
+static void pcrypt_aead_done(void *data, int err)
{
- struct aead_request *req = areq->data;
+ struct aead_request *req = data;
struct pcrypt_request *preq = aead_request_ctx(req);
struct padata_priv *padata = pcrypt_request_padata(preq);
diff --git a/crypto/proc.c b/crypto/proc.c
index 12fccb9c5205..56c7c78df297 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -11,6 +11,7 @@
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/crypto.h>
+#include <linux/fips.h>
#include <linux/module.h> /* for module_name() */
#include <linux/rwsem.h>
#include <linux/proc_fs.h>
@@ -48,6 +49,11 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "internal : %s\n",
(alg->cra_flags & CRYPTO_ALG_INTERNAL) ?
"yes" : "no");
+ if (fips_enabled) {
+ seq_printf(m, "fips : %s\n",
+ (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL) ?
+ "no" : "yes");
+ }
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
seq_printf(m, "type : larval\n");
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 6ee5b8a060c0..d2e5e104f8cf 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -190,7 +190,7 @@ static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
if (likely(!pad_len))
goto out;
- out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
+ out_buf = kzalloc(ctx->key_size, GFP_ATOMIC);
err = -ENOMEM;
if (!out_buf)
goto out;
@@ -210,20 +210,17 @@ out:
return err;
}
-static void pkcs1pad_encrypt_sign_complete_cb(
- struct crypto_async_request *child_async_req, int err)
+static void pkcs1pad_encrypt_sign_complete_cb(void *data, int err)
{
- struct akcipher_request *req = child_async_req->data;
- struct crypto_async_request async_req;
+ struct akcipher_request *req = data;
if (err == -EINPROGRESS)
- return;
+ goto out;
+
+ err = pkcs1pad_encrypt_sign_complete(req, err);
- async_req.data = req->base.data;
- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
- async_req.flags = child_async_req->flags;
- req->base.complete(&async_req,
- pkcs1pad_encrypt_sign_complete(req, err));
+out:
+ akcipher_request_complete(req, err);
}
static int pkcs1pad_encrypt(struct akcipher_request *req)
@@ -328,19 +325,17 @@ done:
return err;
}
-static void pkcs1pad_decrypt_complete_cb(
- struct crypto_async_request *child_async_req, int err)
+static void pkcs1pad_decrypt_complete_cb(void *data, int err)
{
- struct akcipher_request *req = child_async_req->data;
- struct crypto_async_request async_req;
+ struct akcipher_request *req = data;
if (err == -EINPROGRESS)
- return;
+ goto out;
+
+ err = pkcs1pad_decrypt_complete(req, err);
- async_req.data = req->base.data;
- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
- async_req.flags = child_async_req->flags;
- req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
+out:
+ akcipher_request_complete(req, err);
}
static int pkcs1pad_decrypt(struct akcipher_request *req)
@@ -509,19 +504,17 @@ done:
return err;
}
-static void pkcs1pad_verify_complete_cb(
- struct crypto_async_request *child_async_req, int err)
+static void pkcs1pad_verify_complete_cb(void *data, int err)
{
- struct akcipher_request *req = child_async_req->data;
- struct crypto_async_request async_req;
+ struct akcipher_request *req = data;
if (err == -EINPROGRESS)
- return;
+ goto out;
- async_req.data = req->base.data;
- async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
- async_req.flags = child_async_req->flags;
- req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
+ err = pkcs1pad_verify_complete(req, err);
+
+out:
+ akcipher_request_complete(req, err);
}
/*
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 0899d527c284..17e11d51ddc3 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -23,7 +23,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
struct aead_request *subreq = aead_request_ctx(req);
struct crypto_aead *geniv;
- if (err == -EINPROGRESS)
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
if (err)
@@ -36,10 +36,9 @@ out:
kfree_sensitive(subreq->iv);
}
-static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
- int err)
+static void seqiv_aead_encrypt_complete(void *data, int err)
{
- struct aead_request *req = base->data;
+ struct aead_request *req = data;
seqiv_aead_encrypt_complete2(req, err);
aead_request_complete(req, err);
diff --git a/crypto/shash.c b/crypto/shash.c
index 868b6ba2b3b7..58b46f198449 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -320,10 +320,10 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
void *data;
- data = kmap_atomic(sg_page(sg));
+ data = kmap_local_page(sg_page(sg));
err = crypto_shash_digest(desc, data + offset, nbytes,
req->result);
- kunmap_atomic(data);
+ kunmap_local(data);
} else
err = crypto_shash_init(desc) ?:
shash_ahash_finup(req, desc);
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0ecab31cfe79..7bf4871fec80 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -42,38 +42,24 @@ struct skcipher_walk_buffer {
static int skcipher_walk_next(struct skcipher_walk *walk);
-static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
-{
- if (PageHighMem(scatterwalk_page(walk)))
- kunmap_atomic(vaddr);
-}
-
-static inline void *skcipher_map(struct scatter_walk *walk)
-{
- struct page *page = scatterwalk_page(walk);
-
- return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
- offset_in_page(walk->offset);
-}
-
static inline void skcipher_map_src(struct skcipher_walk *walk)
{
- walk->src.virt.addr = skcipher_map(&walk->in);
+ walk->src.virt.addr = scatterwalk_map(&walk->in);
}
static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
- walk->dst.virt.addr = skcipher_map(&walk->out);
+ walk->dst.virt.addr = scatterwalk_map(&walk->out);
}
static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
- skcipher_unmap(&walk->in, walk->src.virt.addr);
+ scatterwalk_unmap(walk->src.virt.addr);
}
static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
- skcipher_unmap(&walk->out, walk->dst.virt.addr);
+ scatterwalk_unmap(walk->dst.virt.addr);
}
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
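The deleted skcipher_map()/skcipher_unmap() pair duplicated what scatterwalk already provides; after the kmap_local conversion elsewhere in this series, the replacement helper amounts to (sketch of include/crypto/scatterwalk.h):

    static inline void *scatterwalk_map(struct scatter_walk *walk)
    {
            return kmap_local_page(scatterwalk_page(walk)) +
                   offset_in_page(walk->offset);
    }

kmap_local_page() is cheap for lowmem pages as well, which is why the open-coded PageHighMem() test could go.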
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index a0833654ce94..6521feec7756 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -2044,11 +2044,11 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
case 211:
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
- NULL, 0, 16, 16, aead_speed_template_20);
+ NULL, 0, 16, 16, aead_speed_template_20_28_36);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec,
- NULL, 0, 16, 16, aead_speed_template_20);
+ NULL, 0, 16, 16, aead_speed_template_20_28_36);
test_aead_speed("gcm(aes)", DECRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
break;
@@ -2074,11 +2074,11 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
case 215:
test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL,
- 0, 16, 16, aead_speed_template_20, num_mb);
+ 0, 16, 16, aead_speed_template_20_28_36, num_mb);
test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8,
speed_template_16_24_32, num_mb);
test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL,
- 0, 16, 16, aead_speed_template_20, num_mb);
+ 0, 16, 16, aead_speed_template_20_28_36, num_mb);
test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8,
speed_template_16_24_32, num_mb);
break;
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 9f654677172a..96c843a24607 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -62,7 +62,7 @@ static u8 speed_template_32[] = {32, 0};
* AEAD speed tests
*/
static u8 aead_speed_template_19[] = {19, 0};
-static u8 aead_speed_template_20[] = {20, 0};
+static u8 aead_speed_template_20_28_36[] = {20, 28, 36, 0};
static u8 aead_speed_template_36[] = {36, 0};
/*
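On the renamed template: an rfc4106(gcm(aes)) key is the AES key followed by a 4-byte nonce salt (per RFC 4106), so the three AES key sizes map to 20, 28 and 36 bytes; the old template only exercised AES-128. The trailing 0 terminates the size list.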
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 4476ac97baa5..c91e93ece20b 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -357,6 +357,14 @@ static const struct testvec_config default_cipher_testvec_configs[] = {
{ .proportion_of_total = 5000 },
},
}, {
+ .name = "one src, two even splits dst",
+ .inplace_mode = OUT_OF_PLACE,
+ .src_divs = { { .proportion_of_total = 10000 } },
+ .dst_divs = {
+ { .proportion_of_total = 5000 },
+ { .proportion_of_total = 5000 },
+ },
+ }, {
.name = "uneven misaligned splits, may sleep",
.req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
.src_divs = {
@@ -4501,7 +4509,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
#endif
.alg = "cbcmac(aes)",
- .fips_allowed = 1,
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_cbcmac_tv_template)
@@ -4782,7 +4789,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
/* covered by drbg_nopr_hmac_sha256 test */
.alg = "drbg_nopr_hmac_sha384",
- .fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_nopr_hmac_sha512",
@@ -4805,7 +4811,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
/* covered by drbg_nopr_sha256 test */
.alg = "drbg_nopr_sha384",
- .fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_nopr_sha512",
@@ -4841,7 +4846,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
/* covered by drbg_pr_hmac_sha256 test */
.alg = "drbg_pr_hmac_sha384",
- .fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_pr_hmac_sha512",
@@ -4861,7 +4865,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
/* covered by drbg_pr_sha256 test */
.alg = "drbg_pr_sha384",
- .fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_pr_sha512",
@@ -5035,12 +5038,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "ecdsa-nist-p256",
.test = alg_test_akcipher,
+ .fips_allowed = 1,
.suite = {
.akcipher = __VECS(ecdsa_nist_p256_tv_template)
}
}, {
.alg = "ecdsa-nist-p384",
.test = alg_test_akcipher,
+ .fips_allowed = 1,
.suite = {
.akcipher = __VECS(ecdsa_nist_p384_tv_template)
}
@@ -5126,7 +5131,6 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "ghash",
.test = alg_test_hash,
- .fips_allowed = 1,
.suite = {
.hash = __VECS(ghash_tv_template)
}
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 5e820afa3c78..07994e5ebf4e 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -779,7 +779,7 @@ static const u64 rc[WHIRLPOOL_ROUNDS] = {
* The core Whirlpool transform.
*/
-static void wp512_process_buffer(struct wp512_ctx *wctx) {
+static __no_kmsan_checks void wp512_process_buffer(struct wp512_ctx *wctx) {
int i, r;
u64 K[8]; /* the round key */
u64 block[8]; /* mu(buffer) */
diff --git a/crypto/xts.c b/crypto/xts.c
index 63c85b9e64e0..09be909a6a1a 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -140,9 +140,9 @@ static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
return xts_xor_tweak(req, true, enc);
}
-static void xts_cts_done(struct crypto_async_request *areq, int err)
+static void xts_cts_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
le128 b;
if (!err) {
@@ -196,19 +196,19 @@ static int xts_cts_final(struct skcipher_request *req,
return 0;
}
-static void xts_encrypt_done(struct crypto_async_request *areq, int err)
+static void xts_encrypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err) {
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
- rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
err = xts_xor_tweak_post(req, true);
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
err = xts_cts_final(req, crypto_skcipher_encrypt);
- if (err == -EINPROGRESS)
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
}
}
@@ -216,19 +216,19 @@ static void xts_encrypt_done(struct crypto_async_request *areq, int err)
skcipher_request_complete(req, err);
}
-static void xts_decrypt_done(struct crypto_async_request *areq, int err)
+static void xts_decrypt_done(void *data, int err)
{
- struct skcipher_request *req = areq->data;
+ struct skcipher_request *req = data;
if (!err) {
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
- rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
err = xts_xor_tweak_post(req, false);
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
err = xts_cts_final(req, crypto_skcipher_decrypt);
- if (err == -EINPROGRESS)
+ if (err == -EINPROGRESS || err == -EBUSY)
return;
}
}
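A closing note on the repeated flags change in xts (and the matching one in ahash_def_finup_done1() above): the new mask has no ~, so it keeps a single bit rather than clearing one:

    /* old: clear one bit, keep the rest */
    flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
    /* new: keep only MAY_BACKLOG, clear the rest (completions may run
     * in softirq context, so MAY_SLEEP in particular must not survive
     * into the follow-up crypto_skcipher_{en,de}crypt() call) */
    flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;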