author		Linus Torvalds <torvalds@linux-foundation.org>	2018-08-15 16:01:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-15 16:01:47 -0700
commit		dafa5f6577a9eecd2941add553d1672c30b02364
tree		ff9d3d2dffafd6eba1b6ac21ba50623812041b70 /crypto
parent		9a76aba02a37718242d7cdc294f0a3901928aa57
parent		22240df7ac6d76a271197571a7be45addef2ba15
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Fix dcache flushing crash in skcipher.
- Add hash finup self-tests.
- Reschedule during speed tests.
Algorithms:
- Remove insecure vmac and replace it with vmac64.
- Add public key verification for DH/ECDH.
Drivers:
- Decrease priority of sha-mb on x86.
- Improve NEON latency/throughput on ARM64.
- Add md5/sha384/sha512/des/3des to inside-secure.
- Support eip197d in inside-secure.
- Only register algorithms supported by the host in virtio.
- Add cts and remove incompatible cts1 from ccree.
- Add hisilicon SEC security accelerator driver.
- Replace msm hwrng driver with qcom pseudo rng driver.
Misc:
- Centralize CRC polynomials"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (121 commits)
crypto: arm64/ghash-ce - implement 4-way aggregation
crypto: arm64/ghash-ce - replace NEON yield check with block limit
crypto: hisilicon - sec_send_request() can be static
lib/mpi: remove redundant variable esign
crypto: arm64/aes-ce-gcm - don't reload key schedule if avoidable
crypto: arm64/aes-ce-gcm - implement 2-way aggregation
crypto: arm64/aes-ce-gcm - operate on two input blocks at a time
crypto: dh - make crypto_dh_encode_key() more robust
crypto: dh - fix calculating encoded key size
crypto: ccp - Check for NULL PSP pointer at module unload
crypto: arm/chacha20 - always use vrev for 16-bit rotates
crypto: ccree - allow bigger than sector XTS op
crypto: ccree - zero all of request ctx before use
crypto: ccree - remove cipher ivgen left overs
crypto: ccree - drop useless type flag during reg
crypto: ablkcipher - fix crash flushing dcache in error path
crypto: blkcipher - fix crash flushing dcache in error path
crypto: skcipher - fix crash flushing dcache in error path
crypto: skcipher - remove unnecessary setting of walk->nbytes
crypto: scatterwalk - remove scatterwalk_samebuf()
...
Diffstat (limited to 'crypto')
36 files changed, 722 insertions(+), 526 deletions(-)
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index d880a4897159..8882e90e868e 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -71,11 +71,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
 	return max(start, end_page);
 }
 
-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
-						unsigned int bsize)
+static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
+					unsigned int n)
 {
-	unsigned int n = bsize;
-
 	for (;;) {
 		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
 
@@ -87,17 +85,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
 		n -= len_this_page;
 		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
 	}
-
-	return bsize;
 }
 
-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
-						unsigned int n)
+static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
+					unsigned int n)
 {
 	scatterwalk_advance(&walk->in, n);
 	scatterwalk_advance(&walk->out, n);
-
-	return n;
 }
 
 static int ablkcipher_walk_next(struct ablkcipher_request *req,
@@ -107,39 +101,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
 			 struct ablkcipher_walk *walk, int err)
 {
 	struct crypto_tfm *tfm = req->base.tfm;
-	unsigned int nbytes = 0;
+	unsigned int n; /* bytes processed */
+	bool more;
 
-	if (likely(err >= 0)) {
-		unsigned int n = walk->nbytes - err;
+	if (unlikely(err < 0))
+		goto finish;
 
-		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
-			n = ablkcipher_done_fast(walk, n);
-		else if (WARN_ON(err)) {
-			err = -EINVAL;
-			goto err;
-		} else
-			n = ablkcipher_done_slow(walk, n);
+	n = walk->nbytes - err;
+	walk->total -= n;
+	more = (walk->total != 0);
 
-		nbytes = walk->total - n;
-		err = 0;
+	if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
+		ablkcipher_done_fast(walk, n);
+	} else {
+		if (WARN_ON(err)) {
+			/* unexpected case; didn't process all bytes */
+			err = -EINVAL;
+			goto finish;
+		}
+		ablkcipher_done_slow(walk, n);
 	}
 
-	scatterwalk_done(&walk->in, 0, nbytes);
-	scatterwalk_done(&walk->out, 1, nbytes);
-
-err:
-	walk->total = nbytes;
-	walk->nbytes = nbytes;
+	scatterwalk_done(&walk->in, 0, more);
+	scatterwalk_done(&walk->out, 1, more);
 
-	if (nbytes) {
+	if (more) {
 		crypto_yield(req->base.flags);
 		return ablkcipher_walk_next(req, walk);
 	}
-
+	err = 0;
+finish:
+	walk->nbytes = 0;
 	if (walk->iv != req->info)
 		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
 	kfree(walk->iv_buffer);
-
 	return err;
 }
 EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
@@ -373,6 +368,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
 	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -447,6 +443,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
 	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
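The ablkcipher_walk_done() rework above, and its blkcipher/skcipher twins further down, all share one shape: compute the bytes actually processed this step, decrement the running total, and derive a single "more" flag that drives both the scatterwalk flush and the decision to continue. A minimal userspace sketch of that control flow (the struct and return convention are simplified stand-ins, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    struct walk { unsigned int nbytes, total; };

    /* err >= 0 means "err bytes were left unprocessed in this step" */
    static int walk_done(struct walk *w, int err)
    {
            unsigned int n;
            bool more;

            if (err < 0)
                    goto finish;            /* hard error: skip all bookkeeping */

            n = w->nbytes - err;            /* bytes processed this step */
            w->total -= n;
            more = (w->total != 0);

            if (more)
                    return 1;               /* caller would map the next chunk */
            err = 0;
    finish:
            w->nbytes = 0;                  /* walk is over, success or not */
            return err;
    }

    int main(void)
    {
            struct walk w = { .nbytes = 16, .total = 32 };

            printf("%d\n", walk_done(&w, 0));       /* 1: 16 bytes remain */
            w.nbytes = 16;
            printf("%d\n", walk_done(&w, 0));       /* 0: all done */
            return 0;
    }

The key fix is that the error path jumps straight to the cleanup label without ever touching the scatterwalk state, which is what used to crash flushing the dcache.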
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 38271303ce16..c22f4414856d 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -429,7 +429,6 @@ static struct aead_alg crypto_aegis128_alg = {
 	.chunksize = AEGIS_BLOCK_SIZE,
 
 	.base = {
-		.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct aegis_ctx),
 		.cra_alignmask = 0,
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index 0cc1a7525c85..b6fb21ebdc3e 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -121,7 +121,7 @@ static void crypto_aegis128l_ad(struct aegis_state *state,
 			(const union aegis_chunk *)src;
 
 		while (size >= AEGIS128L_CHUNK_SIZE) {
-			crypto_aegis128l_update_a(state, src_chunk);
+			crypto_aegis128l_update_a(state, src_chunk);
 
 			size -= AEGIS128L_CHUNK_SIZE;
 			src_chunk += 1;
@@ -493,7 +493,6 @@ static struct aead_alg crypto_aegis128l_alg = {
 	.chunksize = AEGIS128L_CHUNK_SIZE,
 
 	.base = {
-		.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct aegis_ctx),
 		.cra_alignmask = 0,
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index a489d741d33a..11f0f8ec9c7c 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -444,7 +444,6 @@ static struct aead_alg crypto_aegis256_alg = {
 	.chunksize = AEGIS_BLOCK_SIZE,
 
 	.base = {
-		.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct aegis_ctx),
 		.cra_alignmask = 0,
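The ".cra_flags = CRYPTO_ALG_TYPE_AEAD" removals here (and the CRYPTO_ALG_TYPE_SHASH removals in the hash files below) are safe because the typed registration helpers already force the type bits on the base algorithm. A sketch of that masking; the constant values are taken from <linux/crypto.h> of this era and are an assumption of this example, not something stated in the diff:

    #include <stdio.h>

    #define CRYPTO_ALG_TYPE_MASK	0x0000000f
    #define CRYPTO_ALG_TYPE_AEAD	0x00000003	/* assumed values */

    int main(void)
    {
            unsigned int cra_flags = 0;     /* driver leaves the type bits unset */

            /* roughly what crypto_register_aead() does to the base alg: */
            cra_flags = (cra_flags & ~CRYPTO_ALG_TYPE_MASK) | CRYPTO_ALG_TYPE_AEAD;

            printf("type bits: %#x\n", cra_flags & CRYPTO_ALG_TYPE_MASK);
            return 0;
    }

Since the registration path overwrites the type bits anyway, setting them per algorithm was pure noise, hence the tree-wide cleanup.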
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 01c0d4aa2563..f93abf13b5d4 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -70,19 +70,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
 	return max(start, end_page);
 }
 
-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
-					       unsigned int bsize)
+static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
+				       unsigned int bsize)
 {
 	u8 *addr;
 
 	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
 	addr = blkcipher_get_spot(addr, bsize);
 	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
-	return bsize;
 }
 
-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
-					       unsigned int n)
+static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
+				       unsigned int n)
 {
 	if (walk->flags & BLKCIPHER_WALK_COPY) {
 		blkcipher_map_dst(walk);
@@ -96,49 +95,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
 
 	scatterwalk_advance(&walk->in, n);
 	scatterwalk_advance(&walk->out, n);
-
-	return n;
 }
 
 int blkcipher_walk_done(struct blkcipher_desc *desc,
 			struct blkcipher_walk *walk, int err)
 {
-	unsigned int nbytes = 0;
+	unsigned int n; /* bytes processed */
+	bool more;
 
-	if (likely(err >= 0)) {
-		unsigned int n = walk->nbytes - err;
+	if (unlikely(err < 0))
+		goto finish;
 
-		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
-			n = blkcipher_done_fast(walk, n);
-		else if (WARN_ON(err)) {
-			err = -EINVAL;
-			goto err;
-		} else
-			n = blkcipher_done_slow(walk, n);
+	n = walk->nbytes - err;
+	walk->total -= n;
+	more = (walk->total != 0);
 
-		nbytes = walk->total - n;
-		err = 0;
+	if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
+		blkcipher_done_fast(walk, n);
+	} else {
+		if (WARN_ON(err)) {
+			/* unexpected case; didn't process all bytes */
+			err = -EINVAL;
+			goto finish;
+		}
+		blkcipher_done_slow(walk, n);
 	}
 
-	scatterwalk_done(&walk->in, 0, nbytes);
-	scatterwalk_done(&walk->out, 1, nbytes);
+	scatterwalk_done(&walk->in, 0, more);
+	scatterwalk_done(&walk->out, 1, more);
 
-err:
-	walk->total = nbytes;
-	walk->nbytes = nbytes;
-
-	if (nbytes) {
+	if (more) {
 		crypto_yield(desc->flags);
 		return blkcipher_walk_next(desc, walk);
 	}
-
+	err = 0;
+finish:
+	walk->nbytes = 0;
 	if (walk->iv != desc->info)
 		memcpy(desc->info, walk->iv, walk->ivsize);
 	if (walk->buffer != walk->page)
 		kfree(walk->buffer);
 	if (walk->page)
 		free_page((unsigned long)walk->page);
-
 	return err;
 }
 EXPORT_SYMBOL_GPL(blkcipher_walk_done);
@@ -512,6 +510,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
 	strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
 	strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
 		sizeof(rblkcipher.geniv));
+	rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
 
 	rblkcipher.blocksize = alg->cra_blocksize;
 	rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 20ff2c746e0b..0959b268966c 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -104,7 +104,6 @@ static struct shash_alg digest_null = {
 	.final			=	null_final,
 	.base			=	{
 		.cra_name		=	"digest_null",
-		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		=	NULL_BLOCK_SIZE,
 		.cra_module		=	THIS_MODULE,
 	}
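The "rblkcipher.geniv[sizeof(...) - 1] = '\0'" lines added to the report functions close a classic strncpy() pitfall: when the source is at least as long as the destination, strncpy() copies no terminator, and the struct is then copied to userspace with a runaway string. A standalone illustration:

    #include <stdio.h>
    #include <string.h>

    struct report { char geniv[8]; };

    int main(void)
    {
            struct report r;

            /* source >= sizeof(dest): strncpy() leaves geniv unterminated */
            strncpy(r.geniv, "<default>", sizeof(r.geniv));
            r.geniv[sizeof(r.geniv) - 1] = '\0';    /* the fix from the hunks above */

            printf("%s\n", r.geniv);        /* "<defaul": truncated but safe */
            return 0;
    }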
diff --git a/crypto/dh.c b/crypto/dh.c
index 5659fe7f446d..09a44de4209d 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -16,14 +16,16 @@
 #include <linux/mpi.h>
 
 struct dh_ctx {
-	MPI p;
-	MPI g;
-	MPI xa;
+	MPI p;	/* Value is guaranteed to be set. */
+	MPI q;	/* Value is optional. */
+	MPI g;	/* Value is guaranteed to be set. */
+	MPI xa;	/* Value is guaranteed to be set. */
 };
 
 static void dh_clear_ctx(struct dh_ctx *ctx)
 {
 	mpi_free(ctx->p);
+	mpi_free(ctx->q);
 	mpi_free(ctx->g);
 	mpi_free(ctx->xa);
 	memset(ctx, 0, sizeof(*ctx));
@@ -60,6 +62,12 @@ static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
 	if (!ctx->p)
 		return -EINVAL;
 
+	if (params->q && params->q_size) {
+		ctx->q = mpi_read_raw_data(params->q, params->q_size);
+		if (!ctx->q)
+			return -EINVAL;
+	}
+
 	ctx->g = mpi_read_raw_data(params->g, params->g_size);
 	if (!ctx->g)
 		return -EINVAL;
@@ -93,6 +101,55 @@ err_clear_ctx:
 	return -EINVAL;
 }
 
+/*
+ * SP800-56A public key verification:
+ *
+ * * If Q is provided as part of the domain paramenters, a full validation
+ *   according to SP800-56A section 5.6.2.3.1 is performed.
+ *
+ * * If Q is not provided, a partial validation according to SP800-56A section
+ *   5.6.2.3.2 is performed.
+ */
+static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
+{
+	if (unlikely(!ctx->p))
+		return -EINVAL;
+
+	/*
+	 * Step 1: Verify that 2 <= y <= p - 2.
+	 *
+	 * The upper limit check is actually y < p instead of y < p - 1
+	 * as the mpi_sub_ui function is yet missing.
+	 */
+	if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0)
+		return -EINVAL;
+
+	/* Step 2: Verify that 1 = y^q mod p */
+	if (ctx->q) {
+		MPI val = mpi_alloc(0);
+		int ret;
+
+		if (!val)
+			return -ENOMEM;
+
+		ret = mpi_powm(val, y, ctx->q, ctx->p);
+
+		if (ret) {
+			mpi_free(val);
+			return ret;
+		}
+
+		ret = mpi_cmp_ui(val, 1);
+
+		mpi_free(val);
+
+		if (ret != 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int dh_compute_value(struct kpp_request *req)
 {
 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
@@ -115,6 +172,9 @@ static int dh_compute_value(struct kpp_request *req)
 			ret = -EINVAL;
 			goto err_free_val;
 		}
+		ret = dh_is_pubkey_valid(ctx, base);
+		if (ret)
+			goto err_free_base;
 	} else {
 		base = ctx->g;
 	}
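dh_is_pubkey_valid() follows the SP800-56A recipe exactly: the partial check is the range test 2 <= y <= p - 2, and the full check (only possible when the subgroup order q is known) additionally requires y^q mod p == 1. The same arithmetic on toy numbers, using a small square-and-multiply modpow; the group p = 23, q = 11 (the quadratic residues mod 23) is chosen purely for illustration:

    #include <stdio.h>

    static unsigned long modpow(unsigned long b, unsigned long e, unsigned long m)
    {
            unsigned long r = 1 % m;

            b %= m;
            while (e) {
                    if (e & 1)
                            r = (r * b) % m;
                    b = (b * b) % m;
                    e >>= 1;
            }
            return r;
    }

    int main(void)
    {
            const unsigned long p = 23, q = 11;     /* p = 2q + 1 */

            for (unsigned long y = 0; y < p; y++) {
                    int partial = (y >= 2 && y <= p - 2);           /* 5.6.2.3.2 */
                    int full = partial && modpow(y, q, p) == 1;     /* 5.6.2.3.1 */

                    if (partial && !full)
                            printf("y = %2lu passes partial but not full validation\n", y);
            }
            return 0;
    }

The output lists exactly the in-range keys that lie outside the order-q subgroup, which is the class of bad public keys only the full check can reject.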
diff --git a/crypto/dh_helper.c b/crypto/dh_helper.c
index 24fdb2ecaa85..edacda5f6a4d 100644
--- a/crypto/dh_helper.c
+++ b/crypto/dh_helper.c
@@ -14,10 +14,12 @@
 #include <crypto/dh.h>
 #include <crypto/kpp.h>
 
-#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
+#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 4 * sizeof(int))
 
-static inline u8 *dh_pack_data(void *dst, const void *src, size_t size)
+static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src, size_t size)
 {
+	if (!dst || size > end - dst)
+		return NULL;
 	memcpy(dst, src, size);
 	return dst + size;
 }
@@ -30,7 +32,7 @@ static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
 
 static inline unsigned int dh_data_size(const struct dh *p)
 {
-	return p->key_size + p->p_size + p->g_size;
+	return p->key_size + p->p_size + p->q_size + p->g_size;
 }
 
 unsigned int crypto_dh_key_len(const struct dh *p)
@@ -42,25 +44,27 @@ EXPORT_SYMBOL_GPL(crypto_dh_key_len);
 int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
 {
 	u8 *ptr = buf;
+	u8 * const end = ptr + len;
 	struct kpp_secret secret = {
 		.type = CRYPTO_KPP_SECRET_TYPE_DH,
 		.len = len
 	};
 
-	if (unlikely(!buf))
+	if (unlikely(!len))
 		return -EINVAL;
 
-	if (len != crypto_dh_key_len(params))
+	ptr = dh_pack_data(ptr, end, &secret, sizeof(secret));
+	ptr = dh_pack_data(ptr, end, &params->key_size,
+			   sizeof(params->key_size));
+	ptr = dh_pack_data(ptr, end, &params->p_size, sizeof(params->p_size));
+	ptr = dh_pack_data(ptr, end, &params->q_size, sizeof(params->q_size));
+	ptr = dh_pack_data(ptr, end, &params->g_size, sizeof(params->g_size));
+	ptr = dh_pack_data(ptr, end, params->key, params->key_size);
+	ptr = dh_pack_data(ptr, end, params->p, params->p_size);
+	ptr = dh_pack_data(ptr, end, params->q, params->q_size);
+	ptr = dh_pack_data(ptr, end, params->g, params->g_size);
+	if (ptr != end)
 		return -EINVAL;
-
-	ptr = dh_pack_data(ptr, &secret, sizeof(secret));
-	ptr = dh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
-	ptr = dh_pack_data(ptr, &params->p_size, sizeof(params->p_size));
-	ptr = dh_pack_data(ptr, &params->g_size, sizeof(params->g_size));
-	ptr = dh_pack_data(ptr, params->key, params->key_size);
-	ptr = dh_pack_data(ptr, params->p, params->p_size);
-	dh_pack_data(ptr, params->g, params->g_size);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
@@ -79,6 +83,7 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 
 	ptr = dh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
 	ptr = dh_unpack_data(&params->p_size, ptr, sizeof(params->p_size));
+	ptr = dh_unpack_data(&params->q_size, ptr, sizeof(params->q_size));
 	ptr = dh_unpack_data(&params->g_size, ptr, sizeof(params->g_size));
 	if (secret.len != crypto_dh_key_len(params))
 		return -EINVAL;
@@ -88,7 +93,7 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 	 * some drivers assume otherwise.
 	 */
 	if (params->key_size > params->p_size ||
-	    params->g_size > params->p_size)
+	    params->g_size > params->p_size || params->q_size > params->p_size)
 		return -EINVAL;
 
 	/* Don't allocate memory. Set pointers to data within
@@ -96,7 +101,9 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 	 */
 	params->key = (void *)ptr;
 	params->p = (void *)(ptr + params->key_size);
-	params->g = (void *)(ptr + params->key_size + params->p_size);
+	params->q = (void *)(ptr + params->key_size + params->p_size);
+	params->g = (void *)(ptr + params->key_size + params->p_size +
+			     params->q_size);
 
 	/*
 	 * Don't permit 'p' to be 0.  It's not a prime number, and it's subject
@@ -106,6 +113,10 @@ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
 	if (memchr_inv(params->p, 0, params->p_size) == NULL)
 		return -EINVAL;
 
+	/* It is permissible to not provide Q. */
+	if (params->q_size == 0)
+		params->q = NULL;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
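crypto_dh_encode_key() now relies on dh_pack_data() being "sticky": once a write would overrun, it returns NULL, every later call passes the NULL through, and a single final "ptr != end" test catches both overflow and a short buffer. The same idiom in isolation:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t *pack(uint8_t *dst, const uint8_t *end,
                         const void *src, size_t size)
    {
            if (!dst || size > (size_t)(end - dst))
                    return NULL;    /* sticky failure: NULL propagates */
            memcpy(dst, src, size);
            return dst + size;
    }

    int main(void)
    {
            uint8_t buf[8], *p = buf, * const end = buf + sizeof(buf);

            p = pack(p, end, "abcd", 4);
            p = pack(p, end, "efgh", 4);
            p = pack(p, end, "!", 1);       /* would overrun: p becomes NULL */

            printf("%s\n", p == end ? "exact fit" : "size mismatch");
            return 0;
    }

Checking once at the end instead of after every call keeps the encode function flat while still rejecting any caller-supplied length that does not match the packed layout.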
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 466a112a4446..bc52d9562611 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -261,8 +261,7 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg);
 static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
 			      u8 *inbuf, u32 inbuflen,
 			      u8 *outbuf, u32 outlen);
-#define DRBG_CTR_NULL_LEN 128
-#define DRBG_OUTSCRATCHLEN DRBG_CTR_NULL_LEN
+#define DRBG_OUTSCRATCHLEN 256
 
 /* BCC function for CTR DRBG as defined in 10.4.3 */
 static int drbg_ctr_bcc(struct drbg_state *drbg,
@@ -555,8 +554,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
 	}
 
 	/* 10.2.1.5.2 step 4.1 */
-	ret = drbg_kcapi_sym_ctr(drbg, drbg->ctr_null_value, DRBG_CTR_NULL_LEN,
-				 buf, len);
+	ret = drbg_kcapi_sym_ctr(drbg, NULL, 0, buf, len);
 	if (ret)
 		return ret;
 
@@ -1644,9 +1642,6 @@ static int drbg_fini_sym_kernel(struct drbg_state *drbg)
 	skcipher_request_free(drbg->ctr_req);
 	drbg->ctr_req = NULL;
 
-	kfree(drbg->ctr_null_value_buf);
-	drbg->ctr_null_value = NULL;
-
 	kfree(drbg->outscratchpadbuf);
 	drbg->outscratchpadbuf = NULL;
 
@@ -1697,15 +1692,6 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
 				      crypto_req_done, &drbg->ctr_wait);
 
 	alignmask = crypto_skcipher_alignmask(sk_tfm);
-	drbg->ctr_null_value_buf = kzalloc(DRBG_CTR_NULL_LEN + alignmask,
-					   GFP_KERNEL);
-	if (!drbg->ctr_null_value_buf) {
-		drbg_fini_sym_kernel(drbg);
-		return -ENOMEM;
-	}
-	drbg->ctr_null_value = (u8 *)PTR_ALIGN(drbg->ctr_null_value_buf,
-					       alignmask + 1);
-
 	drbg->outscratchpadbuf = kmalloc(DRBG_OUTSCRATCHLEN + alignmask,
 					 GFP_KERNEL);
 	if (!drbg->outscratchpadbuf) {
@@ -1715,6 +1701,9 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
 	drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf,
 					      alignmask + 1);
 
+	sg_init_table(&drbg->sg_in, 1);
+	sg_init_one(&drbg->sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+
 	return alignmask;
 }
 
@@ -1743,17 +1732,25 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
 			      u8 *inbuf, u32 inlen,
 			      u8 *outbuf, u32 outlen)
 {
-	struct scatterlist sg_in, sg_out;
+	struct scatterlist *sg_in = &drbg->sg_in, *sg_out = &drbg->sg_out;
+	u32 scratchpad_use = min_t(u32, outlen, DRBG_OUTSCRATCHLEN);
 	int ret;
 
-	sg_init_one(&sg_in, inbuf, inlen);
-	sg_init_one(&sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
+	if (inbuf) {
+		/* Use caller-provided input buffer */
+		sg_set_buf(sg_in, inbuf, inlen);
+	} else {
+		/* Use scratchpad for in-place operation */
+		inlen = scratchpad_use;
+		memset(drbg->outscratchpad, 0, scratchpad_use);
+		sg_set_buf(sg_in, drbg->outscratchpad, scratchpad_use);
+	}
 
 	while (outlen) {
 		u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
 
 		/* Output buffer may not be valid for SGL, use scratchpad */
-		skcipher_request_set_crypt(drbg->ctr_req, &sg_in, &sg_out,
+		skcipher_request_set_crypt(drbg->ctr_req, sg_in, sg_out,
 					   cryptlen, drbg->V);
 		ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
 				      &drbg->ctr_wait);
@@ -1763,6 +1760,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
 		crypto_init_wait(&drbg->ctr_wait);
 
 		memcpy(outbuf, drbg->outscratchpad, cryptlen);
+		memzero_explicit(drbg->outscratchpad, cryptlen);
 
 		outlen -= cryptlen;
 		outbuf += cryptlen;
@@ -1770,7 +1768,6 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
 	ret = 0;
 
 out:
-	memzero_explicit(drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
 	return ret;
 }
 #endif /* CONFIG_CRYPTO_DRBG_CTR */
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 815541309a95..8facafd67802 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1019,6 +1019,36 @@ out:
 	return ret;
 }
 
+/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
+static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+				       struct ecc_point *pk)
+{
+	u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
+
+	/* Check 1: Verify key is not the zero point. */
+	if (ecc_point_is_zero(pk))
+		return -EINVAL;
+
+	/* Check 2: Verify key is in the range [1, p-1]. */
+	if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
+		return -EINVAL;
+	if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
+		return -EINVAL;
+
+	/* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
+	vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */
+	vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */
+	vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */
+	vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */
+	vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
+	vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
+	if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
+		return -EINVAL;
+
+	return 0;
+
+}
+
 int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
 			      const u64 *private_key, const u64 *public_key,
 			      u64 *secret)
@@ -1046,16 +1076,20 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
 		goto out;
 	}
 
+	ecc_swap_digits(public_key, pk->x, ndigits);
+	ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
+	ret = ecc_is_pubkey_valid_partial(curve, pk);
+	if (ret)
+		goto err_alloc_product;
+
+	ecc_swap_digits(private_key, priv, ndigits);
+
 	product = ecc_alloc_point(ndigits);
 	if (!product) {
 		ret = -ENOMEM;
 		goto err_alloc_product;
 	}
 
-	ecc_swap_digits(public_key, pk->x, ndigits);
-	ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
-	ecc_swap_digits(private_key, priv, ndigits);
-
 	ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
 
 	ecc_swap_digits(product->x, secret, ndigits);
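ecc_is_pubkey_valid_partial() is SP800-56A's ECC partial public-key validation: reject the point at infinity, require both coordinates below p, and check the curve equation y^2 = x^3 + ax + b (mod p). The same three checks with toy parameters small enough for plain long arithmetic (curve and point chosen only for illustration):

    #include <stdio.h>

    int main(void)
    {
            const long p = 97, a = 2, b = 3;        /* y^2 = x^3 + 2x + 3 mod 97 */
            const long x = 3, y = 6;                /* candidate public key */

            if (x <= 0 || y <= 0 || x >= p || y >= p) {
                    puts("rejected: coordinate out of [1, p-1]");
                    return 1;
            }

            long lhs = (y * y) % p;
            long rhs = ((x * x % p) * x % p + a * x % p + b) % p;

            puts(lhs == rhs ? "on curve: accepted" : "off curve: rejected");
            return 0;
    }

An attacker-supplied point that fails this check could otherwise steer the scalar multiplication onto a different (weaker) curve, which is the invalid-curve attack this hunk defends against.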
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index b80f45da829c..336ab1805639 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -13,9 +13,11 @@ struct ecc_curve {
 	struct ecc_point g;
 	u64 *p;
 	u64 *n;
+	u64 *a;
+	u64 *b;
 };
 
-/* NIST P-192 */
+/* NIST P-192: a = p - 3 */
 static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
 				0x188DA80EB03090F6ull };
 static u64 nist_p192_g_y[] = { 0x73F977A11E794811ull, 0x631011ED6B24CDD5ull,
@@ -24,6 +26,10 @@ static u64 nist_p192_p[] = { 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFEull,
 				0xFFFFFFFFFFFFFFFFull };
 static u64 nist_p192_n[] = { 0x146BC9B1B4D22831ull, 0xFFFFFFFF99DEF836ull,
 				0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_a[] = { 0xFFFFFFFFFFFFFFFCull, 0xFFFFFFFFFFFFFFFEull,
+				0xFFFFFFFFFFFFFFFFull };
+static u64 nist_p192_b[] = { 0xFEB8DEECC146B9B1ull, 0x0FA7E9AB72243049ull,
+				0x64210519E59C80E7ull };
 static struct ecc_curve nist_p192 = {
 	.name = "nist_192",
 	.g = {
@@ -32,10 +38,12 @@ static struct ecc_curve nist_p192 = {
 		.ndigits = 3,
 	},
 	.p = nist_p192_p,
-	.n = nist_p192_n
+	.n = nist_p192_n,
+	.a = nist_p192_a,
+	.b = nist_p192_b
 };
 
-/* NIST P-256 */
+/* NIST P-256: a = p - 3 */
 static u64 nist_p256_g_x[] = { 0xF4A13945D898C296ull, 0x77037D812DEB33A0ull,
 				0xF8BCE6E563A440F2ull, 0x6B17D1F2E12C4247ull };
 static u64 nist_p256_g_y[] = { 0xCBB6406837BF51F5ull, 0x2BCE33576B315ECEull,
@@ -44,6 +52,10 @@ static u64 nist_p256_p[] = { 0xFFFFFFFFFFFFFFFFull, 0x00000000FFFFFFFFull,
 				0x0000000000000000ull, 0xFFFFFFFF00000001ull };
 static u64 nist_p256_n[] = { 0xF3B9CAC2FC632551ull, 0xBCE6FAADA7179E84ull,
 				0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFF00000000ull };
+static u64 nist_p256_a[] = { 0xFFFFFFFFFFFFFFFCull, 0x00000000FFFFFFFFull,
+				0x0000000000000000ull, 0xFFFFFFFF00000001ull };
+static u64 nist_p256_b[] = { 0x3BCE3C3E27D2604Bull, 0x651D06B0CC53B0F6ull,
+				0xB3EBBD55769886BCull, 0x5AC635D8AA3A93E7ull };
 static struct ecc_curve nist_p256 = {
 	.name = "nist_256",
 	.g = {
@@ -52,7 +64,9 @@ static struct ecc_curve nist_p256 = {
 		.ndigits = 4,
 	},
 	.p = nist_p256_p,
-	.n = nist_p256_n
+	.n = nist_p256_n,
+	.a = nist_p256_a,
+	.b = nist_p256_b
 };
 
 #endif
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 1bffb3f712dd..d9f192b953b2 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -132,7 +132,6 @@ static struct shash_alg ghash_alg = {
 		.cra_name		= "ghash",
 		.cra_driver_name	= "ghash-generic",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= GHASH_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct ghash_ctx),
 		.cra_module		= THIS_MODULE,
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 954a7064a179..393a782679c7 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -188,7 +188,7 @@ static int post_crypt(struct skcipher_request *req)
 	if (rctx->dst != sg) {
 		rctx->dst[0] = *sg;
 		sg_unmark_end(rctx->dst);
-		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
 	}
 	rctx->dst[0].length -= offset - sg->offset;
 	rctx->dst[0].offset = offset;
@@ -265,7 +265,7 @@ static int pre_crypt(struct skcipher_request *req)
 	if (rctx->src != sg) {
 		rctx->src[0] = *sg;
 		sg_unmark_end(rctx->src);
-		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
 	}
 	rctx->src[0].length -= offset - sg->offset;
 	rctx->src[0].offset = offset;
diff --git a/crypto/md4.c b/crypto/md4.c
index 810fefb0a007..9965ec40d9f9 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -217,7 +217,6 @@ static struct shash_alg alg = {
 	.descsize	=	sizeof(struct md4_ctx),
 	.base		=	{
 		.cra_name	=	"md4",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	MD4_HMAC_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/crypto/md5.c b/crypto/md5.c
index f776ef43d621..94dd78144ba3 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -229,7 +229,6 @@ static struct shash_alg alg = {
 	.statesize = sizeof(struct md5_state),
 	.base		=	{
 		.cra_name	=	"md5",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	=	MD5_HMAC_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
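Both new "a" constants encode a = p - 3, the standard NIST choice, stored least-significant 64-bit limb first, exactly like "p". Since only the lowest limbs differ, the relation is easy to spot-check:

    #include <stdio.h>

    int main(void)
    {
            /* nist_p192_p and nist_p192_a, least-significant limb first */
            unsigned long long p0 = 0xFFFFFFFFFFFFFFFFull;
            unsigned long long a0 = 0xFFFFFFFFFFFFFFFCull;

            /* higher limbs are identical, so no borrow: p - a is just p0 - a0 */
            printf("p - a = %llu\n", p0 - a0);      /* 3 */
            return 0;
    }

The a and b coefficients were not needed before because point multiplication exploits a = -3 implicitly; they become necessary once the curve equation itself is checked in ecc_is_pubkey_valid_partial().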
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 6180b2557836..d057cf5ac4a8 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -514,7 +514,6 @@ static struct aead_alg crypto_morus1280_alg = {
 	.chunksize = MORUS1280_BLOCK_SIZE,
 
 	.base = {
-		.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct morus1280_ctx),
 		.cra_alignmask = 0,
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 5eede3749e64..1ca76e54281b 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -511,7 +511,6 @@ static struct aead_alg crypto_morus640_alg = {
 	.chunksize = MORUS640_BLOCK_SIZE,
 
 	.base = {
-		.cra_flags = CRYPTO_ALG_TYPE_AEAD,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct morus640_ctx),
 		.cra_alignmask = 0,
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index b7a3a0613a30..47d3a6b83931 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -279,7 +279,6 @@ static struct shash_alg poly1305_alg = {
 		.cra_name		= "poly1305",
 		.cra_driver_name	= "poly1305-generic",
 		.cra_priority		= 100,
-		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize		= POLY1305_BLOCK_SIZE,
 		.cra_module		= THIS_MODULE,
 	},
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 40e053b97b69..5f4472256e27 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -303,7 +303,6 @@ static struct shash_alg alg = {
 	.descsize	=	sizeof(struct rmd128_ctx),
 	.base		=	{
 		.cra_name	 =	"rmd128",
-		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	 =	RMD128_BLOCK_SIZE,
 		.cra_module	 =	THIS_MODULE,
 	}
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 5f3e6ea35268..737645344d1c 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -347,7 +347,6 @@ static struct shash_alg alg = {
 	.descsize	=	sizeof(struct rmd160_ctx),
 	.base		=	{
 		.cra_name	 =	"rmd160",
-		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	 =	RMD160_BLOCK_SIZE,
 		.cra_module	 =	THIS_MODULE,
 	}
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index f50c025cc962..0e9d30676a01 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -49,7 +49,7 @@ struct rmd256_ctx {
 
 static void rmd256_transform(u32 *state, const __le32 *in)
 {
-	u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
+	u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
 
 	/* Initialize left lane */
 	aa = state[0];
@@ -100,7 +100,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
 	ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12],  6);
 
 	/* Swap contents of "a" registers */
-	tmp = aa; aa = aaa; aaa = tmp;
+	swap(aa, aaa);
 
 	/* round 2: left lane */
 	ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
@@ -139,7 +139,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
 	ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
 
 	/* Swap contents of "b" registers */
-	tmp = bb; bb = bbb; bbb = tmp;
+	swap(bb, bbb);
 
 	/* round 3: left lane */
 	ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
@@ -178,7 +178,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
 	ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13],  5);
 
 	/* Swap contents of "c" registers */
-	tmp = cc; cc = ccc; ccc = tmp;
+	swap(cc, ccc);
 
 	/* round 4: left lane */
 	ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
@@ -217,7 +217,7 @@ static void rmd256_transform(u32 *state, const __le32 *in)
 	ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14],  8);
 
 	/* Swap contents of "d" registers */
-	tmp = dd; dd = ddd; ddd = tmp;
+	swap(dd, ddd);
 
 	/* combine results */
 	state[0] += aa;
@@ -322,7 +322,6 @@ static struct shash_alg alg = {
 	.descsize	=	sizeof(struct rmd256_ctx),
 	.base		=	{
 		.cra_name	 =	"rmd256",
-		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	 =	RMD256_BLOCK_SIZE,
 		.cra_module	 =	THIS_MODULE,
 	}
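The rmd256/rmd320 cleanups drop the manual three-assignment swaps in favor of the kernel's swap() macro. Outside the kernel the macro is a one-liner; typeof is a GCC/Clang extension, as in the kernel's own definition:

    #include <stdio.h>

    #define swap(a, b) \
            do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

    int main(void)
    {
            unsigned int aa = 0x67452301, aaa = 0x76543210;

            swap(aa, aaa);  /* replaces: tmp = aa; aa = aaa; aaa = tmp; */
            printf("%08x %08x\n", aa, aaa);
            return 0;
    }

Besides brevity, this lets the "tmp" local disappear from the transform functions entirely, as the first hunk of each file shows.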
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index e1315e4869e8..3ae1df5bb48c 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -53,7 +53,7 @@ struct rmd320_ctx {
 
 static void rmd320_transform(u32 *state, const __le32 *in)
 {
-	u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
+	u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
 
 	/* Initialize left lane */
 	aa = state[0];
@@ -106,7 +106,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
 	ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12],  6);
 
 	/* Swap contents of "a" registers */
-	tmp = aa; aa = aaa; aaa = tmp;
+	swap(aa, aaa);
 
 	/* round 2: left lane" */
 	ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
@@ -145,7 +145,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
 	ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
 
 	/* Swap contents of "b" registers */
-	tmp = bb; bb = bbb; bbb = tmp;
+	swap(bb, bbb);
 
 	/* round 3: left lane" */
 	ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
@@ -184,7 +184,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
 	ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13],  5);
 
 	/* Swap contents of "c" registers */
-	tmp = cc; cc = ccc; ccc = tmp;
+	swap(cc, ccc);
 
 	/* round 4: left lane" */
 	ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
@@ -223,7 +223,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
 	ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14],  8);
 
 	/* Swap contents of "d" registers */
-	tmp = dd; dd = ddd; ddd = tmp;
+	swap(dd, ddd);
 
 	/* round 5: left lane" */
 	ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
@@ -262,7 +262,7 @@ static void rmd320_transform(u32 *state, const __le32 *in)
 	ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
 
 	/* Swap contents of "e" registers */
-	tmp = ee; ee = eee; eee = tmp;
+	swap(ee, eee);
 
 	/* combine results */
 	state[0] += aa;
@@ -371,7 +371,6 @@ static struct shash_alg alg = {
 	.descsize	=	sizeof(struct rmd320_ctx),
 	.base		=	{
 		.cra_name	 =	"rmd320",
-		.cra_flags	 =	CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	 =	RMD320_BLOCK_SIZE,
 		.cra_module	 =	THIS_MODULE,
 	}
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index c16c94f88733..d0b92c1cd6e9 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -91,7 +91,7 @@ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
 
 	sg_init_table(dst, 2);
 	sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);
-	scatterwalk_crypto_chain(dst, sg_next(src), 0, 2);
+	scatterwalk_crypto_chain(dst, sg_next(src), 2);
 
 	return dst;
 }
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 6877cbb9105f..2af64ef81f40 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -76,7 +76,7 @@ static struct shash_alg alg = {
 	.base		=	{
 		.cra_name	=	"sha1",
 		.cra_driver_name=	"sha1-generic",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_priority	=	100,
 		.cra_blocksize	=	SHA1_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 8f9c47e1a96e..1e5ba6649e8d 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -271,7 +271,7 @@ static struct shash_alg sha256_algs[2] = { {
 	.base		=	{
 		.cra_name	=	"sha256",
 		.cra_driver_name=	"sha256-generic",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_priority	=	100,
 		.cra_blocksize	=	SHA256_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -285,7 +285,7 @@ static struct shash_alg sha256_algs[2] = { {
 	.base		=	{
 		.cra_name	=	"sha224",
 		.cra_driver_name=	"sha224-generic",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_priority	=	100,
 		.cra_blocksize	=	SHA224_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
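Giving the generic SHA-1/SHA-2 drivers an explicit ".cra_priority = 100" matters because the API resolves a name like "sha256" to the registered implementation with the highest priority; arch-optimized drivers register at higher values. The 170 below is my recollection of the sha256-avx2 value and is only illustrative:

    #include <stdio.h>

    struct alg { const char *driver; int cra_priority; };

    int main(void)
    {
            struct alg candidates[] = {
                    { "sha256-generic", 100 },
                    { "sha256-avx2",    170 },      /* illustrative value */
            };
            struct alg *best = &candidates[0];

            for (unsigned int i = 1; i < sizeof(candidates) / sizeof(*candidates); i++)
                    if (candidates[i].cra_priority > best->cra_priority)
                            best = &candidates[i];

            printf("crypto_alloc_shash(\"sha256\", ...) binds to %s\n", best->driver);
            return 0;
    }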
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7f6735d9003f..7ed98367d4fb 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -250,7 +250,6 @@ static struct shash_alg algs[] = { {
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-224",
 	.base.cra_driver_name	= "sha3-224-generic",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_224_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -261,7 +260,6 @@ static struct shash_alg algs[] = { {
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-256",
 	.base.cra_driver_name	= "sha3-256-generic",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_256_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -272,7 +270,6 @@ static struct shash_alg algs[] = { {
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-384",
 	.base.cra_driver_name	= "sha3-384-generic",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_384_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 }, {
@@ -283,7 +280,6 @@ static struct shash_alg algs[] = { {
 	.descsize		= sizeof(struct sha3_state),
 	.base.cra_name		= "sha3-512",
 	.base.cra_driver_name	= "sha3-512-generic",
-	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
 	.base.cra_blocksize	= SHA3_512_BLOCK_SIZE,
 	.base.cra_module	= THIS_MODULE,
 } };
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index eba965d18bfc..4097cd555eb6 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -23,6 +23,28 @@
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
+const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
+	0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38,
+	0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a,
+	0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43,
+	0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda,
+	0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb,
+	0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b
+};
+EXPORT_SYMBOL_GPL(sha384_zero_message_hash);
+
+const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = {
+	0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd,
+	0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07,
+	0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc,
+	0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce,
+	0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0,
+	0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f,
+	0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81,
+	0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e
+};
+EXPORT_SYMBOL_GPL(sha512_zero_message_hash);
+
 static inline u64 Ch(u64 x, u64 y, u64 z)
 {
         return z ^ (x & (y ^ z));
@@ -171,7 +193,7 @@ static struct shash_alg sha512_algs[2] = { {
 	.base		=	{
 		.cra_name	=	"sha512",
 		.cra_driver_name =	"sha512-generic",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_priority	=	100,
 		.cra_blocksize	=	SHA512_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
@@ -185,7 +207,7 @@ static struct shash_alg sha512_algs[2] = { {
 	.base		=	{
 		.cra_name	=	"sha384",
 		.cra_driver_name =	"sha384-generic",
-		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
+		.cra_priority	=	100,
 		.cra_blocksize	=	SHA384_BLOCK_SIZE,
 		.cra_module	=	THIS_MODULE,
 	}
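The exported constants are simply SHA-384 and SHA-512 of the empty message, precomputed so drivers can short-circuit zero-length requests. They can be cross-checked against any other implementation, for example OpenSSL (build with -lcrypto):

    #include <stdio.h>
    #include <openssl/sha.h>

    int main(void)
    {
            unsigned char md[SHA512_DIGEST_LENGTH];

            SHA512((const unsigned char *)"", 0, md);
            for (int i = 0; i < SHA512_DIGEST_LENGTH; i++)
                    printf("%02x", md[i]);
            printf("\n");   /* starts cf 83 e1 35 ..., matching the table above */
            return 0;
    }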
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0fe2a2923ad0..0bd8c6caa498 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
 	return max(start, end_page);
 }
 
-static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
 	u8 *addr;
 
@@ -103,23 +103,24 @@ static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 	addr = skcipher_get_spot(addr, bsize);
 	scatterwalk_copychunks(addr, &walk->out, bsize,
 			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
-	return 0;
 }
 
 int skcipher_walk_done(struct skcipher_walk *walk, int err)
 {
-	unsigned int n = walk->nbytes - err;
-	unsigned int nbytes;
-
-	nbytes = walk->total - n;
-
-	if (unlikely(err < 0)) {
-		nbytes = 0;
-		n = 0;
-	} else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
-					   SKCIPHER_WALK_SLOW |
-					   SKCIPHER_WALK_COPY |
-					   SKCIPHER_WALK_DIFF)))) {
+	unsigned int n; /* bytes processed */
+	bool more;
+
+	if (unlikely(err < 0))
+		goto finish;
+
+	n = walk->nbytes - err;
+	walk->total -= n;
+	more = (walk->total != 0);
+
+	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+				    SKCIPHER_WALK_SLOW |
+				    SKCIPHER_WALK_COPY |
+				    SKCIPHER_WALK_DIFF)))) {
 unmap_src:
 		skcipher_unmap_src(walk);
 	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
@@ -131,28 +132,28 @@ unmap_src:
 		skcipher_unmap_dst(walk);
 	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
 		if (WARN_ON(err)) {
+			/* unexpected case; didn't process all bytes */
 			err = -EINVAL;
-			nbytes = 0;
-		} else
-			n = skcipher_done_slow(walk, n);
+			goto finish;
+		}
+		skcipher_done_slow(walk, n);
+		goto already_advanced;
 	}
 
-	if (err > 0)
-		err = 0;
-
-	walk->total = nbytes;
-	walk->nbytes = nbytes;
-
 	scatterwalk_advance(&walk->in, n);
 	scatterwalk_advance(&walk->out, n);
-	scatterwalk_done(&walk->in, 0, nbytes);
-	scatterwalk_done(&walk->out, 1, nbytes);
+already_advanced:
+	scatterwalk_done(&walk->in, 0, more);
+	scatterwalk_done(&walk->out, 1, more);
 
-	if (nbytes) {
+	if (more) {
 		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
 			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
 		return skcipher_walk_next(walk);
 	}
+	err = 0;
+finish:
+	walk->nbytes = 0;
 
 	/* Short-circuit for the common/fast path. */
 	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
@@ -387,7 +388,6 @@ set_phys_lowmem:
 	}
 	return err;
 }
-EXPORT_SYMBOL_GPL(skcipher_walk_next);
 
 static int skcipher_copy_iv(struct skcipher_walk *walk)
 {
@@ -399,7 +399,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 	unsigned size;
 	u8 *iv;
 
-	aligned_bs = ALIGN(bs, alignmask);
+	aligned_bs = ALIGN(bs, alignmask + 1);
 
 	/* Minimum size to align buffer by alignmask. */
 	size = alignmask & ~a;
@@ -437,7 +437,6 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
 	}
 
 	walk->page = NULL;
-	walk->nbytes = walk->total;
 
 	return skcipher_walk_next(walk);
 }
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9e823d99f095..9a5c60f08aad 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -184,7 +184,6 @@ static struct shash_alg sm3_alg = {
 	.base = {
 		.cra_name	 = "sm3",
 		.cra_driver_name = "sm3-generic",
-		.cra_flags	 = CRYPTO_ALG_TYPE_SHASH,
 		.cra_blocksize	 = SM3_BLOCK_SIZE,
 		.cra_module	 = THIS_MODULE,
 	}
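The one-liner "aligned_bs = ALIGN(bs, alignmask + 1)" fixes an easy mask/alignment mix-up: ALIGN() takes the alignment itself (a power of two), while alignmask is that value minus one. Feeding it the mask miscomputes the rounded block size, as a quick demo shows (bs = 1 is plausible for a stream cipher):

    #include <stdio.h>

    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))  /* a: power of two */

    int main(void)
    {
            unsigned int bs = 1, alignmask = 15;    /* 16-byte alignment */

            printf("buggy: ALIGN(bs, alignmask)     = %u\n", ALIGN(bs, alignmask));
            printf("fixed: ALIGN(bs, alignmask + 1) = %u\n", ALIGN(bs, alignmask + 1));
            return 0;
    }

The buggy form yields 1 here, the fixed form 16, which is the size the IV buffer actually needs.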
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d5bcdd905007..bdde95e8d369 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -415,12 +415,14 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
 			}
 
-			if (secs)
+			if (secs) {
 				ret = test_mb_aead_jiffies(data, enc, *b_size,
 							   secs, num_mb);
-			else
+				cond_resched();
+			} else {
 				ret = test_mb_aead_cycles(data, enc, *b_size,
 							  num_mb);
+			}
 
 			if (ret) {
 				pr_err("%s() failed return code=%d\n", e, ret);
@@ -660,11 +662,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 					       *b_size + (enc ? 0 : authsize),
 					       iv);
 
-			if (secs)
+			if (secs) {
 				ret = test_aead_jiffies(req, enc, *b_size,
 							secs);
-			else
+				cond_resched();
+			} else {
 				ret = test_aead_cycles(req, enc, *b_size);
+			}
 
 			if (ret) {
 				pr_err("%s() failed return code=%d\n", e, ret);
@@ -876,11 +880,13 @@ static void test_mb_ahash_speed(const char *algo, unsigned int secs,
 		       i, speed[i].blen, speed[i].plen,
 		       speed[i].blen / speed[i].plen);
 
-		if (secs)
+		if (secs) {
 			ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
 						    num_mb);
-		else
+			cond_resched();
+		} else {
 			ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
+		}
 
 
 		if (ret) {
@@ -1103,12 +1109,14 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
 
 		ahash_request_set_crypt(req, sg, output, speed[i].plen);
 
-		if (secs)
+		if (secs) {
 			ret = test_ahash_jiffies(req, speed[i].blen,
 						 speed[i].plen, output, secs);
-		else
+			cond_resched();
+		} else {
 			ret = test_ahash_cycles(req, speed[i].blen,
 						speed[i].plen, output);
+		}
 
 		if (ret) {
 			pr_err("hashing failed ret=%d\n", ret);
@@ -1367,13 +1375,15 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
 						       iv);
 			}
 
-			if (secs)
+			if (secs) {
 				ret = test_mb_acipher_jiffies(data, enc,
 							      *b_size, secs,
 							      num_mb);
-			else
+				cond_resched();
+			} else {
 				ret = test_mb_acipher_cycles(data, enc,
 							     *b_size, num_mb);
+			}
 
 			if (ret) {
 				pr_err("%s() failed flags=%x\n", e,
@@ -1581,12 +1591,14 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 
 			skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
 
-			if (secs)
+			if (secs) {
 				ret = test_acipher_jiffies(req, enc,
 							   *b_size, secs);
-			else
+				cond_resched();
+			} else {
 				ret = test_acipher_cycles(req, enc,
 							  *b_size);
+			}
 
 			if (ret) {
 				pr_err("%s() failed flags=%x\n", e,
@@ -1939,7 +1951,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
 		break;
 
 	case 109:
-		ret += tcrypt_test("vmac(aes)");
+		ret += tcrypt_test("vmac64(aes)");
 		break;
 
 	case 111:
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 11e45352fd0b..a1d42245082a 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -259,9 +259,15 @@ out_nostate:
 	return ret;
 }
 
+enum hash_test {
+	HASH_TEST_DIGEST,
+	HASH_TEST_FINAL,
+	HASH_TEST_FINUP
+};
+
 static int __test_hash(struct crypto_ahash *tfm,
 		       const struct hash_testvec *template, unsigned int tcount,
-		       bool use_digest, const int align_offset)
+		       enum hash_test test_type, const int align_offset)
 {
 	const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
 	size_t digest_size = crypto_ahash_digestsize(tfm);
@@ -332,14 +338,17 @@ static int __test_hash(struct crypto_ahash *tfm,
 		}
 
 		ahash_request_set_crypt(req, sg, result, template[i].psize);
-		if (use_digest) {
+		switch (test_type) {
+		case HASH_TEST_DIGEST:
 			ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
 			if (ret) {
 				pr_err("alg: hash: digest failed on test %d "
 				       "for %s: ret=%d\n", j, algo, -ret);
 				goto out;
 			}
-		} else {
+			break;
+
+		case HASH_TEST_FINAL:
 			memset(result, 1, digest_size);
 			ret = crypto_wait_req(crypto_ahash_init(req), &wait);
 			if (ret) {
@@ -371,6 +380,29 @@ static int __test_hash(struct crypto_ahash *tfm,
 				       "for %s: ret=%d\n", j, algo, -ret);
 				goto out;
 			}
+			break;
+
+		case HASH_TEST_FINUP:
+			memset(result, 1, digest_size);
+			ret = crypto_wait_req(crypto_ahash_init(req), &wait);
+			if (ret) {
+				pr_err("alg: hash: init failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
+			}
+			ret = ahash_guard_result(result, 1, digest_size);
+			if (ret) {
+				pr_err("alg: hash: init failed on test %d "
+				       "for %s: used req->result\n", j, algo);
+				goto out;
+			}
+			ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
+			if (ret) {
+				pr_err("alg: hash: final failed on test %d "
+				       "for %s: ret=%d\n", j, algo, -ret);
+				goto out;
+			}
+			break;
 		}
 
 		if (memcmp(result, template[i].digest,
@@ -383,6 +415,9 @@ static int __test_hash(struct crypto_ahash *tfm,
 		}
 	}
 
+	if (test_type)
+		goto out;
+
 	j = 0;
 	for (i = 0; i < tcount; i++) {
 		/* alignment tests are only done with continuous buffers */
@@ -540,24 +575,24 @@ out_nobuf:
 
 static int test_hash(struct crypto_ahash *tfm,
 		     const struct hash_testvec *template,
-		     unsigned int tcount, bool use_digest)
+		     unsigned int tcount, enum hash_test test_type)
 {
 	unsigned int alignmask;
 	int ret;
 
-	ret = __test_hash(tfm, template, tcount, use_digest, 0);
+	ret = __test_hash(tfm, template, tcount, test_type, 0);
 	if (ret)
 		return ret;
 
 	/* test unaligned buffers, check with one byte offset */
-	ret = __test_hash(tfm, template, tcount, use_digest, 1);
+	ret = __test_hash(tfm, template, tcount, test_type, 1);
 	if (ret)
 		return ret;
 
 	alignmask = crypto_tfm_alg_alignmask(&tfm->base);
 	if (alignmask) {
 		/* Check if alignment mask for tfm is correctly set. */
-		ret = __test_hash(tfm, template, tcount, use_digest,
+		ret = __test_hash(tfm, template, tcount, test_type,
 				  alignmask + 1);
 		if (ret)
 			return ret;
@@ -1803,9 +1838,11 @@ static int __alg_test_hash(const struct hash_testvec *template,
 		return PTR_ERR(tfm);
 	}
 
-	err = test_hash(tfm, template, tcount, true);
+	err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST);
+	if (!err)
+		err = test_hash(tfm, template, tcount, HASH_TEST_FINAL);
 	if (!err)
-		err = test_hash(tfm, template, tcount, false);
+		err = test_hash(tfm, template, tcount, HASH_TEST_FINUP);
 	crypto_free_ahash(tfm);
 	return err;
 }
@@ -3478,10 +3515,10 @@ static const struct alg_test_desc alg_test_descs[] = {
 			.hash = __VECS(tgr192_tv_template)
 		}
 	}, {
-		.alg = "vmac(aes)",
+		.alg = "vmac64(aes)",
 		.test = alg_test_hash,
 		.suite = {
-			.hash = __VECS(aes_vmac128_tv_template)
+			.hash = __VECS(vmac64_aes_tv_template)
 		}
 	}, {
 		.alg = "wp256",
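The new HASH_TEST_FINUP case exercises the third way to drive a hash: finup() folds the last update() and final() into a single call and must produce the same digest as the separate calls. Demonstrated on a toy FNV-1a "API" (a hypothetical stand-in, purely to show the calling convention being tested):

    #include <stdint.h>
    #include <stdio.h>

    struct toy_hash { uint64_t h; };

    static void toy_init(struct toy_hash *s)     { s->h = 0xcbf29ce484222325ull; }
    static void toy_update(struct toy_hash *s, const void *d, size_t n)
    {
            for (const uint8_t *p = d; n--; p++)
                    s->h = (s->h ^ *p) * 0x100000001b3ull;
    }
    static uint64_t toy_final(struct toy_hash *s) { return s->h; }
    static uint64_t toy_finup(struct toy_hash *s, const void *d, size_t n)
    {
            toy_update(s, d, n);    /* finup == last update + final in one call */
            return toy_final(s);
    }

    int main(void)
    {
            struct toy_hash a, b;

            toy_init(&a); toy_update(&a, "abc", 3);
            toy_init(&b);
            printf("final: %016llx\nfinup: %016llx\n",
                   (unsigned long long)toy_final(&a),
                   (unsigned long long)toy_finup(&b, "abc", 3));
            return 0;
    }

Both lines print the same value; a driver whose finup() path diverges from init/update/final is exactly the kind of bug the new self-test catches.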
"\x00\x00\x00\x01" /* g_size */ #endif /* xa */ @@ -846,7 +850,7 @@ static const struct kpp_testvec dh_tv_template[] = { "\x5e\x5a\x64\xbd\xf6\x85\x04\xe8\x28\x6a\xac\xef\xce\x19\x8e\x9a" "\xfe\x75\xc0\x27\x69\xe3\xb3\x7b\x21\xa7\xb1\x16\xa4\x85\x23\xee" "\xb0\x1b\x04\x6e\xbd\xab\x16\xde\xfd\x86\x6b\xa9\x95\xd7\x0b\xfd", - .secret_size = 529, + .secret_size = 533, .b_public_size = 256, .expected_a_public_size = 256, .expected_ss_size = 256, @@ -4603,105 +4607,158 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = { } }; -static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01', - '\x02', '\x03', '\x02', '\x02', - '\x02', '\x04', '\x01', '\x07', - '\x04', '\x01', '\x04', '\x03',}; -static const char vmac_string2[128] = {'a', 'b', 'c',}; -static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c', - 'a', 'b', 'c', 'a', 'b', 'c', - 'a', 'b', 'c', 'a', 'b', 'c', - 'a', 'b', 'c', 'a', 'b', 'c', - 'a', 'b', 'c', 'a', 'b', 'c', - 'a', 'b', 'c', 'a', 'b', 'c', - 'a', 'b', 'c', 'a', 'b', 'c', - 'a', 'b', 'c', 'a', 'b', 'c', - }; +static const char vmac64_string1[144] = { + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + '\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02', + '\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03', +}; -static const char vmac_string4[17] = {'b', 'c', 'e', 'f', - 'i', 'j', 'l', 'm', - 'o', 'p', 'r', 's', - 't', 'u', 'w', 'x', 'z'}; +static const char vmac64_string2[144] = { + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + 'a', 'b', 'c', +}; -static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c', - 'o', 'l', 'k', ']', '%', - '9', '2', '7', '!', 'A'}; +static const char vmac64_string3[144] = { + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', + 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a', + 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', + 'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', + 'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a', + 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c', +}; -static const char vmac_string6[129] = {'p', 't', '*', '7', 'l', - 'i', '!', '#', 'w', '0', - 'z', '/', '4', 'A', 'n'}; +static const char vmac64_string4[33] = { + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + 'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm', + 'o', 'p', 'r', 's', 't', 'u', 'w', 'x', + 'z', +}; -static const struct hash_testvec aes_vmac128_tv_template[] = { - { +static const char vmac64_string5[143] = { + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + 'r', 'm', 'b', 't', 'c', 'o', 'l', 'k', + ']', '%', '9', '2', '7', '!', 'A', +}; + +static const char vmac64_string6[145] = { + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', + 'p', 't', '*', '7', 'l', 'i', '!', '#', + 'w', '0', 'z', '/', '4', 'A', 'n', +}; + +static const struct hash_testvec vmac64_aes_tv_template[] = { + { /* draft-krovetz-vmac-01 test vector 1 */ + .key = "abcdefghijklmnop", + .ksize = 16, + .plaintext = "\0\0\0\0\0\0\0\0bcdefghi", + .psize = 16, + .digest = "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b", + }, { /* draft-krovetz-vmac-01 test vector 2 */ + .key = "abcdefghijklmnop", + .ksize = 16, + .plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc", + .psize = 19, + .digest = "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5", + }, { /* draft-krovetz-vmac-01 test vector 3 */ 
+ .key = "abcdefghijklmnop", + .ksize = 16, + .plaintext = "\0\0\0\0\0\0\0\0bcdefghi" + "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc", + .psize = 64, + .digest = "\xe8\x42\x1f\x61\xd5\x73\xd2\x98", + }, { /* draft-krovetz-vmac-01 test vector 4 */ + .key = "abcdefghijklmnop", + .ksize = 16, + .plaintext = "\0\0\0\0\0\0\0\0bcdefghi" + "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" + "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" + "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" + "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" + "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc" + "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc", + .psize = 316, + .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe", + .tap = { 1, 100, 200, 15 }, + .np = 4, + }, { .key = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .plaintext = NULL, - .digest = "\x07\x58\x80\x35\x77\xa4\x7b\x54", - .psize = 0, .ksize = 16, + .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .psize = 16, + .digest = "\x54\x7b\xa4\x77\x35\x80\x58\x07", }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .plaintext = vmac_string1, - .digest = "\xce\xf5\x3c\xd3\xae\x68\x8c\xa1", - .psize = 128, - .ksize = 16, + .ksize = 16, + .plaintext = vmac64_string1, + .psize = sizeof(vmac64_string1), + .digest = "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce", }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .plaintext = vmac_string2, - .digest = "\xc9\x27\xb0\x73\x81\xbd\x14\x2d", - .psize = 128, - .ksize = 16, + .ksize = 16, + .plaintext = vmac64_string2, + .psize = sizeof(vmac64_string2), + .digest = "\x2d\x14\xbd\x81\x73\xb0\x27\xc9", }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", - .plaintext = vmac_string3, - .digest = "\x8d\x1a\x95\x8c\x98\x47\x0b\x19", - .psize = 128, - .ksize = 16, + .ksize = 16, + .plaintext = vmac64_string3, + .psize = sizeof(vmac64_string3), + .digest = "\x19\x0b\x47\x98\x8c\x95\x1a\x8d", }, { .key = "abcdefghijklmnop", - .plaintext = NULL, - .digest = "\x3b\x89\xa1\x26\x9e\x55\x8f\x84", - .psize = 0, .ksize = 16, + .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .psize = 16, + .digest = "\x84\x8f\x55\x9e\x26\xa1\x89\x3b", }, { - .key = "abcdefghijklmnop", - .plaintext = vmac_string1, - .digest = "\xab\x5e\xab\xb0\xf6\x8d\x74\xc2", - .psize = 128, - .ksize = 16, - }, { - .key = "abcdefghijklmnop", - .plaintext = vmac_string2, - .digest = "\x11\x15\x68\x42\x3d\x7b\x09\xdf", - .psize = 128, - .ksize = 16, - }, { - .key = "abcdefghijklmnop", - .plaintext = vmac_string3, - .digest = "\x8b\x32\x8f\xe1\xed\x8f\xfa\xd4", - .psize = 128, - .ksize = 16, - }, { - .key = "a09b5cd!f#07K\x00\x00\x00", - .plaintext = vmac_string4, - .digest = "\xab\xa5\x0f\xea\x42\x4e\xa1\x5f", - .psize = sizeof(vmac_string4), - .ksize = 16, - }, { - .key = "a09b5cd!f#07K\x00\x00\x00", - .plaintext = vmac_string5, - .digest = "\x25\x31\x98\xbc\x1d\xe8\x67\x60", - .psize = sizeof(vmac_string5), - .ksize = 16, - }, { - .key = "a09b5cd!f#07K\x00\x00\x00", - .plaintext = vmac_string6, - .digest = "\xc4\xae\x9b\x47\x95\x65\xeb\x41", - .psize = sizeof(vmac_string6), - .ksize = 16, + .key = "abcdefghijklmnop", + .ksize = 16, + .plaintext = vmac64_string1, + .psize = sizeof(vmac64_string1), + .digest = 
"\xc2\x74\x8d\xf6\xb0\xab\x5e\xab", + }, { + .key = "abcdefghijklmnop", + .ksize = 16, + .plaintext = vmac64_string2, + .psize = sizeof(vmac64_string2), + .digest = "\xdf\x09\x7b\x3d\x42\x68\x15\x11", + }, { + .key = "abcdefghijklmnop", + .ksize = 16, + .plaintext = vmac64_string3, + .psize = sizeof(vmac64_string3), + .digest = "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b", + }, { + .key = "a09b5cd!f#07K\x00\x00\x00", + .ksize = 16, + .plaintext = vmac64_string4, + .psize = sizeof(vmac64_string4), + .digest = "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab", + }, { + .key = "a09b5cd!f#07K\x00\x00\x00", + .ksize = 16, + .plaintext = vmac64_string5, + .psize = sizeof(vmac64_string5), + .digest = "\x60\x67\xe8\x1d\xbc\x98\x31\x25", + }, { + .key = "a09b5cd!f#07K\x00\x00\x00", + .ksize = 16, + .plaintext = vmac64_string6, + .psize = sizeof(vmac64_string6), + .digest = "\x41\xeb\x65\x95\x47\x9b\xae\xc4", }, }; diff --git a/crypto/tgr192.c b/crypto/tgr192.c index 321bc6ff2a9d..022d3dd76c3b 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c @@ -636,7 +636,6 @@ static struct shash_alg tgr_algs[3] = { { .descsize = sizeof(struct tgr192_ctx), .base = { .cra_name = "tgr192", - .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = TGR192_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -648,7 +647,6 @@ static struct shash_alg tgr_algs[3] = { { .descsize = sizeof(struct tgr192_ctx), .base = { .cra_name = "tgr160", - .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = TGR192_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -660,7 +658,6 @@ static struct shash_alg tgr_algs[3] = { { .descsize = sizeof(struct tgr192_ctx), .base = { .cra_name = "tgr128", - .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = TGR192_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/crypto/vmac.c b/crypto/vmac.c index df76a816cfb2..5f436dfdfc61 100644 --- a/crypto/vmac.c +++ b/crypto/vmac.c @@ -1,6 +1,10 @@ /* - * Modified to interface to the Linux kernel + * VMAC: Message Authentication Code using Universal Hashing + * + * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01 + * * Copyright (c) 2009, Intel Corporation. + * Copyright (c) 2018, Google Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -16,14 +20,15 @@ * Place - Suite 330, Boston, MA 02111-1307 USA. */ -/* -------------------------------------------------------------------------- - * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. - * This implementation is herby placed in the public domain. - * The authors offers no warranty. Use at your own risk. - * Please send bug reports to the authors. - * Last modified: 17 APR 08, 1700 PDT - * ----------------------------------------------------------------------- */ +/* + * Derived from: + * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai. + * This implementation is herby placed in the public domain. + * The authors offers no warranty. Use at your own risk. + * Last modified: 17 APR 08, 1700 PDT + */ +#include <asm/unaligned.h> #include <linux/init.h> #include <linux/types.h> #include <linux/crypto.h> @@ -31,10 +36,42 @@ #include <linux/scatterlist.h> #include <asm/byteorder.h> #include <crypto/scatterwalk.h> -#include <crypto/vmac.h> #include <crypto/internal/hash.h> /* + * User definable settings. 
diff --git a/crypto/vmac.c b/crypto/vmac.c
index df76a816cfb2..5f436dfdfc61 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -1,6 +1,10 @@
 /*
- * Modified to interface to the Linux kernel
+ * VMAC: Message Authentication Code using Universal Hashing
+ *
+ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
+ *
  * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2018, Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -16,14 +20,15 @@
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
 
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
+/*
+ * Derived from:
+ *    VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ *    This implementation is hereby placed in the public domain.
+ *    The authors offer no warranty.  Use at your own risk.
+ *    Last modified: 17 APR 08, 1700 PDT
+ */
 
+#include <asm/unaligned.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/crypto.h>
@@ -31,10 +36,42 @@
 #include <linux/scatterlist.h>
 #include <asm/byteorder.h>
 #include <crypto/scatterwalk.h>
-#include <crypto/vmac.h>
 #include <crypto/internal/hash.h>
 
 /*
+ * User-definable settings.
+ */
+#define VMAC_TAG_LEN    64
+#define VMAC_KEY_SIZE   128    /* Must be 128, 192 or 256 */
+#define VMAC_KEY_LEN    (VMAC_KEY_SIZE/8)
+#define VMAC_NHBYTES    128    /* Must be 2^i for some 3 < i < 13; standard = 128 */
+#define VMAC_NONCEBYTES 16
+
+/* per-transform (per-key) context */
+struct vmac_tfm_ctx {
+    struct crypto_cipher *cipher;
+    u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+    u64 polykey[2*VMAC_TAG_LEN/64];
+    u64 l3key[2*VMAC_TAG_LEN/64];
+};
+
+/* per-request context */
+struct vmac_desc_ctx {
+    union {
+        u8 partial[VMAC_NHBYTES];    /* partial block */
+        __le64 partial_words[VMAC_NHBYTES / 8];
+    };
+    unsigned int partial_size;    /* size of the partial block */
+    bool first_block_processed;
+    u64 polytmp[2*VMAC_TAG_LEN/64];    /* running total of L2-hash */
+    union {
+        u8 bytes[VMAC_NONCEBYTES];
+        __be64 pads[VMAC_NONCEBYTES / 8];
+    } nonce;
+    unsigned int nonce_size;    /* nonce bytes filled so far */
+};
+
+/*
  * Constants and masks
  */
 #define UINT64_C(x) x##ULL
@@ -318,13 +355,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
     } while (0)
 #endif
 
-static void vhash_abort(struct vmac_ctx *ctx)
-{
-    ctx->polytmp[0] = ctx->polykey[0] ;
-    ctx->polytmp[1] = ctx->polykey[1] ;
-    ctx->first_block_processed = 0;
-}
-
 static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
 {
     u64 rh, rl, t, z = 0;
@@ -364,280 +394,227 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
     return rl;
 }
 
-static void vhash_update(const unsigned char *m,
-            unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
-            struct vmac_ctx *ctx)
+/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
+static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
+             struct vmac_desc_ctx *dctx,
+             const __le64 *mptr, unsigned int blocks)
 {
-    u64 rh, rl, *mptr;
-    const u64 *kptr = (u64 *)ctx->nhkey;
-    int i;
-    u64 ch, cl;
-    u64 pkh = ctx->polykey[0];
-    u64 pkl = ctx->polykey[1];
-
-    if (!mbytes)
-        return;
-
-    BUG_ON(mbytes % VMAC_NHBYTES);
-
-    mptr = (u64 *)m;
-    i = mbytes / VMAC_NHBYTES;  /* Must be non-zero */
-
-    ch = ctx->polytmp[0];
-    cl = ctx->polytmp[1];
-
-    if (!ctx->first_block_processed) {
-        ctx->first_block_processed = 1;
+    const u64 *kptr = tctx->nhkey;
+    const u64 pkh = tctx->polykey[0];
+    const u64 pkl = tctx->polykey[1];
+    u64 ch = dctx->polytmp[0];
+    u64 cl = dctx->polytmp[1];
+    u64 rh, rl;
+
+    if (!dctx->first_block_processed) {
+        dctx->first_block_processed = true;
         nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
         rh &= m62;
         ADD128(ch, cl, rh, rl);
         mptr += (VMAC_NHBYTES/sizeof(u64));
-        i--;
+        blocks--;
     }
 
-    while (i--) {
+    while (blocks--) {
         nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
         rh &= m62;
         poly_step(ch, cl, pkh, pkl, rh, rl);
         mptr += (VMAC_NHBYTES/sizeof(u64));
     }
 
-    ctx->polytmp[0] = ch;
-    ctx->polytmp[1] = cl;
+    dctx->polytmp[0] = ch;
+    dctx->polytmp[1] = cl;
 }
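Restated as math, vhash_blocks() is Horner's rule for the L2 polynomial hash; a hedged summary, where NH, m62, ADD128 and poly_step are the primitives defined earlier in this file:

/*
 * Hedged restatement of vhash_blocks(), one 128-byte block at a time:
 *
 *    y = NH_K(block_0)                          first block: added,
 *                                               not multiplied (ADD128)
 *    y = y*k + NH_K(block_i)  (mod 2^127 - 1)   every later block
 *
 * where k is the 127-bit poly key (pkh:pkl) and each NH output has its
 * top two bits masked off (rh &= m62), so the 126-bit result always
 * fits below the modulus.
 */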
-static u64 vhash(unsigned char m[], unsigned int mbytes,
-        u64 *tagl, struct vmac_ctx *ctx)
+static int vmac_setkey(struct crypto_shash *tfm,
+               const u8 *key, unsigned int keylen)
 {
-    u64 rh, rl, *mptr;
-    const u64 *kptr = (u64 *)ctx->nhkey;
-    int i, remaining;
-    u64 ch, cl;
-    u64 pkh = ctx->polykey[0];
-    u64 pkl = ctx->polykey[1];
-
-    mptr = (u64 *)m;
-    i = mbytes / VMAC_NHBYTES;
-    remaining = mbytes % VMAC_NHBYTES;
-
-    if (ctx->first_block_processed) {
-        ch = ctx->polytmp[0];
-        cl = ctx->polytmp[1];
-    } else if (i) {
-        nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
-        ch &= m62;
-        ADD128(ch, cl, pkh, pkl);
-        mptr += (VMAC_NHBYTES/sizeof(u64));
-        i--;
-    } else if (remaining) {
-        nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
-        ch &= m62;
-        ADD128(ch, cl, pkh, pkl);
-        mptr += (VMAC_NHBYTES/sizeof(u64));
-        goto do_l3;
-    } else {/* Empty String */
-        ch = pkh; cl = pkl;
-        goto do_l3;
-    }
-
-    while (i--) {
-        nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
-        rh &= m62;
-        poly_step(ch, cl, pkh, pkl, rh, rl);
-        mptr += (VMAC_NHBYTES/sizeof(u64));
-    }
-    if (remaining) {
-        nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
-        rh &= m62;
-        poly_step(ch, cl, pkh, pkl, rh, rl);
-    }
-
-do_l3:
-    vhash_abort(ctx);
-    remaining *= 8;
-    return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
-}
+    struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+    __be64 out[2];
+    u8 in[16] = { 0 };
+    unsigned int i;
+    int err;
 
-static u64 vmac(unsigned char m[], unsigned int mbytes,
-        const unsigned char n[16], u64 *tagl,
-        struct vmac_ctx_t *ctx)
-{
-    u64 *in_n, *out_p;
-    u64 p, h;
-    int i;
-
-    in_n = ctx->__vmac_ctx.cached_nonce;
-    out_p = ctx->__vmac_ctx.cached_aes;
-
-    i = n[15] & 1;
-    if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
-        in_n[0] = *(u64 *)(n);
-        in_n[1] = *(u64 *)(n+8);
-        ((unsigned char *)in_n)[15] &= 0xFE;
-        crypto_cipher_encrypt_one(ctx->child,
-            (unsigned char *)out_p, (unsigned char *)in_n);
-
-        ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
+    if (keylen != VMAC_KEY_LEN) {
+        crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+        return -EINVAL;
     }
-    p = be64_to_cpup(out_p + i);
-    h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
-    return le64_to_cpu(p + h);
-}
 
-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
-{
-    u64 in[2] = {0}, out[2];
-    unsigned i;
-    int err = 0;
-
-    err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
+    err = crypto_cipher_setkey(tctx->cipher, key, keylen);
     if (err)
         return err;
 
     /* Fill nh key */
-    ((unsigned char *)in)[0] = 0x80;
-    for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
-        crypto_cipher_encrypt_one(ctx->child,
-            (unsigned char *)out, (unsigned char *)in);
-        ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
-        ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
-        ((unsigned char *)in)[15] += 1;
+    in[0] = 0x80;
+    for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
+        crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+        tctx->nhkey[i] = be64_to_cpu(out[0]);
+        tctx->nhkey[i+1] = be64_to_cpu(out[1]);
+        in[15]++;
     }
 
     /* Fill poly key */
-    ((unsigned char *)in)[0] = 0xC0;
-    in[1] = 0;
-    for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
-        crypto_cipher_encrypt_one(ctx->child,
-            (unsigned char *)out, (unsigned char *)in);
-        ctx->__vmac_ctx.polytmp[i] =
-            ctx->__vmac_ctx.polykey[i] =
-                be64_to_cpup(out) & mpoly;
-        ctx->__vmac_ctx.polytmp[i+1] =
-            ctx->__vmac_ctx.polykey[i+1] =
-                be64_to_cpup(out+1) & mpoly;
-        ((unsigned char *)in)[15] += 1;
+    in[0] = 0xC0;
+    in[15] = 0;
+    for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
+        crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+        tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
+        tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
+        in[15]++;
     }
 
     /* Fill ip key */
-    ((unsigned char *)in)[0] = 0xE0;
-    in[1] = 0;
-    for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
+    in[0] = 0xE0;
+    in[15] = 0;
+    for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
         do {
-            crypto_cipher_encrypt_one(ctx->child,
-                (unsigned char *)out, (unsigned char *)in);
-            ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
-            ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
-            ((unsigned char *)in)[15] += 1;
-        } while (ctx->__vmac_ctx.l3key[i] >= p64
-            || ctx->__vmac_ctx.l3key[i+1] >= p64);
+            crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+            tctx->l3key[i] = be64_to_cpu(out[0]);
+            tctx->l3key[i+1] = be64_to_cpu(out[1]);
+            in[15]++;
+        } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
     }
 
-    /* Invalidate nonce/aes cache and reset other elements */
-    ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
-    ctx->__vmac_ctx.cached_nonce[1] = (u64)0;  /* Ensure illegal nonce */
-    ctx->__vmac_ctx.first_block_processed = 0;
-
-    return err;
+    return 0;
 }
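The three loops in the new vmac_setkey() share one pattern: encrypt a counter block whose first byte is a per-subkey domain separator and split the ciphertext into two big-endian 64-bit words, with the poly keys additionally masked by mpoly and the L3 keys rejection-sampled below p64. A hedged helper, hypothetical and not in the patch, that captures the common step:

/*
 * Hedged sketch (hypothetical helper, not in the patch): one step of
 * the subkey derivation that vmac_setkey() above performs three times.
 */
static void vmac_derive_pair(struct crypto_cipher *cipher, u8 domain,
                             u8 counter, u64 *hi, u64 *lo)
{
    u8 in[16] = { 0 };
    __be64 out[2];

    in[0] = domain;        /* 0x80 = NH, 0xC0 = poly, 0xE0 = L3 */
    in[15] = counter;      /* incremented per pair of words */
    crypto_cipher_encrypt_one(cipher, (u8 *)out, in);
    *hi = be64_to_cpu(out[0]);
    *lo = be64_to_cpu(out[1]);
}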
-static int vmac_setkey(struct crypto_shash *parent,
-        const u8 *key, unsigned int keylen)
+static int vmac_init(struct shash_desc *desc)
 {
-    struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+    const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+    struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
 
-    if (keylen != VMAC_KEY_LEN) {
-        crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
-        return -EINVAL;
-    }
-
-    return vmac_set_key((u8 *)key, ctx);
-}
-
-static int vmac_init(struct shash_desc *pdesc)
-{
+    dctx->partial_size = 0;
+    dctx->first_block_processed = false;
+    memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
+    dctx->nonce_size = 0;
     return 0;
 }
 
-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
-        unsigned int len)
+static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
 {
-    struct crypto_shash *parent = pdesc->tfm;
-    struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-    int expand;
-    int min;
-
-    expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
-            VMAC_NHBYTES - ctx->partial_size : 0;
-
-    min = len < expand ? len : expand;
+    const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+    struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+    unsigned int n;
+
+    /* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
+    if (dctx->nonce_size < VMAC_NONCEBYTES) {
+        n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
+        memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
+        dctx->nonce_size += n;
+        p += n;
+        len -= n;
+    }
 
-    memcpy(ctx->partial + ctx->partial_size, p, min);
-    ctx->partial_size += min;
+    if (dctx->partial_size) {
+        n = min(len, VMAC_NHBYTES - dctx->partial_size);
+        memcpy(&dctx->partial[dctx->partial_size], p, n);
+        dctx->partial_size += n;
+        p += n;
+        len -= n;
+        if (dctx->partial_size == VMAC_NHBYTES) {
+            vhash_blocks(tctx, dctx, dctx->partial_words, 1);
+            dctx->partial_size = 0;
+        }
+    }
 
-    if (len < expand)
-        return 0;
+    if (len >= VMAC_NHBYTES) {
+        n = round_down(len, VMAC_NHBYTES);
+        /* TODO: 'p' may be misaligned here */
+        vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
+        p += n;
+        len -= n;
+    }
 
-    vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
-    ctx->partial_size = 0;
+    if (len) {
+        memcpy(dctx->partial, p, len);
+        dctx->partial_size = len;
+    }
 
-    len -= expand;
-    p += expand;
+    return 0;
+}
 
-    if (len % VMAC_NHBYTES) {
-        memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
-            len % VMAC_NHBYTES);
-        ctx->partial_size = len % VMAC_NHBYTES;
+static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
+               struct vmac_desc_ctx *dctx)
+{
+    unsigned int partial = dctx->partial_size;
+    u64 ch = dctx->polytmp[0];
+    u64 cl = dctx->polytmp[1];
+
+    /* L1 and L2-hash the final block if needed */
+    if (partial) {
+        /* Zero-pad to next 128-bit boundary */
+        unsigned int n = round_up(partial, 16);
+        u64 rh, rl;
+
+        memset(&dctx->partial[partial], 0, n - partial);
+        nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
+        rh &= m62;
+        if (dctx->first_block_processed)
+            poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
+                  rh, rl);
+        else
+            ADD128(ch, cl, rh, rl);
     }
 
-    vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
-
-    return 0;
+    /* L3-hash the 128-bit output of L2-hash */
+    return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
 }
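To make the streaming logic above concrete, a hedged walkthrough of a hypothetical call sequence, assuming VMAC_NONCEBYTES = 16 and VMAC_NHBYTES = 128:

/*
 * vmac_update(desc, p, 10)   -> all 10 bytes land in nonce.bytes
 *                               (nonce_size = 10)
 * vmac_update(desc, p, 200)  -> 6 bytes finish the nonce; of the 194
 *                               left, one full 128-byte block is hashed
 *                               in place by vhash_blocks() and the
 *                               66-byte tail is stashed in partial[]
 * vmac_update(desc, p, 62)   -> tops partial[] up to 128 bytes, which
 *                               are flushed through partial_words
 * vmac_final(desc, out)      -> vhash_final() zero-pads any leftover
 *                               partial bytes to a 16-byte boundary and
 *                               feeds the true bit length to l3hash()
 */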
-static int vmac_final(struct shash_desc *pdesc, u8 *out)
+static int vmac_final(struct shash_desc *desc, u8 *out)
 {
-    struct crypto_shash *parent = pdesc->tfm;
-    struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
-    vmac_t mac;
-    u8 nonce[16] = {};
-
-    /* vmac() ends up accessing outside the array bounds that
-     * we specify.  In appears to access up to the next 2-word
-     * boundary.  We'll just be uber cautious and zero the
-     * unwritten bytes in the buffer.
+    const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+    struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+    int index;
+    u64 hash, pad;
+
+    if (dctx->nonce_size != VMAC_NONCEBYTES)
+        return -EINVAL;
+
+    /*
+     * The VMAC specification requires a nonce at least 1 bit shorter than
+     * the block cipher's block length, so we actually only accept a
+     * 127-bit nonce.  We define the unused bit to be the first one and
+     * require that it be 0, so the needed prepending of a 0 bit is
+     * implicit.
      */
-    if (ctx->partial_size) {
-        memset(ctx->partial + ctx->partial_size, 0,
-            VMAC_NHBYTES - ctx->partial_size);
-    }
-    mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
-    memcpy(out, &mac, sizeof(vmac_t));
-    memzero_explicit(&mac, sizeof(vmac_t));
-    memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
-    ctx->partial_size = 0;
+    if (dctx->nonce.bytes[0] & 0x80)
+        return -EINVAL;
+
+    /* Finish calculating the VHASH of the message */
+    hash = vhash_final(tctx, dctx);
+
+    /* Generate pseudorandom pad by encrypting the nonce */
+    BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
+    index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
+    dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
+    crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
+                  dctx->nonce.bytes);
+    pad = be64_to_cpu(dctx->nonce.pads[index]);
+
+    /* The VMAC is the sum of VHASH and the pseudorandom pad */
+    put_unaligned_be64(hash + pad, out);
+
     return 0;
 }
 
 static int vmac_init_tfm(struct crypto_tfm *tfm)
 {
-    struct crypto_cipher *cipher;
-    struct crypto_instance *inst = (void *)tfm->__crt_alg;
+    struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
     struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-    struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+    struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+    struct crypto_cipher *cipher;
 
     cipher = crypto_spawn_cipher(spawn);
     if (IS_ERR(cipher))
         return PTR_ERR(cipher);
 
-    ctx->child = cipher;
+    tctx->cipher = cipher;
     return 0;
 }
 
 static void vmac_exit_tfm(struct crypto_tfm *tfm)
 {
-    struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
-    crypto_free_cipher(ctx->child);
+    struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+    crypto_free_cipher(tctx->cipher);
 }
 
 static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -655,7 +632,11 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
     if (IS_ERR(alg))
         return PTR_ERR(alg);
 
-    inst = shash_alloc_instance("vmac", alg);
+    err = -EINVAL;
+    if (alg->cra_blocksize != VMAC_NONCEBYTES)
+        goto out_put_alg;
+
+    inst = shash_alloc_instance(tmpl->name, alg);
     err = PTR_ERR(inst);
     if (IS_ERR(inst))
         goto out_put_alg;
@@ -670,11 +651,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
     inst->alg.base.cra_blocksize = alg->cra_blocksize;
     inst->alg.base.cra_alignmask = alg->cra_alignmask;
 
-    inst->alg.digestsize = sizeof(vmac_t);
-    inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
+    inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
     inst->alg.base.cra_init = vmac_init_tfm;
     inst->alg.base.cra_exit = vmac_exit_tfm;
 
+    inst->alg.descsize = sizeof(struct vmac_desc_ctx);
+    inst->alg.digestsize = VMAC_TAG_LEN / 8;
     inst->alg.init = vmac_init;
     inst->alg.update = vmac_update;
     inst->alg.final = vmac_final;
@@ -691,8 +673,8 @@ out_put_alg:
     return err;
 }
 
-static struct crypto_template vmac_tmpl = {
-    .name = "vmac",
+static struct crypto_template vmac64_tmpl = {
+    .name = "vmac64",
     .create = vmac_create,
     .free = shash_free_instance,
     .module = THIS_MODULE,
@@ -700,12 +682,12 @@ static struct crypto_template vmac_tmpl = {
 
 static int __init vmac_module_init(void)
 {
-    return crypto_register_template(&vmac_tmpl);
+    return crypto_register_template(&vmac64_tmpl);
 }
 
 static void __exit vmac_module_exit(void)
 {
-    crypto_unregister_template(&vmac_tmpl);
+    crypto_unregister_template(&vmac64_tmpl);
 }
 
 module_init(vmac_module_init);
@@ -713,4 +695,4 @@ module_exit(vmac_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("VMAC hash algorithm");
-MODULE_ALIAS_CRYPTO("vmac");
+MODULE_ALIAS_CRYPTO("vmac64");
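Taken together, vmac_final() is the classic Wegman-Carter composition; a hedged restatement, writing K for the cipher key and n for the 127-bit nonce:

/*
 *    index = n & 1                 (last nonce bit)
 *    pad   = half 'index' of E_K(n with its last bit cleared)
 *    tag   = (VHASH(message) + pad) mod 2^64
 *
 * Nonces n and n|1 share one block-cipher invocation and take opposite
 * 64-bit halves of its output; that is the property the BUILD_BUG_ON
 * on VMAC_NONCEBYTES == 2 * (VMAC_TAG_LEN / 8) protects.
 */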
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 7ee5a043a988..149e577fb772 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1127,7 +1127,6 @@ static struct shash_alg wp_algs[3] = { {
     .descsize = sizeof(struct wp512_ctx),
     .base = {
         .cra_name = "wp512",
-        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
         .cra_blocksize = WP512_BLOCK_SIZE,
         .cra_module = THIS_MODULE,
     }
@@ -1139,7 +1138,6 @@ static struct shash_alg wp_algs[3] = { {
     .descsize = sizeof(struct wp512_ctx),
     .base = {
         .cra_name = "wp384",
-        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
         .cra_blocksize = WP512_BLOCK_SIZE,
         .cra_module = THIS_MODULE,
     }
@@ -1151,7 +1149,6 @@ static struct shash_alg wp_algs[3] = { {
     .descsize = sizeof(struct wp512_ctx),
     .base = {
         .cra_name = "wp256",
-        .cra_flags = CRYPTO_ALG_TYPE_SHASH,
         .cra_blocksize = WP512_BLOCK_SIZE,
         .cra_module = THIS_MODULE,
     }
diff --git a/crypto/xts.c b/crypto/xts.c
index 12284183bd20..ccf55fbb8bc2 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -138,7 +138,7 @@ static int post_crypt(struct skcipher_request *req)
     if (rctx->dst != sg) {
         rctx->dst[0] = *sg;
         sg_unmark_end(rctx->dst);
-        scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+        scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
     }
     rctx->dst[0].length -= offset - sg->offset;
     rctx->dst[0].offset = offset;
@@ -204,7 +204,7 @@ static int pre_crypt(struct skcipher_request *req)
     if (rctx->src != sg) {
         rctx->src[0] = *sg;
         sg_unmark_end(rctx->src);
-        scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+        scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
     }
     rctx->src[0].length -= offset - sg->offset;
     rctx->src[0].offset = offset;
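The two xts.c hunks track a scatterwalk cleanup from the same series: scatterwalk_crypto_chain() lost its always-zero 'chain' argument, leaving (head, next, num). From memory of that cleanup, hedged and worth verifying against include/crypto/scatterwalk.h, the helper reduces to:

/*
 * Hedged sketch of the post-cleanup helper: chain 'sg' behind the
 * 'num'-entry private list 'head', or terminate 'head' if nothing
 * follows.
 */
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
                                            struct scatterlist *sg, int num)
{
    if (sg)
        sg_chain(head, num, sg);
    else
        sg_mark_end(head);
}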