author     Eric Biggers <ebiggers@google.com>        2018-02-19 23:48:25 -0800
committer  Herbert Xu <herbert@gondor.apana.org.au>  2018-03-03 00:03:34 +0800
commit     217afccf65064709fb032652ee17cc0a8f68b7b5
tree       a23d950380c22371b64cddbfc5f05aef22db6756
parent     eb66ecd56107e563de65121866990ec07142245d
crypto: lrw - remove lrw_crypt()
Now that all users of lrw_crypt() have been removed in favor of the LRW
template wrapping an ECB mode algorithm, remove lrw_crypt(). Also
remove crypto/lrw.h as that is no longer needed either; and fold
'struct lrw_table_ctx' into 'struct priv', lrw_init_table() into
setkey(), and lrw_free_table() into exit_tfm().
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
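
With lrw_crypt() gone, in-kernel users reach LRW only through the generic skcipher API, by requesting the "lrw(aes)" template (the lrw template wrapping an ECB implementation of AES) instead of driving the tweak computation themselves. The sketch below is illustrative and not part of this patch: the helper name, the 512-byte sector length and the synchronous-wait handling are assumptions, while the crypto API calls themselves are the standard skcipher ones. As the new setkey() in the diff below expects, the key buffer is the cipher key with the 16-byte tweak key (key2) appended.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/skcipher.h>

/*
 * Hypothetical helper (not from this patch): encrypt one 512-byte sector in
 * place with LRW(AES).  "key" is the AES key with the 16-byte tweak key
 * (key2) appended; "iv" is the 16-byte big-endian block index used as the
 * starting tweak.
 */
static int lrw_encrypt_sector(u8 *buf, const u8 *key, unsigned int keylen,
                              u8 iv[16])
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        /* resolves to the lrw template wrapping ecb(aes) underneath */
        tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, 512);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, 512, iv);

        /* wait for completion even if the underlying driver is async */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}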
-rw-r--r--  arch/x86/crypto/glue_helper.c |   1
-rw-r--r--  crypto/lrw.c                  | 152
-rw-r--r--  include/crypto/lrw.h          |  44
3 files changed, 39 insertions, 158 deletions
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index 5b909790bf8a..cd5e7cebdb9f 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -29,7 +29,6 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 #include <crypto/internal/skcipher.h>
-#include <crypto/lrw.h>
 #include <crypto/xts.h>
 #include <asm/crypto/glue_helper.h>
diff --git a/crypto/lrw.c b/crypto/lrw.c
index cbbd7c50ad19..a09cdaa6ddf3 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -28,13 +28,31 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
-#include <crypto/lrw.h>
 
 #define LRW_BUFFER_SIZE 128u
 
+#define LRW_BLOCK_SIZE 16
+
 struct priv {
         struct crypto_skcipher *child;
-        struct lrw_table_ctx table;
+
+        /*
+         * optimizes multiplying a random (non incrementing, as at the
+         * start of a new sector) value with key2, we could also have
+         * used 4k optimization tables or no optimization at all. In the
+         * latter case we would have to store key2 here
+         */
+        struct gf128mul_64k *table;
+
+        /*
+         * stores:
+         *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
+         *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
+         *  key2*{ 0,0,...1,1,1,1,1 }, etc
+         * needed for optimized multiplication of incrementing values
+         * with key2
+         */
+        be128 mulinc[128];
 };
 
 struct rctx {
@@ -65,11 +83,25 @@ static inline void setbit128_bbe(void *b, int bit)
         ), b);
 }
 
-int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
+static int setkey(struct crypto_skcipher *parent, const u8 *key,
+                  unsigned int keylen)
 {
+        struct priv *ctx = crypto_skcipher_ctx(parent);
+        struct crypto_skcipher *child = ctx->child;
+        int err, bsize = LRW_BLOCK_SIZE;
+        const u8 *tweak = key + keylen - bsize;
         be128 tmp = { 0 };
         int i;
 
+        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+        crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+                                         CRYPTO_TFM_REQ_MASK);
+        err = crypto_skcipher_setkey(child, key, keylen - bsize);
+        crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+                                          CRYPTO_TFM_RES_MASK);
+        if (err)
+                return err;
+
         if (ctx->table)
                 gf128mul_free_64k(ctx->table);
 
@@ -87,34 +119,6 @@ int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
         return 0;
 }
-EXPORT_SYMBOL_GPL(lrw_init_table);
-
-void lrw_free_table(struct lrw_table_ctx *ctx)
-{
-        if (ctx->table)
-                gf128mul_free_64k(ctx->table);
-}
-EXPORT_SYMBOL_GPL(lrw_free_table);
-
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
-                  unsigned int keylen)
-{
-        struct priv *ctx = crypto_skcipher_ctx(parent);
-        struct crypto_skcipher *child = ctx->child;
-        int err, bsize = LRW_BLOCK_SIZE;
-        const u8 *tweak = key + keylen - bsize;
-
-        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-        crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
-                                         CRYPTO_TFM_REQ_MASK);
-        err = crypto_skcipher_setkey(child, key, keylen - bsize);
-        crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
-                                          CRYPTO_TFM_RES_MASK);
-        if (err)
-                return err;
-
-        return lrw_init_table(&ctx->table, tweak);
-}
 
 static inline void inc(be128 *iv)
 {
@@ -238,7 +242,7 @@ static int pre_crypt(struct skcipher_request *req)
                         /* T <- I*Key2, using the optimization
                          * discussed in the specification */
                         be128_xor(&rctx->t, &rctx->t,
-                                  &ctx->table.mulinc[get_index128(iv)]);
+                                  &ctx->mulinc[get_index128(iv)]);
                         inc(iv);
                 } while ((avail -= bs) >= bs);
@@ -301,7 +305,7 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
         memcpy(&rctx->t, req->iv, sizeof(rctx->t));
 
         /* T <- I*Key2 */
-        gf128mul_64k_bbe(&rctx->t, ctx->table.table);
+        gf128mul_64k_bbe(&rctx->t, ctx->table);
 
         return 0;
 }
@@ -416,85 +420,6 @@ static int decrypt(struct skcipher_request *req)
         return do_decrypt(req, init_crypt(req, decrypt_done));
 }
 
-int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
-              struct scatterlist *ssrc, unsigned int nbytes,
-              struct lrw_crypt_req *req)
-{
-        const unsigned int bsize = LRW_BLOCK_SIZE;
-        const unsigned int max_blks = req->tbuflen / bsize;
-        struct lrw_table_ctx *ctx = req->table_ctx;
-        struct blkcipher_walk walk;
-        unsigned int nblocks;
-        be128 *iv, *src, *dst, *t;
-        be128 *t_buf = req->tbuf;
-        int err, i;
-
-        BUG_ON(max_blks < 1);
-
-        blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
-
-        err = blkcipher_walk_virt(desc, &walk);
-        nbytes = walk.nbytes;
-        if (!nbytes)
-                return err;
-
-        nblocks = min(walk.nbytes / bsize, max_blks);
-        src = (be128 *)walk.src.virt.addr;
-        dst = (be128 *)walk.dst.virt.addr;
-
-        /* calculate first value of T */
-        iv = (be128 *)walk.iv;
-        t_buf[0] = *iv;
-
-        /* T <- I*Key2 */
-        gf128mul_64k_bbe(&t_buf[0], ctx->table);
-
-        i = 0;
-        goto first;
-
-        for (;;) {
-                do {
-                        for (i = 0; i < nblocks; i++) {
-                                /* T <- I*Key2, using the optimization
-                                 * discussed in the specification */
-                                be128_xor(&t_buf[i], t,
-                                          &ctx->mulinc[get_index128(iv)]);
-                                inc(iv);
-first:
-                                t = &t_buf[i];
-
-                                /* PP <- T xor P */
-                                be128_xor(dst + i, t, src + i);
-                        }
-
-                        /* CC <- E(Key2,PP) */
-                        req->crypt_fn(req->crypt_ctx, (u8 *)dst,
-                                      nblocks * bsize);
-
-                        /* C <- T xor CC */
-                        for (i = 0; i < nblocks; i++)
-                                be128_xor(dst + i, dst + i, &t_buf[i]);
-
-                        src += nblocks;
-                        dst += nblocks;
-                        nbytes -= nblocks * bsize;
-                        nblocks = min(nbytes / bsize, max_blks);
-                } while (nblocks > 0);
-
-                err = blkcipher_walk_done(desc, &walk, nbytes);
-                nbytes = walk.nbytes;
-                if (!nbytes)
-                        break;
-
-                nblocks = min(nbytes / bsize, max_blks);
-                src = (be128 *)walk.src.virt.addr;
-                dst = (be128 *)walk.dst.virt.addr;
-        }
-
-        return err;
-}
-EXPORT_SYMBOL_GPL(lrw_crypt);
-
 static int init_tfm(struct crypto_skcipher *tfm)
 {
         struct skcipher_instance *inst = skcipher_alg_instance(tfm);
@@ -518,7 +443,8 @@ static void exit_tfm(struct crypto_skcipher *tfm)
 {
         struct priv *ctx = crypto_skcipher_ctx(tfm);
 
-        lrw_free_table(&ctx->table);
+        if (ctx->table)
+                gf128mul_free_64k(ctx->table);
 
         crypto_free_skcipher(ctx->child);
 }
diff --git a/include/crypto/lrw.h b/include/crypto/lrw.h
deleted file mode 100644
index a9d44c06d081..000000000000
--- a/include/crypto/lrw.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CRYPTO_LRW_H
-#define _CRYPTO_LRW_H
-
-#include <crypto/b128ops.h>
-
-struct scatterlist;
-struct gf128mul_64k;
-struct blkcipher_desc;
-
-#define LRW_BLOCK_SIZE 16
-
-struct lrw_table_ctx {
-        /* optimizes multiplying a random (non incrementing, as at the
-         * start of a new sector) value with key2, we could also have
-         * used 4k optimization tables or no optimization at all. In the
-         * latter case we would have to store key2 here */
-        struct gf128mul_64k *table;
-        /* stores:
-         *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
-         *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
-         *  key2*{ 0,0,...1,1,1,1,1 }, etc
-         * needed for optimized multiplication of incrementing values
-         * with key2 */
-        be128 mulinc[128];
-};
-
-int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak);
-void lrw_free_table(struct lrw_table_ctx *ctx);
-
-struct lrw_crypt_req {
-        be128 *tbuf;
-        unsigned int tbuflen;
-
-        struct lrw_table_ctx *table_ctx;
-        void *crypt_ctx;
-        void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
-};
-
-int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-              struct scatterlist *src, unsigned int nbytes,
-              struct lrw_crypt_req *req);
-
-#endif /* _CRYPTO_LRW_H */
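
The comment blocks folded into 'struct priv' describe the two tweak-multiplication strategies: gf128mul_64k_bbe() computes T = I*Key2 for an arbitrary starting block index I, while the mulinc[] table lets pre_crypt() step to the next index with a single XOR. That works because multiplication by Key2 in GF(2^128) distributes over XOR, and I ^ (I+1) is always a run of trailing one bits, which is exactly what get_index128() counts. The standalone program below is only a toy check of that identity in GF(2^8); the field, the reduction polynomial and the key value are illustrative and are not the kernel's gf128mul code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Carryless multiply in GF(2^8) modulo x^8 + x^4 + x^3 + x + 1 (0x11b). */
static uint8_t gf8_mul(uint8_t a, uint8_t b)
{
        uint16_t acc = 0;
        int i;

        for (i = 0; i < 8; i++)
                if (b & (1u << i))
                        acc ^= (uint16_t)a << i;

        for (i = 15; i >= 8; i--)       /* reduce modulo the field polynomial */
                if (acc & (1u << i))
                        acc ^= 0x11b << (i - 8);

        return (uint8_t)acc;
}

int main(void)
{
        const uint8_t key2 = 0x57;      /* arbitrary nonzero tweak key */
        unsigned int i;

        for (i = 0; i < 255; i++) {
                unsigned int t = 0;

                while ((i >> t) & 1)    /* trailing ones, as get_index128() counts */
                        t++;

                /* incrementing I flips exactly the low t+1 bits ... */
                uint8_t step = (uint8_t)(i ^ (i + 1));
                assert(step == (uint8_t)((1u << (t + 1)) - 1));

                /* ... so key2*(I+1) = key2*I xor key2*step, the mulinc[] entry */
                assert(gf8_mul(key2, (uint8_t)(i + 1)) ==
                       (gf8_mul(key2, (uint8_t)i) ^ gf8_mul(key2, step)));
        }

        printf("incremental LRW tweak update identity holds for all 8-bit I\n");
        return 0;
}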