Diffstat (limited to 'drivers/md/dm-crypt.c')
 drivers/md/dm-crypt.c | 1253
 1 file changed, 1012 insertions(+), 241 deletions(-)
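This patch allows dm-crypt to use authenticated (AEAD) ciphers and to store per-sector authentication tags, and optionally per-sector IVs, in integrity metadata provided by a dm-integrity device stacked underneath. It also introduces the "capi:" cipher-specification prefix and the integrity:, sector_size: and iv_large_sectors table options. For orientation, a mapping table using the new syntax could look as follows (a hypothetical example: length, device and key are placeholders, and the underlying device must already expose the "DM-DIF-EXT-TAG" integrity profile with a matching tag size):

    0 409600 crypt capi:gcm(aes)-random <key> 0 /dev/mapper/integrity0 0 1 integrity:28:aead

With gcm(aes), the 28 bytes of per-sector metadata hold the 16-byte authentication tag plus the 12-byte IV that the new "random" IV generator stores alongside it.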
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index ef1d836bd81b..ebf9e72d479b 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1,8 +1,8 @@ /* * Copyright (C) 2003 Jana Saout <jana@saout.de> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> - * Copyright (C) 2006-2015 Red Hat, Inc. All rights reserved. - * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com> + * Copyright (C) 2006-2017 Red Hat, Inc. All rights reserved. + * Copyright (C) 2013-2017 Milan Broz <gmazyland@gmail.com> * * This file is released under the GPL. */ @@ -31,6 +31,9 @@ #include <crypto/md5.h> #include <crypto/algapi.h> #include <crypto/skcipher.h> +#include <crypto/aead.h> +#include <crypto/authenc.h> +#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */ #include <keys/user-type.h> #include <linux/device-mapper.h> @@ -48,7 +51,11 @@ struct convert_context { struct bvec_iter iter_out; sector_t cc_sector; atomic_t cc_pending; - struct skcipher_request *req; + union { + struct skcipher_request *req; + struct aead_request *req_aead; + } r; + }; /* @@ -57,6 +64,8 @@ struct convert_context { struct dm_crypt_io { struct crypt_config *cc; struct bio *base_bio; + u8 *integrity_metadata; + bool integrity_metadata_from_pool; struct work_struct work; struct convert_context ctx; @@ -70,8 +79,8 @@ struct dm_crypt_io { struct dm_crypt_request { struct convert_context *ctx; - struct scatterlist sg_in; - struct scatterlist sg_out; + struct scatterlist sg_in[4]; + struct scatterlist sg_out[4]; sector_t iv_sector; }; @@ -118,6 +127,11 @@ struct iv_tcw_private { enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; +enum cipher_flags { + CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */ + CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */ +}; + /* * The fields in here must be read only after initialization. */ @@ -126,11 +140,14 @@ struct crypt_config { sector_t start; /* - * pool for per bio private data, crypto requests and - * encryption requeusts/buffer pages + * pool for per bio private data, crypto requests, + * encryption requests/buffer pages and integrity tags */ mempool_t *req_pool; mempool_t *page_pool; + mempool_t *tag_pool; + unsigned tag_pool_max_sectors; + struct bio_set *bs; struct mutex bio_alloc_lock; @@ -143,6 +160,7 @@ struct crypt_config { char *cipher; char *cipher_string; + char *cipher_auth; char *key_string; const struct crypt_iv_operations *iv_gen_ops; @@ -154,11 +172,17 @@ struct crypt_config { } iv_gen_private; sector_t iv_offset; unsigned int iv_size; + unsigned short int sector_size; + unsigned char sector_shift; /* ESSIV: struct crypto_cipher *essiv_tfm */ void *iv_private; - struct crypto_skcipher **tfms; + union { + struct crypto_skcipher **tfms; + struct crypto_aead **tfms_aead; + } cipher_tfm; unsigned tfms_count; + unsigned long cipher_flags; /* * Layout of each crypto request: @@ -181,21 +205,36 @@ struct crypt_config { unsigned int key_size; unsigned int key_parts; /* independent parts in key buffer */ unsigned int key_extra_size; /* additional keys length */ + unsigned int key_mac_size; /* MAC key size for authenc(...)
*/ + + unsigned int integrity_tag_size; + unsigned int integrity_iv_size; + unsigned int on_disk_tag_size; + + u8 *authenc_key; /* space for keys in authenc() format (if used) */ u8 key[0]; }; -#define MIN_IOS 64 +#define MIN_IOS 64 +#define MAX_TAG_SIZE 480 +#define POOL_ENTRY_SIZE 512 static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); -static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); +static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, + struct scatterlist *sg); /* - * Use this to access cipher attributes that are the same for each CPU. + * Use this to access cipher attributes that are independent of the key. */ static struct crypto_skcipher *any_tfm(struct crypt_config *cc) { - return cc->tfms[0]; + return cc->cipher_tfm.tfms[0]; +} + +static struct crypto_aead *any_tfm_aead(struct crypt_config *cc) +{ + return cc->cipher_tfm.tfms_aead[0]; } /* @@ -310,10 +349,11 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc) return err; } -/* Set up per cpu cipher state */ -static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, - struct dm_target *ti, - u8 *salt, unsigned saltsize) +/* Allocate the cipher for ESSIV */ +static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc, + struct dm_target *ti, + const u8 *salt, + unsigned int saltsize) { struct crypto_cipher *essiv_tfm; int err; @@ -325,8 +365,7 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, return essiv_tfm; } - if (crypto_cipher_blocksize(essiv_tfm) != - crypto_skcipher_ivsize(any_tfm(cc))) { + if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) { ti->error = "Block size of ESSIV cipher does " "not match IV size of block cipher"; crypto_free_cipher(essiv_tfm); @@ -393,8 +432,8 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, cc->iv_gen_private.essiv.salt = salt; cc->iv_gen_private.essiv.hash_tfm = hash_tfm; - essiv_tfm = setup_essiv_cpu(cc, ti, salt, - crypto_ahash_digestsize(hash_tfm)); + essiv_tfm = alloc_essiv_cipher(cc, ti, salt, + crypto_ahash_digestsize(hash_tfm)); if (IS_ERR(essiv_tfm)) { crypt_iv_essiv_dtr(cc); return PTR_ERR(essiv_tfm); @@ -488,6 +527,11 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, { struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; + if (cc->sector_size != (1 << SECTOR_SHIFT)) { + ti->error = "Unsupported sector size for LMK"; + return -EINVAL; + } + lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0); if (IS_ERR(lmk->hash_tfm)) { ti->error = "Error initializing LMK hash"; @@ -585,12 +629,14 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { + struct scatterlist *sg; u8 *src; int r = 0; if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { - src = kmap_atomic(sg_page(&dmreq->sg_in)); - r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset); + sg = crypt_get_sg_data(cc, dmreq->sg_in); + src = kmap_atomic(sg_page(sg)); + r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); kunmap_atomic(src); } else memset(iv, 0, cc->iv_size); @@ -601,18 +647,20 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { + struct scatterlist *sg; u8 *dst; int r; if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) return 0; - dst = kmap_atomic(sg_page(&dmreq->sg_out)); - r = 
crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset); + sg = crypt_get_sg_data(cc, dmreq->sg_out); + dst = kmap_atomic(sg_page(sg)); + r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset); /* Tweak the first block of plaintext sector */ if (!r) - crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size); + crypto_xor(dst + sg->offset, iv, cc->iv_size); kunmap_atomic(dst); return r; @@ -637,6 +685,11 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti, { struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; + if (cc->sector_size != (1 << SECTOR_SHIFT)) { + ti->error = "Unsupported sector size for TCW"; + return -EINVAL; + } + if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) { ti->error = "Wrong key size for TCW"; return -EINVAL; @@ -724,6 +777,7 @@ out: static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { + struct scatterlist *sg; struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw; __le64 sector = cpu_to_le64(dmreq->iv_sector); u8 *src; @@ -731,8 +785,9 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, /* Remove whitening from ciphertext */ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { - src = kmap_atomic(sg_page(&dmreq->sg_in)); - r = crypt_iv_tcw_whitening(cc, dmreq, src + dmreq->sg_in.offset); + sg = crypt_get_sg_data(cc, dmreq->sg_in); + src = kmap_atomic(sg_page(sg)); + r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset); kunmap_atomic(src); } @@ -748,6 +803,7 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { + struct scatterlist *sg; u8 *dst; int r; @@ -755,13 +811,22 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, return 0; /* Apply whitening on ciphertext */ - dst = kmap_atomic(sg_page(&dmreq->sg_out)); - r = crypt_iv_tcw_whitening(cc, dmreq, dst + dmreq->sg_out.offset); + sg = crypt_get_sg_data(cc, dmreq->sg_out); + dst = kmap_atomic(sg_page(sg)); + r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset); kunmap_atomic(dst); return r; } +static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq) +{ + /* Used only for writes, there must be an additional space to store IV */ + get_random_bytes(iv, cc->iv_size); + return 0; +} + static const struct crypt_iv_operations crypt_iv_plain_ops = { .generator = crypt_iv_plain_gen }; @@ -806,6 +871,108 @@ static const struct crypt_iv_operations crypt_iv_tcw_ops = { .post = crypt_iv_tcw_post }; +static struct crypt_iv_operations crypt_iv_random_ops = { + .generator = crypt_iv_random_gen +}; + +/* + * Integrity extensions + */ +static bool crypt_integrity_aead(struct crypt_config *cc) +{ + return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); +} + +static bool crypt_integrity_hmac(struct crypt_config *cc) +{ + return crypt_integrity_aead(cc) && cc->key_mac_size; +} + +/* Get sg containing data */ +static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc, + struct scatterlist *sg) +{ + if (unlikely(crypt_integrity_aead(cc))) + return &sg[2]; + + return sg; +} + +static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) +{ + struct bio_integrity_payload *bip; + unsigned int tag_len; + int ret; + + if (!bio_sectors(bio) || !io->cc->on_disk_tag_size) + return 0; + + bip = bio_integrity_alloc(bio, GFP_NOIO, 1); + if (IS_ERR(bip)) + return PTR_ERR(bip); + + tag_len = io->cc->on_disk_tag_size * bio_sectors(bio); + + bip->bip_iter.bi_size = 
tag_len; + bip->bip_iter.bi_sector = io->cc->start + io->sector; + + /* We own the metadata, do not let bio_free to release it */ + bip->bip_flags &= ~BIP_BLOCK_INTEGRITY; + + ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata), + tag_len, offset_in_page(io->integrity_metadata)); + if (unlikely(ret != tag_len)) + return -ENOMEM; + + return 0; +} + +static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti) +{ +#ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk); + + /* From now we require underlying device with our integrity profile */ + if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) { + ti->error = "Integrity profile not supported."; + return -EINVAL; + } + + if (bi->tag_size != cc->on_disk_tag_size || + bi->tuple_size != cc->on_disk_tag_size) { + ti->error = "Integrity profile tag size mismatch."; + return -EINVAL; + } + if (1 << bi->interval_exp != cc->sector_size) { + ti->error = "Integrity profile sector size mismatch."; + return -EINVAL; + } + + if (crypt_integrity_aead(cc)) { + cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size; + DMINFO("Integrity AEAD, tag size %u, IV size %u.", + cc->integrity_tag_size, cc->integrity_iv_size); + + if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) { + ti->error = "Integrity AEAD auth tag size is not supported."; + return -EINVAL; + } + } else if (cc->integrity_iv_size) + DMINFO("Additional per-sector space %u bytes for IV.", + cc->integrity_iv_size); + + if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) { + ti->error = "Not enough space for integrity tag in the profile."; + return -EINVAL; + } + + return 0; +#else + ti->error = "Integrity profile not supported."; + return -EINVAL; +#endif +} + static void crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx, struct bio *bio_out, struct bio *bio_in, @@ -822,58 +989,217 @@ static void crypt_convert_init(struct crypt_config *cc, } static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc, - struct skcipher_request *req) + void *req) { return (struct dm_crypt_request *)((char *)req + cc->dmreq_start); } -static struct skcipher_request *req_of_dmreq(struct crypt_config *cc, - struct dm_crypt_request *dmreq) +static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq) { - return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start); + return (void *)((char *)dmreq - cc->dmreq_start); } static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq) { - return (u8 *)ALIGN((unsigned long)(dmreq + 1), - crypto_skcipher_alignmask(any_tfm(cc)) + 1); + if (crypt_integrity_aead(cc)) + return (u8 *)ALIGN((unsigned long)(dmreq + 1), + crypto_aead_alignmask(any_tfm_aead(cc)) + 1); + else + return (u8 *)ALIGN((unsigned long)(dmreq + 1), + crypto_skcipher_alignmask(any_tfm(cc)) + 1); } -static int crypt_convert_block(struct crypt_config *cc, - struct convert_context *ctx, - struct skcipher_request *req) +static u8 *org_iv_of_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + return iv_of_dmreq(cc, dmreq) + cc->iv_size; +} + +static uint64_t *org_sector_of_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size; + return (uint64_t*) ptr; +} + +static unsigned int *org_tag_of_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size 
+ + cc->iv_size + sizeof(uint64_t); + return (unsigned int*)ptr; +} + +static void *tag_from_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + struct convert_context *ctx = dmreq->ctx; + struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); + + return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) * + cc->on_disk_tag_size]; +} + +static void *iv_tag_from_dmreq(struct crypt_config *cc, + struct dm_crypt_request *dmreq) +{ + return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size; +} + +static int crypt_convert_block_aead(struct crypt_config *cc, + struct convert_context *ctx, + struct aead_request *req, + unsigned int tag_offset) { struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); struct dm_crypt_request *dmreq; - u8 *iv; - int r; + u8 *iv, *org_iv, *tag_iv, *tag; + uint64_t *sector; + int r = 0; + + BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size); + + /* Reject unexpected unaligned bio. */ + if (unlikely(bv_in.bv_offset & (cc->sector_size - 1))) + return -EIO; dmreq = dmreq_of_req(cc, req); + dmreq->iv_sector = ctx->cc_sector; + if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) + dmreq->iv_sector >>= cc->sector_shift; + dmreq->ctx = ctx; + + *org_tag_of_dmreq(cc, dmreq) = tag_offset; + + sector = org_sector_of_dmreq(cc, dmreq); + *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset); + iv = iv_of_dmreq(cc, dmreq); + org_iv = org_iv_of_dmreq(cc, dmreq); + tag = tag_from_dmreq(cc, dmreq); + tag_iv = iv_tag_from_dmreq(cc, dmreq); + + /* AEAD request: + * |----- AAD -------|------ DATA -------|-- AUTH TAG --| + * | (authenticated) | (auth+encryption) | | + * | sector_LE | IV | sector in/out | tag in/out | + */ + sg_init_table(dmreq->sg_in, 4); + sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t)); + sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size); + sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset); + sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size); + + sg_init_table(dmreq->sg_out, 4); + sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t)); + sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size); + sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset); + sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size); + + if (cc->iv_gen_ops) { + /* For READs use IV stored in integrity metadata */ + if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) { + memcpy(org_iv, tag_iv, cc->iv_size); + } else { + r = cc->iv_gen_ops->generator(cc, org_iv, dmreq); + if (r < 0) + return r; + /* Store generated IV in integrity metadata */ + if (cc->integrity_iv_size) + memcpy(tag_iv, org_iv, cc->iv_size); + } + /* Working copy of IV, to be modified in crypto API */ + memcpy(iv, org_iv, cc->iv_size); + } + + aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size); + if (bio_data_dir(ctx->bio_in) == WRITE) { + aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, + cc->sector_size, iv); + r = crypto_aead_encrypt(req); + if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size) + memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0, + cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size)); + } else { + aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out, + cc->sector_size + cc->integrity_tag_size, iv); + r = crypto_aead_decrypt(req); + } + + if (r == -EBADMSG) + DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", + (unsigned 
long long)le64_to_cpu(*sector)); + + if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) + r = cc->iv_gen_ops->post(cc, org_iv, dmreq); + + bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size); + bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size); + + return r; +} + +static int crypt_convert_block_skcipher(struct crypt_config *cc, + struct convert_context *ctx, + struct skcipher_request *req, + unsigned int tag_offset) +{ + struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); + struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); + struct scatterlist *sg_in, *sg_out; + struct dm_crypt_request *dmreq; + u8 *iv, *org_iv, *tag_iv; + uint64_t *sector; + int r = 0; + /* Reject unexpected unaligned bio. */ + if (unlikely(bv_in.bv_offset & (cc->sector_size - 1))) + return -EIO; + + dmreq = dmreq_of_req(cc, req); dmreq->iv_sector = ctx->cc_sector; + if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) + dmreq->iv_sector >>= cc->sector_shift; dmreq->ctx = ctx; - sg_init_table(&dmreq->sg_in, 1); - sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT, - bv_in.bv_offset); - sg_init_table(&dmreq->sg_out, 1); - sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT, - bv_out.bv_offset); + *org_tag_of_dmreq(cc, dmreq) = tag_offset; + + iv = iv_of_dmreq(cc, dmreq); + org_iv = org_iv_of_dmreq(cc, dmreq); + tag_iv = iv_tag_from_dmreq(cc, dmreq); + + sector = org_sector_of_dmreq(cc, dmreq); + *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset); + + /* For skcipher we use only the first sg item */ + sg_in = &dmreq->sg_in[0]; + sg_out = &dmreq->sg_out[0]; - bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT); - bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT); + sg_init_table(sg_in, 1); + sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset); + + sg_init_table(sg_out, 1); + sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset); if (cc->iv_gen_ops) { - r = cc->iv_gen_ops->generator(cc, iv, dmreq); - if (r < 0) - return r; + /* For READs use IV stored in integrity metadata */ + if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) { + memcpy(org_iv, tag_iv, cc->integrity_iv_size); + } else { + r = cc->iv_gen_ops->generator(cc, org_iv, dmreq); + if (r < 0) + return r; + /* Store generated IV in integrity metadata */ + if (cc->integrity_iv_size) + memcpy(tag_iv, org_iv, cc->integrity_iv_size); + } + /* Working copy of IV, to be modified in crypto API */ + memcpy(iv, org_iv, cc->iv_size); } - skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out, - 1 << SECTOR_SHIFT, iv); + skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv); if (bio_data_dir(ctx->bio_in) == WRITE) r = crypto_skcipher_encrypt(req); @@ -881,7 +1207,10 @@ static int crypt_convert_block(struct crypt_config *cc, r = crypto_skcipher_decrypt(req); if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) - r = cc->iv_gen_ops->post(cc, iv, dmreq); + r = cc->iv_gen_ops->post(cc, org_iv, dmreq); + + bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size); + bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size); return r; } @@ -889,27 +1218,53 @@ static int crypt_convert_block(struct crypt_config *cc, static void kcryptd_async_done(struct crypto_async_request *async_req, int error); -static void crypt_alloc_req(struct crypt_config *cc, - struct convert_context *ctx) +static void crypt_alloc_req_skcipher(struct crypt_config *cc, + struct convert_context *ctx) { unsigned key_index = 
ctx->cc_sector & (cc->tfms_count - 1); - if (!ctx->req) - ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); + if (!ctx->r.req) + ctx->r.req = mempool_alloc(cc->req_pool, GFP_NOIO); - skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); + skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]); /* * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs * requests if driver request queue is full. */ - skcipher_request_set_callback(ctx->req, + skcipher_request_set_callback(ctx->r.req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - kcryptd_async_done, dmreq_of_req(cc, ctx->req)); + kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); } -static void crypt_free_req(struct crypt_config *cc, - struct skcipher_request *req, struct bio *base_bio) +static void crypt_alloc_req_aead(struct crypt_config *cc, + struct convert_context *ctx) +{ + if (!ctx->r.req_aead) + ctx->r.req_aead = mempool_alloc(cc->req_pool, GFP_NOIO); + + aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]); + + /* + * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs + * requests if driver request queue is full. + */ + aead_request_set_callback(ctx->r.req_aead, + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); +} + +static void crypt_alloc_req(struct crypt_config *cc, + struct convert_context *ctx) +{ + if (crypt_integrity_aead(cc)) + crypt_alloc_req_aead(cc, ctx); + else + crypt_alloc_req_skcipher(cc, ctx); +} + +static void crypt_free_req_skcipher(struct crypt_config *cc, + struct skcipher_request *req, struct bio *base_bio) { struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); @@ -917,12 +1272,31 @@ static void crypt_free_req(struct crypt_config *cc, mempool_free(req, cc->req_pool); } +static void crypt_free_req_aead(struct crypt_config *cc, + struct aead_request *req, struct bio *base_bio) +{ + struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size); + + if ((struct aead_request *)(io + 1) != req) + mempool_free(req, cc->req_pool); +} + +static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio) +{ + if (crypt_integrity_aead(cc)) + crypt_free_req_aead(cc, req, base_bio); + else + crypt_free_req_skcipher(cc, req, base_bio); +} + /* * Encrypt / decrypt data from one bio to another one (can be the same one) */ static int crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { + unsigned int tag_offset = 0; + unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT; int r; atomic_set(&ctx->cc_pending, 1); @@ -930,10 +1304,12 @@ static int crypt_convert(struct crypt_config *cc, while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { crypt_alloc_req(cc, ctx); - atomic_inc(&ctx->cc_pending); - r = crypt_convert_block(cc, ctx, ctx->req); + if (crypt_integrity_aead(cc)) + r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset); + else + r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset); switch (r) { /* @@ -949,22 +1325,31 @@ static int crypt_convert(struct crypt_config *cc, * completion function kcryptd_async_done() will be called. */ case -EINPROGRESS: - ctx->req = NULL; - ctx->cc_sector++; + ctx->r.req = NULL; + ctx->cc_sector += sector_step; + tag_offset++; continue; /* * The request was already processed (synchronously). 
*/ case 0: atomic_dec(&ctx->cc_pending); - ctx->cc_sector++; + ctx->cc_sector += sector_step; + tag_offset++; cond_resched(); continue; - - /* There was an error while processing the request. */ + /* + * There was a data integrity error. + */ + case -EBADMSG: + atomic_dec(&ctx->cc_pending); + return -EILSEQ; + /* + * There was an error while processing the request. + */ default: atomic_dec(&ctx->cc_pending); - return r; + return -EIO; } } @@ -1005,7 +1390,7 @@ retry: clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); if (!clone) - goto return_clone; + goto out; clone_init(io, clone); @@ -1027,7 +1412,13 @@ retry: remaining_size -= len; } -return_clone: + /* Allocate space for integrity tags */ + if (dm_crypt_integrity_io_alloc(io, clone)) { + crypt_free_buffer_pages(cc, clone); + bio_put(clone); + clone = NULL; + } +out: if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) mutex_unlock(&cc->bio_alloc_lock); @@ -1053,7 +1444,9 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, io->base_bio = bio; io->sector = sector; io->error = 0; - io->ctx.req = NULL; + io->ctx.r.req = NULL; + io->integrity_metadata = NULL; + io->integrity_metadata_from_pool = false; atomic_set(&io->io_pending, 0); } @@ -1075,8 +1468,13 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (!atomic_dec_and_test(&io->io_pending)) return; - if (io->ctx.req) - crypt_free_req(cc, io->ctx.req, base_bio); + if (io->ctx.r.req) + crypt_free_req(cc, io->ctx.r.req, base_bio); + + if (unlikely(io->integrity_metadata_from_pool)) + mempool_free(io->integrity_metadata, io->cc->tag_pool); + else + kfree(io->integrity_metadata); base_bio->bi_error = error; bio_endio(base_bio); @@ -1156,6 +1554,12 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) clone_init(io, clone); clone->bi_iter.bi_sector = cc->start + io->sector; + if (dm_crypt_integrity_io_alloc(io, clone)) { + crypt_dec_pending(io); + bio_put(clone); + return 1; + } + generic_make_request(clone); return 0; } @@ -1314,8 +1718,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) crypt_inc_pending(io); r = crypt_convert(cc, &io->ctx); - if (r) - io->error = -EIO; + if (r < 0) + io->error = r; crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); /* Encryption was already finished, submit io now */ @@ -1345,7 +1749,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) r = crypt_convert(cc, &io->ctx); if (r < 0) - io->error = -EIO; + io->error = r; if (atomic_dec_and_test(&io->ctx.cc_pending)) kcryptd_crypt_read_done(io); @@ -1372,9 +1776,13 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, } if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) - error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); + error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq); - if (error < 0) + if (error == -EBADMSG) { + DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu", + (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq))); + io->error = -EILSEQ; + } else if (error < 0) io->error = -EIO; crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); @@ -1406,61 +1814,59 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io) queue_work(cc->crypt_queue, &io->work); } -/* - * Decode key from its hex representation - */ -static int crypt_decode_key(u8 *key, char *hex, unsigned int size) +static void crypt_free_tfms_aead(struct crypt_config *cc) { - char buffer[3]; - unsigned int i; - - buffer[2] = '\0'; - - for (i = 0; i < size; i++) { - buffer[0] = *hex++; - buffer[1] 
= *hex++; + if (!cc->cipher_tfm.tfms_aead) + return; - if (kstrtou8(buffer, 16, &key[i])) - return -EINVAL; + if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) { + crypto_free_aead(cc->cipher_tfm.tfms_aead[0]); + cc->cipher_tfm.tfms_aead[0] = NULL; } - if (*hex != '\0') - return -EINVAL; - - return 0; + kfree(cc->cipher_tfm.tfms_aead); + cc->cipher_tfm.tfms_aead = NULL; } -static void crypt_free_tfms(struct crypt_config *cc) +static void crypt_free_tfms_skcipher(struct crypt_config *cc) { unsigned i; - if (!cc->tfms) + if (!cc->cipher_tfm.tfms) return; for (i = 0; i < cc->tfms_count; i++) - if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) { - crypto_free_skcipher(cc->tfms[i]); - cc->tfms[i] = NULL; + if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) { + crypto_free_skcipher(cc->cipher_tfm.tfms[i]); + cc->cipher_tfm.tfms[i] = NULL; } - kfree(cc->tfms); - cc->tfms = NULL; + kfree(cc->cipher_tfm.tfms); + cc->cipher_tfm.tfms = NULL; } -static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) +static void crypt_free_tfms(struct crypt_config *cc) +{ + if (crypt_integrity_aead(cc)) + crypt_free_tfms_aead(cc); + else + crypt_free_tfms_skcipher(cc); +} + +static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode) { unsigned i; int err; - cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *), - GFP_KERNEL); - if (!cc->tfms) + cc->cipher_tfm.tfms = kzalloc(cc->tfms_count * + sizeof(struct crypto_skcipher *), GFP_KERNEL); + if (!cc->cipher_tfm.tfms) return -ENOMEM; for (i = 0; i < cc->tfms_count; i++) { - cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0); - if (IS_ERR(cc->tfms[i])) { - err = PTR_ERR(cc->tfms[i]); + cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0); + if (IS_ERR(cc->cipher_tfm.tfms[i])) { + err = PTR_ERR(cc->cipher_tfm.tfms[i]); crypt_free_tfms(cc); return err; } @@ -1469,22 +1875,95 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) return 0; } +static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode) +{ + int err; + + cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL); + if (!cc->cipher_tfm.tfms) + return -ENOMEM; + + cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0, 0); + if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) { + err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]); + crypt_free_tfms(cc); + return err; + } + + return 0; +} + +static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) +{ + if (crypt_integrity_aead(cc)) + return crypt_alloc_tfms_aead(cc, ciphermode); + else + return crypt_alloc_tfms_skcipher(cc, ciphermode); +} + +static unsigned crypt_subkey_size(struct crypt_config *cc) +{ + return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); +} + +static unsigned crypt_authenckey_size(struct crypt_config *cc) +{ + return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param)); +} + +/* + * If AEAD is composed like authenc(hmac(sha256),xts(aes)), + * the key must for some reason be in a special format. + * This function converts cc->key to this special format.
+ */ +static void crypt_copy_authenckey(char *p, const void *key, + unsigned enckeylen, unsigned authkeylen) +{ + struct crypto_authenc_key_param *param; + struct rtattr *rta; + + rta = (struct rtattr *)p; + param = RTA_DATA(rta); + param->enckeylen = cpu_to_be32(enckeylen); + rta->rta_len = RTA_LENGTH(sizeof(*param)); + rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; + p += RTA_SPACE(sizeof(*param)); + memcpy(p, key + enckeylen, authkeylen); + p += authkeylen; + memcpy(p, key, enckeylen); +} + static int crypt_setkey(struct crypt_config *cc) { unsigned subkey_size; int err = 0, i, r; /* Ignore extra keys (which are used for IV etc) */ - subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); + subkey_size = crypt_subkey_size(cc); + if (crypt_integrity_hmac(cc)) + crypt_copy_authenckey(cc->authenc_key, cc->key, + subkey_size - cc->key_mac_size, + cc->key_mac_size); for (i = 0; i < cc->tfms_count; i++) { - r = crypto_skcipher_setkey(cc->tfms[i], - cc->key + (i * subkey_size), - subkey_size); + if (crypt_integrity_hmac(cc)) + r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], + cc->authenc_key, crypt_authenckey_size(cc)); + else if (crypt_integrity_aead(cc)) + r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i], + cc->key + (i * subkey_size), + subkey_size); + else + r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i], + cc->key + (i * subkey_size), + subkey_size); if (r) err = r; } + if (crypt_integrity_hmac(cc)) + memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc)); + return err; } @@ -1633,7 +2112,8 @@ static int crypt_set_key(struct crypt_config *cc, char *key) kzfree(cc->key_string); cc->key_string = NULL; - if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) + /* Decode key from its hex representation. */ + if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0) goto out; r = crypt_setkey(cc); @@ -1649,12 +2129,16 @@ out: static int crypt_wipe_key(struct crypt_config *cc) { + int r; + clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); - memset(&cc->key, 0, cc->key_size * sizeof(u8)); + get_random_bytes(&cc->key, cc->key_size); kzfree(cc->key_string); cc->key_string = NULL; + r = crypt_setkey(cc); + memset(&cc->key, 0, cc->key_size * sizeof(u8)); - return crypt_setkey(cc); + return r; } static void crypt_dtr(struct dm_target *ti) @@ -1681,6 +2165,7 @@ static void crypt_dtr(struct dm_target *ti) mempool_destroy(cc->page_pool); mempool_destroy(cc->req_pool); + mempool_destroy(cc->tag_pool); if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); @@ -1691,30 +2176,221 @@ static void crypt_dtr(struct dm_target *ti) kzfree(cc->cipher); kzfree(cc->cipher_string); kzfree(cc->key_string); + kzfree(cc->cipher_auth); + kzfree(cc->authenc_key); /* Must zero key material before freeing */ kzfree(cc); } -static int crypt_ctr_cipher(struct dm_target *ti, - char *cipher_in, char *key) +static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode) +{ + struct crypt_config *cc = ti->private; + + if (crypt_integrity_aead(cc)) + cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc)); + else + cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); + + if (cc->iv_size) + /* at least a 64 bit sector number should fit in our buffer */ + cc->iv_size = max(cc->iv_size, + (unsigned int)(sizeof(u64) / sizeof(u8))); + else if (ivmode) { + DMWARN("Selected cipher does not support IVs"); + ivmode = NULL; + } + + /* Choose ivmode, see comments at iv code. 
*/ + if (ivmode == NULL) + cc->iv_gen_ops = NULL; + else if (strcmp(ivmode, "plain") == 0) + cc->iv_gen_ops = &crypt_iv_plain_ops; + else if (strcmp(ivmode, "plain64") == 0) + cc->iv_gen_ops = &crypt_iv_plain64_ops; + else if (strcmp(ivmode, "essiv") == 0) + cc->iv_gen_ops = &crypt_iv_essiv_ops; + else if (strcmp(ivmode, "benbi") == 0) + cc->iv_gen_ops = &crypt_iv_benbi_ops; + else if (strcmp(ivmode, "null") == 0) + cc->iv_gen_ops = &crypt_iv_null_ops; + else if (strcmp(ivmode, "lmk") == 0) { + cc->iv_gen_ops = &crypt_iv_lmk_ops; + /* + * Versions 2 and 3 are recognised according + * to the length of the provided multi-key string. + * If present (version 3), last key is used as IV seed. + * All keys (including IV seed) are always the same size. + */ + if (cc->key_size % cc->key_parts) { + cc->key_parts++; + cc->key_extra_size = cc->key_size / cc->key_parts; + } + } else if (strcmp(ivmode, "tcw") == 0) { + cc->iv_gen_ops = &crypt_iv_tcw_ops; + cc->key_parts += 2; /* IV + whitening */ + cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE; + } else if (strcmp(ivmode, "random") == 0) { + cc->iv_gen_ops = &crypt_iv_random_ops; + /* Need storage space in integrity fields. */ + cc->integrity_iv_size = cc->iv_size; + } else { + ti->error = "Invalid IV mode"; + return -EINVAL; + } + + return 0; +} + +/* + * Workaround to parse cipher algorithm from crypto API spec. + * The cc->cipher is currently used only in ESSIV. + * This should probably be done by crypto-api calls (once available...) + */ +static int crypt_ctr_blkdev_cipher(struct crypt_config *cc) +{ + const char *alg_name = NULL; + char *start, *end; + + if (crypt_integrity_aead(cc)) { + alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc))); + if (!alg_name) + return -EINVAL; + if (crypt_integrity_hmac(cc)) { + alg_name = strchr(alg_name, ','); + if (!alg_name) + return -EINVAL; + } + alg_name++; + } else { + alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc))); + if (!alg_name) + return -EINVAL; + } + + start = strchr(alg_name, '('); + end = strchr(alg_name, ')'); + + if (!start && !end) { + cc->cipher = kstrdup(alg_name, GFP_KERNEL); + return cc->cipher ? 0 : -ENOMEM; + } + + if (!start || !end || ++start >= end) + return -EINVAL; + + cc->cipher = kzalloc(end - start + 1, GFP_KERNEL); + if (!cc->cipher) + return -ENOMEM; + + strncpy(cc->cipher, start, end - start); + + return 0; +} + +/* + * Workaround to parse HMAC algorithm from AEAD crypto API spec. + * The HMAC is needed to calculate tag size (HMAC digest size). + * This should probably be done by crypto-api calls (once available...)
+ */ +static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api) +{ + char *start, *end, *mac_alg = NULL; + struct crypto_ahash *mac; + + if (!strstarts(cipher_api, "authenc(")) + return 0; + + start = strchr(cipher_api, '('); + end = strchr(cipher_api, ','); + if (!start || !end || ++start > end) + return -EINVAL; + + mac_alg = kzalloc(end - start + 1, GFP_KERNEL); + if (!mac_alg) + return -ENOMEM; + strncpy(mac_alg, start, end - start); + + mac = crypto_alloc_ahash(mac_alg, 0, 0); + kfree(mac_alg); + + if (IS_ERR(mac)) + return PTR_ERR(mac); + + cc->key_mac_size = crypto_ahash_digestsize(mac); + crypto_free_ahash(mac); + + cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL); + if (!cc->authenc_key) + return -ENOMEM; + + return 0; +} + +static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key, + char **ivmode, char **ivopts) +{ + struct crypt_config *cc = ti->private; + char *tmp, *cipher_api; + int ret = -EINVAL; + + cc->tfms_count = 1; + + /* + * New format (capi: prefix) + * capi:cipher_api_spec-iv:ivopts + */ + tmp = &cipher_in[strlen("capi:")]; + cipher_api = strsep(&tmp, "-"); + *ivmode = strsep(&tmp, ":"); + *ivopts = tmp; + + if (*ivmode && !strcmp(*ivmode, "lmk")) + cc->tfms_count = 64; + + cc->key_parts = cc->tfms_count; + + /* Allocate cipher */ + ret = crypt_alloc_tfms(cc, cipher_api); + if (ret < 0) { + ti->error = "Error allocating crypto tfm"; + return ret; + } + + /* Alloc AEAD, can be used only in new format. */ + if (crypt_integrity_aead(cc)) { + ret = crypt_ctr_auth_cipher(cc, cipher_api); + if (ret < 0) { + ti->error = "Invalid AEAD cipher spec"; + return -ENOMEM; + } + cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc)); + } else + cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); + + ret = crypt_ctr_blkdev_cipher(cc); + if (ret < 0) { + ti->error = "Cannot allocate cipher string"; + return -ENOMEM; + } + + return 0; +} + +static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key, + char **ivmode, char **ivopts) { struct crypt_config *cc = ti->private; - char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount; + char *tmp, *cipher, *chainmode, *keycount; char *cipher_api = NULL; int ret = -EINVAL; char dummy; - /* Convert to crypto api definition? */ - if (strchr(cipher_in, '(')) { + if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) { ti->error = "Bad cipher specification"; return -EINVAL; } - cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL); - if (!cc->cipher_string) - goto bad_mem; - /* * Legacy dm-crypt cipher specification * cipher[:keycount]-mode-iv:ivopts @@ -1731,15 +2407,14 @@ static int crypt_ctr_cipher(struct dm_target *ti, return -EINVAL; } cc->key_parts = cc->tfms_count; - cc->key_extra_size = 0; cc->cipher = kstrdup(cipher, GFP_KERNEL); if (!cc->cipher) goto bad_mem; chainmode = strsep(&tmp, "-"); - ivopts = strsep(&tmp, "-"); - ivmode = strsep(&ivopts, ":"); + *ivopts = strsep(&tmp, "-"); + *ivmode = strsep(&*ivopts, ":"); if (tmp) DMWARN("Ignoring unexpected additional cipher options"); @@ -1748,12 +2423,12 @@ static int crypt_ctr_cipher(struct dm_target *ti, * For compatibility with the original dm-crypt mapping format, if * only the cipher name is supplied, use cbc-plain. 
*/ - if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) { + if (!chainmode || (!strcmp(chainmode, "plain") && !*ivmode)) { chainmode = "cbc"; - ivmode = "plain"; + *ivmode = "plain"; } - if (strcmp(chainmode, "ecb") && !ivmode) { + if (strcmp(chainmode, "ecb") && !*ivmode) { ti->error = "IV mechanism required"; return -EINVAL; } @@ -1773,60 +2448,45 @@ static int crypt_ctr_cipher(struct dm_target *ti, ret = crypt_alloc_tfms(cc, cipher_api); if (ret < 0) { ti->error = "Error allocating crypto tfm"; - goto bad; + kfree(cipher_api); + return ret; } - /* Initialize IV */ - cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc)); - if (cc->iv_size) - /* at least a 64 bit sector number should fit in our buffer */ - cc->iv_size = max(cc->iv_size, - (unsigned int)(sizeof(u64) / sizeof(u8))); - else if (ivmode) { - DMWARN("Selected cipher does not support IVs"); - ivmode = NULL; - } + return 0; +bad_mem: + ti->error = "Cannot allocate cipher strings"; + return -ENOMEM; +} - /* Choose ivmode, see comments at iv code. */ - if (ivmode == NULL) - cc->iv_gen_ops = NULL; - else if (strcmp(ivmode, "plain") == 0) - cc->iv_gen_ops = &crypt_iv_plain_ops; - else if (strcmp(ivmode, "plain64") == 0) - cc->iv_gen_ops = &crypt_iv_plain64_ops; - else if (strcmp(ivmode, "essiv") == 0) - cc->iv_gen_ops = &crypt_iv_essiv_ops; - else if (strcmp(ivmode, "benbi") == 0) - cc->iv_gen_ops = &crypt_iv_benbi_ops; - else if (strcmp(ivmode, "null") == 0) - cc->iv_gen_ops = &crypt_iv_null_ops; - else if (strcmp(ivmode, "lmk") == 0) { - cc->iv_gen_ops = &crypt_iv_lmk_ops; - /* - * Version 2 and 3 is recognised according - * to length of provided multi-key string. - * If present (version 3), last key is used as IV seed. - * All keys (including IV seed) are always the same size. - */ - if (cc->key_size % cc->key_parts) { - cc->key_parts++; - cc->key_extra_size = cc->key_size / cc->key_parts; - } - } else if (strcmp(ivmode, "tcw") == 0) { - cc->iv_gen_ops = &crypt_iv_tcw_ops; - cc->key_parts += 2; /* IV + whitening */ - cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE; - } else { - ret = -EINVAL; - ti->error = "Invalid IV mode"; - goto bad; +static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key) +{ + struct crypt_config *cc = ti->private; + char *ivmode = NULL, *ivopts = NULL; + int ret; + + cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL); + if (!cc->cipher_string) { + ti->error = "Cannot allocate cipher strings"; + return -ENOMEM; } + if (strstarts(cipher_in, "capi:")) + ret = crypt_ctr_cipher_new(ti, cipher_in, key, &ivmode, &ivopts); + else + ret = crypt_ctr_cipher_old(ti, cipher_in, key, &ivmode, &ivopts); + if (ret) + return ret; + + /* Initialize IV */ + ret = crypt_ctr_ivmode(ti, ivmode); + if (ret < 0) + return ret; + /* Initialize and set key */ ret = crypt_set_key(cc, key); if (ret < 0) { ti->error = "Error decoding and setting key"; - goto bad; + return ret; } /* Allocate IV */ @@ -1834,7 +2494,7 @@ static int crypt_ctr_cipher(struct dm_target *ti, ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); if (ret < 0) { ti->error = "Error creating IV"; - goto bad; + return ret; } } @@ -1843,18 +2503,82 @@ static int crypt_ctr_cipher(struct dm_target *ti, ret = cc->iv_gen_ops->init(cc); if (ret < 0) { ti->error = "Error initialising IV"; - goto bad; + return ret; } } - ret = 0; -bad: - kfree(cipher_api); return ret; +} -bad_mem: - ti->error = "Cannot allocate cipher strings"; - return -ENOMEM; +static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv) +{ + struct crypt_config 
*cc = ti->private; + struct dm_arg_set as; + static struct dm_arg _args[] = { + {0, 6, "Invalid number of feature args"}, + }; + unsigned int opt_params, val; + const char *opt_string, *sval; + char dummy; + int ret; + + /* Optional parameters */ + as.argc = argc; + as.argv = argv; + + ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); + if (ret) + return ret; + + while (opt_params--) { + opt_string = dm_shift_arg(&as); + if (!opt_string) { + ti->error = "Not enough feature arguments"; + return -EINVAL; + } + + if (!strcasecmp(opt_string, "allow_discards")) + ti->num_discard_bios = 1; + + else if (!strcasecmp(opt_string, "same_cpu_crypt")) + set_bit(DM_CRYPT_SAME_CPU, &cc->flags); + + else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) + set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); + else if (sscanf(opt_string, "integrity:%u:", &val) == 1) { + if (val == 0 || val > MAX_TAG_SIZE) { + ti->error = "Invalid integrity arguments"; + return -EINVAL; + } + cc->on_disk_tag_size = val; + sval = strchr(opt_string + strlen("integrity:"), ':') + 1; + if (!strcasecmp(sval, "aead")) { + set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); + } else if (strcasecmp(sval, "none")) { + ti->error = "Unknown integrity profile"; + return -EINVAL; + } + + cc->cipher_auth = kstrdup(sval, GFP_KERNEL); + if (!cc->cipher_auth) + return -ENOMEM; + } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) { + if (cc->sector_size < (1 << SECTOR_SHIFT) || + cc->sector_size > 4096 || + (cc->sector_size & (cc->sector_size - 1))) { + ti->error = "Invalid feature value for sector_size"; + return -EINVAL; + } + cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; + } else if (!strcasecmp(opt_string, "iv_large_sectors")) + set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); + else { + ti->error = "Invalid feature arguments"; + return -EINVAL; + } + } + + return 0; } /* @@ -1865,18 +2589,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct crypt_config *cc; int key_size; - unsigned int opt_params; + unsigned int align_mask; unsigned long long tmpll; int ret; - size_t iv_size_padding; - struct dm_arg_set as; - const char *opt_string; + size_t iv_size_padding, additional_req_size; char dummy; - static struct dm_arg _args[] = { - {0, 3, "Invalid number of feature args"}, - }; - if (argc < 5) { ti->error = "Not enough arguments"; return -EINVAL; @@ -1894,40 +2612,63 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) return -ENOMEM; } cc->key_size = key_size; + cc->sector_size = (1 << SECTOR_SHIFT); + cc->sector_shift = 0; ti->private = cc; + + /* Optional parameters need to be read before cipher constructor */ + if (argc > 5) { + ret = crypt_ctr_optional(ti, argc - 5, &argv[5]); + if (ret) + goto bad; + } + ret = crypt_ctr_cipher(ti, argv[0], argv[1]); if (ret < 0) goto bad; - cc->dmreq_start = sizeof(struct skcipher_request); - cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc)); + if (crypt_integrity_aead(cc)) { + cc->dmreq_start = sizeof(struct aead_request); + cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc)); + align_mask = crypto_aead_alignmask(any_tfm_aead(cc)); + } else { + cc->dmreq_start = sizeof(struct skcipher_request); + cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc)); + align_mask = crypto_skcipher_alignmask(any_tfm(cc)); + } cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); - if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) { + if (align_mask < 
CRYPTO_MINALIGN) { /* Allocate the padding exactly */ iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request)) - & crypto_skcipher_alignmask(any_tfm(cc)); + & align_mask; } else { /* * If the cipher requires greater alignment than kmalloc * alignment, we don't know the exact position of the * initialization vector. We must assume worst case. */ - iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc)); + iv_size_padding = align_mask; } ret = -ENOMEM; - cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + - sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size); + + /* ...| IV + padding | original IV | original sec. number | bio tag offset | */ + additional_req_size = sizeof(struct dm_crypt_request) + + iv_size_padding + cc->iv_size + + cc->iv_size + + sizeof(uint64_t) + + sizeof(unsigned int); + + cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + additional_req_size); if (!cc->req_pool) { ti->error = "Cannot allocate crypt request mempool"; goto bad; } cc->per_bio_data_size = ti->per_io_data_size = - ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + - sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size, + ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size, ARCH_KMALLOC_MINALIGN); cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0); @@ -1945,7 +2686,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) mutex_init(&cc->bio_alloc_lock); ret = -EINVAL; - if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) { + if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) || + (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) { ti->error = "Invalid iv_offset sector"; goto bad; } @@ -1964,53 +2706,37 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } cc->start = tmpll; - argv += 5; - argc -= 5; - - /* Optional parameters */ - if (argc) { - as.argc = argc; - as.argv = argv; - - ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); + if (crypt_integrity_aead(cc) || cc->integrity_iv_size) { + ret = crypt_integrity_ctr(cc, ti); if (ret) goto bad; - ret = -EINVAL; - while (opt_params--) { - opt_string = dm_shift_arg(&as); - if (!opt_string) { - ti->error = "Not enough feature arguments"; - goto bad; - } - - if (!strcasecmp(opt_string, "allow_discards")) - ti->num_discard_bios = 1; + cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size; + if (!cc->tag_pool_max_sectors) + cc->tag_pool_max_sectors = 1; - else if (!strcasecmp(opt_string, "same_cpu_crypt")) - set_bit(DM_CRYPT_SAME_CPU, &cc->flags); - - else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) - set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); - - else { - ti->error = "Invalid feature arguments"; - goto bad; - } + cc->tag_pool = mempool_create_kmalloc_pool(MIN_IOS, + cc->tag_pool_max_sectors * cc->on_disk_tag_size); + if (!cc->tag_pool) { + ti->error = "Cannot allocate integrity tags mempool"; + goto bad; } + + cc->tag_pool_max_sectors <<= cc->sector_shift; } ret = -ENOMEM; - cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1); + cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); if (!cc->io_queue) { ti->error = "Couldn't create kcryptd io queue"; goto bad; } if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) - cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); + cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); else - cc->crypt_queue = 
alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, + cc->crypt_queue = alloc_workqueue("kcryptd", + WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus()); if (!cc->crypt_queue) { ti->error = "Couldn't create kcryptd queue"; @@ -2061,12 +2787,39 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) * Check if bio is too large, split as needed. */ if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) && - bio_data_dir(bio) == WRITE) + (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size)) dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT)); + /* + * Ensure that bio is a multiple of internal sector encryption size + * and is aligned to this size as defined in IO hints. + */ + if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0)) + return -EIO; + + if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1))) + return -EIO; + io = dm_per_bio_data(bio, cc->per_bio_data_size); crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); - io->ctx.req = (struct skcipher_request *)(io + 1); + + if (cc->on_disk_tag_size) { + unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift); + + if (unlikely(tag_len > KMALLOC_MAX_SIZE) || + unlikely(!(io->integrity_metadata = kmalloc(tag_len, + GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) { + if (bio_sectors(bio) > cc->tag_pool_max_sectors) + dm_accept_partial_bio(bio, cc->tag_pool_max_sectors); + io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO); + io->integrity_metadata_from_pool = true; + } + } + + if (crypt_integrity_aead(cc)) + io->ctx.r.req_aead = (struct aead_request *)(io + 1); + else + io->ctx.r.req = (struct skcipher_request *)(io + 1); if (bio_data_dir(io->base_bio) == READ) { if (kcryptd_io_read(io, GFP_NOWAIT)) @@ -2107,6 +2860,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type, num_feature_args += !!ti->num_discard_bios; num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); + num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT); + num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); + if (cc->on_disk_tag_size) + num_feature_args++; if (num_feature_args) { DMEMIT(" %d", num_feature_args); if (ti->num_discard_bios) @@ -2115,6 +2872,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type, DMEMIT(" same_cpu_crypt"); if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) DMEMIT(" submit_from_crypt_cpus"); + if (cc->on_disk_tag_size) + DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth); + if (cc->sector_size != (1 << SECTOR_SHIFT)) + DMEMIT(" sector_size:%d", cc->sector_size); + if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags)) + DMEMIT(" iv_large_sectors"); } break; @@ -2204,6 +2967,8 @@ static int crypt_iterate_devices(struct dm_target *ti, static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) { + struct crypt_config *cc = ti->private; + /* * Unfortunate constraint that is required to avoid the potential * for exceeding underlying device's max_segments limits -- due to @@ -2211,11 +2976,17 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) * bio that are not as physically contiguous as the original bio. 
*/ limits->max_segment_size = PAGE_SIZE; + + if (cc->sector_size != (1 << SECTOR_SHIFT)) { + limits->logical_block_size = cc->sector_size; + limits->physical_block_size = cc->sector_size; + blk_limits_io_min(limits, cc->sector_size); + } } static struct target_type crypt_target = { .name = "crypt", - .version = {1, 15, 0}, + .version = {1, 17, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr,
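The core of the data path change is crypt_convert_block_aead(): each encryption sector becomes one AEAD request whose scatterlists carry the little-endian sector number and the original IV as additional authenticated data (AAD), followed by the sector data and the authentication tag. The sketch below only works out the lengths handed to the AEAD API, using assumed gcm(aes) parameters (12-byte IV, 16-byte tag); it is an illustration, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Per-sector AEAD request lengths as set up in crypt_convert_block_aead();
 * the scatterlist layout is | sector_LE (8) | IV | data | auth tag |. */
int main(void)
{
    unsigned iv_size = 12, sector_size = 512, tag_size = 16;

    unsigned assoclen = sizeof(uint64_t) + iv_size; /* aead_request_set_ad() */
    unsigned enc_len = sector_size;                 /* encrypt: data only */
    unsigned dec_len = sector_size + tag_size;      /* decrypt: data + tag */

    printf("AAD %u, cryptlen %u (enc) / %u (dec), on-disk bytes per sector %u\n",
           assoclen, enc_len, dec_len, tag_size + iv_size);
    return 0;
}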
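For authenc() compositions such as authenc(hmac(sha256),xts(aes)), crypt_setkey() cannot hand cc->key to the cipher directly: crypt_copy_authenckey() first wraps it in the rtattr-encoded blob the authenc template expects, which is why the patch pulls in <linux/rtnetlink.h>. The userspace sketch below mirrors that layout; the local authenc_key_param struct, the KEYA_PARAM constant and the concrete 64+32-byte key are assumptions for illustration, not part of the patch:

#include <linux/rtnetlink.h>   /* struct rtattr, RTA_* macros (Linux uapi) */
#include <arpa/inet.h>         /* htonl(), standing in for cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct crypto_authenc_key_param from <crypto/authenc.h>,
 * which is not exported to userspace: one big-endian 32-bit length. */
struct authenc_key_param { uint32_t enckeylen; };
#define CRYPTO_AUTHENC_KEYA_PARAM 1

/* Same layout as crypt_copy_authenckey() in the patch:
 * | rtattr header + enckeylen | auth (MAC) key | encryption key | */
static size_t copy_authenckey(uint8_t *p, const uint8_t *key,
                              unsigned enckeylen, unsigned authkeylen)
{
    struct rtattr *rta = (struct rtattr *)p;
    struct authenc_key_param *param = RTA_DATA(rta);

    rta->rta_len = RTA_LENGTH(sizeof(*param));
    rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
    param->enckeylen = htonl(enckeylen);
    p += RTA_SPACE(sizeof(*param));
    memcpy(p, key + enckeylen, authkeylen);   /* cc->key keeps the enc key first */
    p += authkeylen;
    memcpy(p, key, enckeylen);
    return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}

int main(void)
{
    /* Assumed sizes: 64-byte aes-xts key followed by a 32-byte hmac(sha256) key,
     * matching the enckeylen = subkey_size - key_mac_size split in crypt_setkey(). */
    uint8_t key[96] = { 0 }, blob[128];
    size_t n = copy_authenckey(blob, key, 64, 32);

    printf("authenc() key blob: %zu bytes (header %zu)\n",
           n, (size_t)RTA_SPACE(sizeof(struct authenc_key_param)));
    return 0;
}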
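On the key path, the open-coded crypt_decode_key() hex parser is dropped in favour of the kernel's hex2bin() helper. For readers unfamiliar with it, here is a userspace sketch of the contract crypt_set_key() now relies on (mirroring the behaviour as used here, not the kernel implementation in lib/hexdump.c):

#include <stdint.h>
#include <stddef.h>

static int hex_to_nibble(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;
}

/* Decode count bytes from 2*count hex digits; negative on bad input,
 * which is what the hex2bin(...) < 0 check in crypt_set_key() tests. */
static int hex2bin_sketch(uint8_t *dst, const char *src, size_t count)
{
    while (count--) {
        int hi = hex_to_nibble(*src++);
        int lo = hex_to_nibble(*src++);
        if (hi < 0 || lo < 0)
            return -1;
        *dst++ = (uint8_t)(hi << 4 | lo);
    }
    return 0;
}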
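The new sector_size: option lets dm-crypt encrypt in units of up to 4096 bytes: crypt_ctr_optional() derives cc->sector_shift from the option, crypt_convert() then advances cc_sector by sector_size >> SECTOR_SHIFT per crypto block, and the iv_large_sectors flag additionally shifts the IV sector down so IVs count encryption blocks rather than 512-byte sectors. A toy calculation of the resulting IV inputs, with __builtin_ctz standing in for the kernel's __ffs():

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
    unsigned sector_size = 4096;  /* table option sector_size:4096 */
    unsigned sector_shift = __builtin_ctz(sector_size) - SECTOR_SHIFT; /* = 3 */
    unsigned sector_step = sector_size >> SECTOR_SHIFT;                /* = 8 */

    for (uint64_t cc_sector = 0; cc_sector < 24; cc_sector += sector_step)
        printf("512B sector %2llu -> IV sector %llu, with iv_large_sectors %llu\n",
               (unsigned long long)cc_sector,
               (unsigned long long)cc_sector,              /* default IV input */
               (unsigned long long)(cc_sector >> sector_shift));
    return 0;
}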
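Finally, crypt_map() sizes the per-bio metadata buffer as on_disk_tag_size * (bio_sectors >> sector_shift), trying a plain kmalloc() first and falling back to the fixed-size tag mempool, splitting the bio when it covers more sectors than one pool entry holds. Worked numbers for an assumed 128KiB bio with 28-byte tags and 512-byte encryption sectors:

#include <stdio.h>

#define SECTOR_SHIFT    9
#define POOL_ENTRY_SIZE 512   /* from the patch */

int main(void)
{
    unsigned on_disk_tag_size = 28;   /* e.g. integrity:28:aead */
    unsigned sector_shift = 0;        /* 512B encryption sectors */
    unsigned bio_sectors = 256;       /* a 128KiB bio */

    /* Per-bio metadata buffer allocated in crypt_map() */
    unsigned tag_len = on_disk_tag_size * (bio_sectors >> sector_shift);

    /* Fallback mempool entry covers this many sectors per allocation */
    unsigned tag_pool_max_sectors = POOL_ENTRY_SIZE / on_disk_tag_size;
    if (!tag_pool_max_sectors)
        tag_pool_max_sectors = 1;
    tag_pool_max_sectors <<= sector_shift;

    printf("tag buffer: %u bytes; pool fallback handles %u sectors per bio\n",
           tag_len, tag_pool_max_sectors);
    return 0;
}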