Diffstat (limited to 'crypto')
-rw-r--r-- | crypto/Kconfig                           |  19
-rw-r--r-- | crypto/Makefile                          |   5
-rw-r--r-- | crypto/aegis128-core.c                   |   2
-rw-r--r-- | crypto/ahash.c                           | 158
-rw-r--r-- | crypto/algapi.c                          |  31
-rw-r--r-- | crypto/anubis.c                          |  14
-rw-r--r-- | crypto/aria_generic.c                    |  37
-rw-r--r-- | crypto/asymmetric_keys/asymmetric_type.c |  10
-rw-r--r-- | crypto/crc32_generic.c                   |   8
-rw-r--r-- | crypto/crc32c_generic.c                  |  12
-rw-r--r-- | crypto/crct10dif_common.c                |  82
-rw-r--r-- | crypto/crct10dif_generic.c               |  82
-rw-r--r-- | crypto/crypto_engine.c                   |   2
-rw-r--r-- | crypto/fips.c                            |   4
-rw-r--r-- | crypto/keywrap.c                         | 320
-rw-r--r-- | crypto/khazad.c                          |  17
-rw-r--r-- | crypto/proc.c                            |   9
-rw-r--r-- | crypto/seed.c                            |  48
-rw-r--r-- | crypto/sig.c                             |   4
-rw-r--r-- | crypto/skcipher.c                        | 369
-rw-r--r-- | crypto/tcrypt.c                          |   4
-rw-r--r-- | crypto/tea.c                             |  83
-rw-r--r-- | crypto/testmgr.c                         |  26
-rw-r--r-- | crypto/testmgr.h                         | 192
-rw-r--r-- | crypto/vmac.c                            | 696
25 files changed, 380 insertions, 1854 deletions
diff --git a/crypto/Kconfig b/crypto/Kconfig index 6b0bfbccac08..74ae5f52b784 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -684,14 +684,6 @@ config CRYPTO_HCTR2 See https://eprint.iacr.org/2021/1441 -config CRYPTO_KEYWRAP - tristate "KW (AES Key Wrap)" - select CRYPTO_SKCIPHER - select CRYPTO_MANAGER - help - KW (AES Key Wrap) authenticated encryption mode (NIST SP800-38F - and RFC3394) without padding. - config CRYPTO_LRW tristate "LRW (Liskov Rivest Wagner)" select CRYPTO_LIB_GF128MUL @@ -1029,16 +1021,6 @@ config CRYPTO_STREEBOG https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf https://tools.ietf.org/html/rfc6986 -config CRYPTO_VMAC - tristate "VMAC" - select CRYPTO_HASH - select CRYPTO_MANAGER - help - VMAC is a message authentication algorithm designed for - very high speed on 64-bit architectures. - - See https://fastcrypto.org/vmac for further information. - config CRYPTO_WP512 tristate "Whirlpool" select CRYPTO_HASH @@ -1102,6 +1084,7 @@ config CRYPTO_CRC32 config CRYPTO_CRCT10DIF tristate "CRCT10DIF" select CRYPTO_HASH + select CRC_T10DIF help CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF) diff --git a/crypto/Makefile b/crypto/Makefile index 77abca715445..f67e853c4690 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -69,7 +69,6 @@ obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o obj-$(CONFIG_CRYPTO_USER) += crypto_user.o obj-$(CONFIG_CRYPTO_CMAC) += cmac.o obj-$(CONFIG_CRYPTO_HMAC) += hmac.o -obj-$(CONFIG_CRYPTO_VMAC) += vmac.o obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o obj-$(CONFIG_CRYPTO_MD4) += md4.o @@ -95,7 +94,6 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o obj-$(CONFIG_CRYPTO_CTR) += ctr.o obj-$(CONFIG_CRYPTO_XCTR) += xctr.o obj-$(CONFIG_CRYPTO_HCTR2) += hctr2.o -obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o obj-$(CONFIG_CRYPTO_GCM) += gcm.o @@ -157,7 +155,8 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o CFLAGS_crc32c_generic.o += -DARCH=$(ARCH) CFLAGS_crc32_generic.o += -DARCH=$(ARCH) -obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o +obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_generic.o +CFLAGS_crct10dif_generic.o += -DARCH=$(ARCH) obj-$(CONFIG_CRYPTO_CRC64_ROCKSOFT) += crc64_rocksoft_generic.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index 4fdb53435827..6cbff298722b 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -516,7 +516,6 @@ static struct aead_alg crypto_aegis128_alg_generic = { .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct aegis_ctx), - .base.cra_alignmask = 0, .base.cra_priority = 100, .base.cra_name = "aegis128", .base.cra_driver_name = "aegis128-generic", @@ -535,7 +534,6 @@ static struct aead_alg crypto_aegis128_alg_simd = { .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct aegis_ctx), - .base.cra_alignmask = 0, .base.cra_priority = 200, .base.cra_name = "aegis128", .base.cra_driver_name = "aegis128-simd", diff --git a/crypto/ahash.c b/crypto/ahash.c index bcd9de009a91..b08b89ec26ec 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -27,6 +27,93 @@ #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e +struct crypto_hash_walk { + char *data; + + unsigned int offset; + unsigned int flags; + + struct page *pg; + unsigned int entrylen; + + unsigned int total; + struct 
scatterlist *sg; +}; + +static int hash_walk_next(struct crypto_hash_walk *walk) +{ + unsigned int offset = walk->offset; + unsigned int nbytes = min(walk->entrylen, + ((unsigned int)(PAGE_SIZE)) - offset); + + walk->data = kmap_local_page(walk->pg); + walk->data += offset; + walk->entrylen -= nbytes; + return nbytes; +} + +static int hash_walk_new_entry(struct crypto_hash_walk *walk) +{ + struct scatterlist *sg; + + sg = walk->sg; + walk->offset = sg->offset; + walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); + walk->offset = offset_in_page(walk->offset); + walk->entrylen = sg->length; + + if (walk->entrylen > walk->total) + walk->entrylen = walk->total; + walk->total -= walk->entrylen; + + return hash_walk_next(walk); +} + +static int crypto_hash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk) +{ + walk->total = req->nbytes; + + if (!walk->total) { + walk->entrylen = 0; + return 0; + } + + walk->sg = req->src; + walk->flags = req->base.flags; + + return hash_walk_new_entry(walk); +} + +static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) +{ + walk->data -= walk->offset; + + kunmap_local(walk->data); + crypto_yield(walk->flags); + + if (err) + return err; + + if (walk->entrylen) { + walk->offset = 0; + walk->pg++; + return hash_walk_next(walk); + } + + if (!walk->total) + return 0; + + walk->sg = sg_next(walk->sg); + + return hash_walk_new_entry(walk); +} + +static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) +{ + return !(walk->entrylen | walk->total); +} + /* * For an ahash tfm that is using an shash algorithm (instead of an ahash * algorithm), this returns the underlying shash tfm. @@ -137,77 +224,6 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm) return 0; } -static int hash_walk_next(struct crypto_hash_walk *walk) -{ - unsigned int offset = walk->offset; - unsigned int nbytes = min(walk->entrylen, - ((unsigned int)(PAGE_SIZE)) - offset); - - walk->data = kmap_local_page(walk->pg); - walk->data += offset; - walk->entrylen -= nbytes; - return nbytes; -} - -static int hash_walk_new_entry(struct crypto_hash_walk *walk) -{ - struct scatterlist *sg; - - sg = walk->sg; - walk->offset = sg->offset; - walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); - walk->offset = offset_in_page(walk->offset); - walk->entrylen = sg->length; - - if (walk->entrylen > walk->total) - walk->entrylen = walk->total; - walk->total -= walk->entrylen; - - return hash_walk_next(walk); -} - -int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) -{ - walk->data -= walk->offset; - - kunmap_local(walk->data); - crypto_yield(walk->flags); - - if (err) - return err; - - if (walk->entrylen) { - walk->offset = 0; - walk->pg++; - return hash_walk_next(walk); - } - - if (!walk->total) - return 0; - - walk->sg = sg_next(walk->sg); - - return hash_walk_new_entry(walk); -} -EXPORT_SYMBOL_GPL(crypto_hash_walk_done); - -int crypto_hash_walk_first(struct ahash_request *req, - struct crypto_hash_walk *walk) -{ - walk->total = req->nbytes; - - if (!walk->total) { - walk->entrylen = 0; - return 0; - } - - walk->sg = req->src; - walk->flags = req->base.flags; - - return hash_walk_new_entry(walk); -} -EXPORT_SYMBOL_GPL(crypto_hash_walk_first); - static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { diff --git a/crypto/algapi.c b/crypto/algapi.c index 16f7c7a9d8ab..5318c214debb 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -407,6 +407,7 @@ EXPORT_SYMBOL_GPL(crypto_remove_final); 
int crypto_register_alg(struct crypto_alg *alg) { struct crypto_larval *larval; + bool test_started = false; LIST_HEAD(algs_to_put); int err; @@ -418,17 +419,19 @@ int crypto_register_alg(struct crypto_alg *alg) down_write(&crypto_alg_sem); larval = __crypto_register_alg(alg, &algs_to_put); if (!IS_ERR_OR_NULL(larval)) { - bool test_started = crypto_boot_test_finished(); - + test_started = crypto_boot_test_finished(); larval->test_started = test_started; - if (test_started) - crypto_schedule_test(larval); } up_write(&crypto_alg_sem); if (IS_ERR(larval)) return PTR_ERR(larval); - crypto_remove_final(&algs_to_put); + + if (test_started) + crypto_schedule_test(larval); + else + crypto_remove_final(&algs_to_put); + return 0; } EXPORT_SYMBOL_GPL(crypto_register_alg); @@ -642,10 +645,8 @@ int crypto_register_instance(struct crypto_template *tmpl, larval = __crypto_register_alg(&inst->alg, &algs_to_put); if (IS_ERR(larval)) goto unlock; - else if (larval) { + else if (larval) larval->test_started = true; - crypto_schedule_test(larval); - } hlist_add_head(&inst->list, &tmpl->instances); inst->tmpl = tmpl; @@ -655,7 +656,12 @@ unlock: if (IS_ERR(larval)) return PTR_ERR(larval); - crypto_remove_final(&algs_to_put); + + if (larval) + crypto_schedule_test(larval); + else + crypto_remove_final(&algs_to_put); + return 0; } EXPORT_SYMBOL_GPL(crypto_register_instance); @@ -1016,6 +1022,8 @@ static void __init crypto_start_tests(void) if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)) return; + set_crypto_boot_test_finished(); + for (;;) { struct crypto_larval *larval = NULL; struct crypto_alg *q; @@ -1038,7 +1046,6 @@ static void __init crypto_start_tests(void) l->test_started = true; larval = l; - crypto_schedule_test(larval); break; } @@ -1046,9 +1053,9 @@ static void __init crypto_start_tests(void) if (!larval) break; - } - set_crypto_boot_test_finished(); + crypto_schedule_test(larval); + } } static int __init crypto_algapi_init(void) diff --git a/crypto/anubis.c b/crypto/anubis.c index 9f0cf61bbc6e..886e7c913688 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c @@ -33,7 +33,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/byteorder.h> +#include <linux/unaligned.h> #include <linux/types.h> #define ANUBIS_MIN_KEY_SIZE 16 @@ -463,7 +463,6 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *key = (const __be32 *)in_key; int N, R, i, r; u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; @@ -482,7 +481,7 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, /* * map cipher key to initial key state (mu): */ for (i = 0; i < N; i++) - kappa[i] = be32_to_cpu(key[i]); + kappa[i] = get_unaligned_be32(&in_key[4 * i]); /* * generate R + 1 round keys: @@ -570,10 +569,8 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, } static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], - u8 *ciphertext, const u8 *plaintext, const int R) + u8 *dst, const u8 *src, const int R) { - const __be32 *src = (const __be32 *)plaintext; - __be32 *dst = (__be32 *)ciphertext; int i, r; u32 state[4]; u32 inter[4]; @@ -583,7 +580,7 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], * and add initial round key (sigma[K^0]): */ for (i = 0; i < 4; i++) - state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i]; + state[i] = get_unaligned_be32(&src[4 * i]) ^ roundKey[0][i]; /* * R - 1 full rounds: @@ -654,7 +651,7 @@ static void anubis_crypt(u32 
roundKey[ANUBIS_MAX_ROUNDS + 1][4], */ for (i = 0; i < 4; i++) - dst[i] = cpu_to_be32(inter[i]); + put_unaligned_be32(inter[i], &dst[4 * i]); } static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -675,7 +672,6 @@ static struct crypto_alg anubis_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ANUBIS_BLOCK_SIZE, .cra_ctxsize = sizeof (struct anubis_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = ANUBIS_MIN_KEY_SIZE, diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c index d96dfc4fdde6..bd359d3313c2 100644 --- a/crypto/aria_generic.c +++ b/crypto/aria_generic.c @@ -15,6 +15,7 @@ */ #include <crypto/aria.h> +#include <linux/unaligned.h> static const u32 key_rc[20] = { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0, @@ -27,7 +28,6 @@ static const u32 key_rc[20] = { static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key, unsigned int key_len) { - const __be32 *key = (const __be32 *)in_key; u32 w0[4], w1[4], w2[4], w3[4]; u32 reg0, reg1, reg2, reg3; const u32 *ck; @@ -35,10 +35,10 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key, ck = &key_rc[(key_len - 16) / 2]; - w0[0] = be32_to_cpu(key[0]); - w0[1] = be32_to_cpu(key[1]); - w0[2] = be32_to_cpu(key[2]); - w0[3] = be32_to_cpu(key[3]); + w0[0] = get_unaligned_be32(&in_key[0]); + w0[1] = get_unaligned_be32(&in_key[4]); + w0[2] = get_unaligned_be32(&in_key[8]); + w0[3] = get_unaligned_be32(&in_key[12]); reg0 = w0[0] ^ ck[0]; reg1 = w0[1] ^ ck[1]; @@ -48,11 +48,11 @@ static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key, aria_subst_diff_odd(®0, ®1, ®2, ®3); if (key_len > 16) { - w1[0] = be32_to_cpu(key[4]); - w1[1] = be32_to_cpu(key[5]); + w1[0] = get_unaligned_be32(&in_key[16]); + w1[1] = get_unaligned_be32(&in_key[20]); if (key_len > 24) { - w1[2] = be32_to_cpu(key[6]); - w1[3] = be32_to_cpu(key[7]); + w1[2] = get_unaligned_be32(&in_key[24]); + w1[3] = get_unaligned_be32(&in_key[28]); } else { w1[2] = 0; w1[3] = 0; @@ -195,17 +195,15 @@ EXPORT_SYMBOL_GPL(aria_set_key); static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in, u32 key[][ARIA_RD_KEY_WORDS]) { - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; u32 reg0, reg1, reg2, reg3; int rounds, rkidx = 0; rounds = ctx->rounds; - reg0 = be32_to_cpu(src[0]); - reg1 = be32_to_cpu(src[1]); - reg2 = be32_to_cpu(src[2]); - reg3 = be32_to_cpu(src[3]); + reg0 = get_unaligned_be32(&in[0]); + reg1 = get_unaligned_be32(&in[4]); + reg2 = get_unaligned_be32(&in[8]); + reg3 = get_unaligned_be32(&in[12]); aria_add_round_key(key[rkidx], ®0, ®1, ®2, ®3); rkidx++; @@ -241,10 +239,10 @@ static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in, (u8)(s1[get_u8(reg3, 2)]), (u8)(s2[get_u8(reg3, 3)])); - dst[0] = cpu_to_be32(reg0); - dst[1] = cpu_to_be32(reg1); - dst[2] = cpu_to_be32(reg2); - dst[3] = cpu_to_be32(reg3); + put_unaligned_be32(reg0, &out[0]); + put_unaligned_be32(reg1, &out[4]); + put_unaligned_be32(reg2, &out[8]); + put_unaligned_be32(reg3, &out[12]); } void aria_encrypt(void *_ctx, u8 *out, const u8 *in) @@ -284,7 +282,6 @@ static struct crypto_alg aria_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ARIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aria_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index 43af5fa510c0..ba2d9d1ea235 100644 --- 
a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c @@ -18,16 +18,6 @@ #include "asymmetric_keys.h" -const char *const key_being_used_for[NR__KEY_BEING_USED_FOR] = { - [VERIFYING_MODULE_SIGNATURE] = "mod sig", - [VERIFYING_FIRMWARE_SIGNATURE] = "firmware sig", - [VERIFYING_KEXEC_PE_SIGNATURE] = "kexec PE sig", - [VERIFYING_KEY_SIGNATURE] = "key sig", - [VERIFYING_KEY_SELF_SIGNATURE] = "key self sig", - [VERIFYING_UNSPECIFIED_SIGNATURE] = "unspec sig", -}; -EXPORT_SYMBOL_GPL(key_being_used_for); - static LIST_HEAD(asymmetric_key_parsers); static DECLARE_RWSEM(asymmetric_key_parsers_sem); diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c index 6a55d206fab3..783a30b27398 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32_generic.c @@ -157,15 +157,19 @@ static struct shash_alg algs[] = {{ .base.cra_init = crc32_cra_init, }}; +static int num_algs; + static int __init crc32_mod_init(void) { /* register the arch flavor only if it differs from the generic one */ - return crypto_register_shashes(algs, 1 + (&crc32_le != &crc32_le_base)); + num_algs = 1 + ((crc32_optimizations() & CRC32_LE_OPTIMIZATION) != 0); + + return crypto_register_shashes(algs, num_algs); } static void __exit crc32_mod_fini(void) { - crypto_unregister_shashes(algs, 1 + (&crc32_le != &crc32_le_base)); + crypto_unregister_shashes(algs, num_algs); } subsys_initcall(crc32_mod_init); diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 7c2357c30fdf..985da981d6e2 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -85,7 +85,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - ctx->crc = __crc32c_le_base(ctx->crc, data, length); + ctx->crc = crc32c_le_base(ctx->crc, data, length); return 0; } @@ -108,7 +108,7 @@ static int chksum_final(struct shash_desc *desc, u8 *out) static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { - put_unaligned_le32(~__crc32c_le_base(*crcp, data, len), out); + put_unaligned_le32(~crc32c_le_base(*crcp, data, len), out); return 0; } @@ -197,15 +197,19 @@ static struct shash_alg algs[] = {{ .base.cra_init = crc32c_cra_init, }}; +static int num_algs; + static int __init crc32c_mod_init(void) { /* register the arch flavor only if it differs from the generic one */ - return crypto_register_shashes(algs, 1 + (&__crc32c_le != &__crc32c_le_base)); + num_algs = 1 + ((crc32_optimizations() & CRC32C_OPTIMIZATION) != 0); + + return crypto_register_shashes(algs, num_algs); } static void __exit crc32c_mod_fini(void) { - crypto_unregister_shashes(algs, 1 + (&__crc32c_le != &__crc32c_le_base)); + crypto_unregister_shashes(algs, num_algs); } subsys_initcall(crc32c_mod_init); diff --git a/crypto/crct10dif_common.c b/crypto/crct10dif_common.c deleted file mode 100644 index b2fab366f518..000000000000 --- a/crypto/crct10dif_common.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Cryptographic API. - * - * T10 Data Integrity Field CRC16 Crypto Transform - * - * Copyright (c) 2007 Oracle Corporation. All rights reserved. - * Written by Martin K. Petersen <martin.petersen@oracle.com> - * Copyright (C) 2013 Intel Corporation - * Author: Tim Chen <tim.c.chen@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - */ - -#include <linux/crc-t10dif.h> -#include <linux/module.h> -#include <linux/kernel.h> - -/* Table generated using the following polynomium: - * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 - * gt: 0x8bb7 - */ -static const __u16 t10_dif_crc_table[256] = { - 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, - 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, - 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, - 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, - 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, - 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, - 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, - 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, - 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, - 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, - 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, - 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, - 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, - 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, - 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, - 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, - 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, - 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, - 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, - 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, - 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, - 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, - 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, - 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, - 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, - 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, - 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, - 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, - 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, - 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, - 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, - 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 -}; - -__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len) -{ - unsigned int i; - - for (i = 0 ; i < len ; i++) - crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; - - return crc; -} -EXPORT_SYMBOL(crc_t10dif_generic); - -MODULE_DESCRIPTION("T10 DIF CRC calculation common code"); -MODULE_LICENSE("GPL"); diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c index e843982073bb..259cb01932cb 100644 --- a/crypto/crct10dif_generic.c +++ b/crypto/crct10dif_generic.c @@ -57,6 +57,15 @@ static int chksum_update(struct shash_desc *desc, const u8 *data, return 0; } +static int chksum_update_arch(struct 
shash_desc *desc, const u8 *data, + unsigned int length) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + ctx->crc = crc_t10dif_update(ctx->crc, data, length); + return 0; +} + static int chksum_final(struct shash_desc *desc, u8 *out) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); @@ -71,6 +80,13 @@ static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out) return 0; } +static int __chksum_finup_arch(__u16 crc, const u8 *data, unsigned int len, + u8 *out) +{ + *(__u16 *)out = crc_t10dif_update(crc, data, len); + return 0; +} + static int chksum_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { @@ -79,37 +95,67 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data, return __chksum_finup(ctx->crc, data, len, out); } +static int chksum_finup_arch(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); + + return __chksum_finup_arch(ctx->crc, data, len, out); +} + static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { return __chksum_finup(0, data, length, out); } -static struct shash_alg alg = { - .digestsize = CRC_T10DIF_DIGEST_SIZE, - .init = chksum_init, - .update = chksum_update, - .final = chksum_final, - .finup = chksum_finup, - .digest = chksum_digest, - .descsize = sizeof(struct chksum_desc_ctx), - .base = { - .cra_name = "crct10dif", - .cra_driver_name = "crct10dif-generic", - .cra_priority = 100, - .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; +static int chksum_digest_arch(struct shash_desc *desc, const u8 *data, + unsigned int length, u8 *out) +{ + return __chksum_finup_arch(0, data, length, out); +} + +static struct shash_alg algs[] = {{ + .digestsize = CRC_T10DIF_DIGEST_SIZE, + .init = chksum_init, + .update = chksum_update, + .final = chksum_final, + .finup = chksum_finup, + .digest = chksum_digest, + .descsize = sizeof(struct chksum_desc_ctx), + .base.cra_name = "crct10dif", + .base.cra_driver_name = "crct10dif-generic", + .base.cra_priority = 100, + .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}, { + .digestsize = CRC_T10DIF_DIGEST_SIZE, + .init = chksum_init, + .update = chksum_update_arch, + .final = chksum_final, + .finup = chksum_finup_arch, + .digest = chksum_digest_arch, + .descsize = sizeof(struct chksum_desc_ctx), + .base.cra_name = "crct10dif", + .base.cra_driver_name = "crct10dif-" __stringify(ARCH), + .base.cra_priority = 150, + .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}}; + +static int num_algs; static int __init crct10dif_mod_init(void) { - return crypto_register_shash(&alg); + /* register the arch flavor only if it differs from the generic one */ + num_algs = 1 + crc_t10dif_is_optimized(); + + return crypto_register_shashes(algs, num_algs); } static void __exit crct10dif_mod_fini(void) { - crypto_unregister_shash(&alg); + crypto_unregister_shashes(algs, num_algs); } subsys_initcall(crct10dif_mod_init); diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index e60a0eb628e8..c7c16da5e649 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -517,7 +517,7 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, crypto_init_queue(&engine->queue, qlen); spin_lock_init(&engine->queue_lock); - engine->kworker = kthread_create_worker(0, "%s", engine->name); + engine->kworker = kthread_run_worker(0, "%s", engine->name); if 
(IS_ERR(engine->kworker)) { dev_err(dev, "failed to create crypto request pump task\n"); return NULL; diff --git a/crypto/fips.c b/crypto/fips.c index 8a784018ebfc..a58e7750f532 100644 --- a/crypto/fips.c +++ b/crypto/fips.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/sysctl.h> #include <linux/notifier.h> +#include <linux/string_choices.h> #include <generated/utsrelease.h> int fips_enabled; @@ -24,8 +25,7 @@ EXPORT_SYMBOL_GPL(fips_fail_notif_chain); static int fips_enable(char *str) { fips_enabled = !!simple_strtol(str, NULL, 0); - printk(KERN_INFO "fips mode: %s\n", - fips_enabled ? "enabled" : "disabled"); + pr_info("fips mode: %s\n", str_enabled_disabled(fips_enabled)); return 1; } diff --git a/crypto/keywrap.c b/crypto/keywrap.c deleted file mode 100644 index 385ffdfd5a9b..000000000000 --- a/crypto/keywrap.c +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Key Wrapping: RFC3394 / NIST SP800-38F - * - * Copyright (C) 2015, Stephan Mueller <smueller@chronox.de> - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, and the entire permission notice in its entirety, - * including the disclaimer of warranties. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote - * products derived from this software without specific prior - * written permission. - * - * ALTERNATIVELY, this product may be distributed under the terms of - * the GNU General Public License, in which case the provisions of the GPL2 - * are required INSTEAD OF the above restrictions. (This clause is - * necessary due to a potential bad interaction between the GPL and - * the restrictions contained in a BSD-style copyright.) - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF - * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT - * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - */ - -/* - * Note for using key wrapping: - * - * * The result of the encryption operation is the ciphertext starting - * with the 2nd semiblock. The first semiblock is provided as the IV. - * The IV used to start the encryption operation is the default IV. - * - * * The input for the decryption is the first semiblock handed in as an - * IV. The ciphertext is the data starting with the 2nd semiblock. The - * return code of the decryption operation will be EBADMSG in case an - * integrity error occurs. 
- * - * To obtain the full result of an encryption as expected by SP800-38F, the - * caller must allocate a buffer of plaintext + 8 bytes: - * - * unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm); - * u8 data[datalen]; - * u8 *iv = data; - * u8 *pt = data + crypto_skcipher_ivsize(tfm); - * <ensure that pt contains the plaintext of size ptlen> - * sg_init_one(&sg, pt, ptlen); - * skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv); - * - * ==> After encryption, data now contains full KW result as per SP800-38F. - * - * In case of decryption, ciphertext now already has the expected length - * and must be segmented appropriately: - * - * unsigned int datalen = CTLEN; - * u8 data[datalen]; - * <ensure that data contains full ciphertext> - * u8 *iv = data; - * u8 *ct = data + crypto_skcipher_ivsize(tfm); - * unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm); - * sg_init_one(&sg, ct, ctlen); - * skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv); - * - * ==> After decryption (which hopefully does not return EBADMSG), the ct - * pointer now points to the plaintext of size ctlen. - * - * Note 2: KWP is not implemented as this would defy in-place operation. - * If somebody wants to wrap non-aligned data, he should simply pad - * the input with zeros to fill it up to the 8 byte boundary. - */ - -#include <linux/module.h> -#include <linux/crypto.h> -#include <linux/scatterlist.h> -#include <crypto/scatterwalk.h> -#include <crypto/internal/cipher.h> -#include <crypto/internal/skcipher.h> - -struct crypto_kw_block { -#define SEMIBSIZE 8 - __be64 A; - __be64 R; -}; - -/* - * Fast forward the SGL to the "end" length minus SEMIBSIZE. - * The start in the SGL defined by the fast-forward is returned with - * the walk variable - */ -static void crypto_kw_scatterlist_ff(struct scatter_walk *walk, - struct scatterlist *sg, - unsigned int end) -{ - unsigned int skip = 0; - - /* The caller should only operate on full SEMIBLOCKs. */ - BUG_ON(end < SEMIBSIZE); - - skip = end - SEMIBSIZE; - while (sg) { - if (sg->length > skip) { - scatterwalk_start(walk, sg); - scatterwalk_advance(walk, skip); - break; - } - - skip -= sg->length; - sg = sg_next(sg); - } -} - -static int crypto_kw_decrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); - struct crypto_kw_block block; - struct scatterlist *src, *dst; - u64 t = 6 * ((req->cryptlen) >> 3); - unsigned int i; - int ret = 0; - - /* - * Require at least 2 semiblocks (note, the 3rd semiblock that is - * required by SP800-38F is the IV. - */ - if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE) - return -EINVAL; - - /* Place the IV into block A */ - memcpy(&block.A, req->iv, SEMIBSIZE); - - /* - * src scatterlist is read-only. dst scatterlist is r/w. During the - * first loop, src points to req->src and dst to req->dst. For any - * subsequent round, the code operates on req->dst only. 
- */ - src = req->src; - dst = req->dst; - - for (i = 0; i < 6; i++) { - struct scatter_walk src_walk, dst_walk; - unsigned int nbytes = req->cryptlen; - - while (nbytes) { - /* move pointer by nbytes in the SGL */ - crypto_kw_scatterlist_ff(&src_walk, src, nbytes); - /* get the source block */ - scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, - false); - - /* perform KW operation: modify IV with counter */ - block.A ^= cpu_to_be64(t); - t--; - /* perform KW operation: decrypt block */ - crypto_cipher_decrypt_one(cipher, (u8 *)&block, - (u8 *)&block); - - /* move pointer by nbytes in the SGL */ - crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes); - /* Copy block->R into place */ - scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, - true); - - nbytes -= SEMIBSIZE; - } - - /* we now start to operate on the dst SGL only */ - src = req->dst; - dst = req->dst; - } - - /* Perform authentication check */ - if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL)) - ret = -EBADMSG; - - memzero_explicit(&block, sizeof(struct crypto_kw_block)); - - return ret; -} - -static int crypto_kw_encrypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); - struct crypto_kw_block block; - struct scatterlist *src, *dst; - u64 t = 1; - unsigned int i; - - /* - * Require at least 2 semiblocks (note, the 3rd semiblock that is - * required by SP800-38F is the IV that occupies the first semiblock. - * This means that the dst memory must be one semiblock larger than src. - * Also ensure that the given data is aligned to semiblock. - */ - if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE) - return -EINVAL; - - /* - * Place the predefined IV into block A -- for encrypt, the caller - * does not need to provide an IV, but he needs to fetch the final IV. - */ - block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL); - - /* - * src scatterlist is read-only. dst scatterlist is r/w. During the - * first loop, src points to req->src and dst to req->dst. For any - * subsequent round, the code operates on req->dst only. 
- */ - src = req->src; - dst = req->dst; - - for (i = 0; i < 6; i++) { - struct scatter_walk src_walk, dst_walk; - unsigned int nbytes = req->cryptlen; - - scatterwalk_start(&src_walk, src); - scatterwalk_start(&dst_walk, dst); - - while (nbytes) { - /* get the source block */ - scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, - false); - - /* perform KW operation: encrypt block */ - crypto_cipher_encrypt_one(cipher, (u8 *)&block, - (u8 *)&block); - /* perform KW operation: modify IV with counter */ - block.A ^= cpu_to_be64(t); - t++; - - /* Copy block->R into place */ - scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, - true); - - nbytes -= SEMIBSIZE; - } - - /* we now start to operate on the dst SGL only */ - src = req->dst; - dst = req->dst; - } - - /* establish the IV for the caller to pick up */ - memcpy(req->iv, &block.A, SEMIBSIZE); - - memzero_explicit(&block, sizeof(struct crypto_kw_block)); - - return 0; -} - -static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb) -{ - struct skcipher_instance *inst; - struct crypto_alg *alg; - int err; - - inst = skcipher_alloc_instance_simple(tmpl, tb); - if (IS_ERR(inst)) - return PTR_ERR(inst); - - alg = skcipher_ialg_simple(inst); - - err = -EINVAL; - /* Section 5.1 requirement for KW */ - if (alg->cra_blocksize != sizeof(struct crypto_kw_block)) - goto out_free_inst; - - inst->alg.base.cra_blocksize = SEMIBSIZE; - inst->alg.base.cra_alignmask = 0; - inst->alg.ivsize = SEMIBSIZE; - - inst->alg.encrypt = crypto_kw_encrypt; - inst->alg.decrypt = crypto_kw_decrypt; - - err = skcipher_register_instance(tmpl, inst); - if (err) { -out_free_inst: - inst->free(inst); - } - - return err; -} - -static struct crypto_template crypto_kw_tmpl = { - .name = "kw", - .create = crypto_kw_create, - .module = THIS_MODULE, -}; - -static int __init crypto_kw_init(void) -{ - return crypto_register_template(&crypto_kw_tmpl); -} - -static void __exit crypto_kw_exit(void) -{ - crypto_unregister_template(&crypto_kw_tmpl); -} - -subsys_initcall(crypto_kw_init); -module_exit(crypto_kw_exit); - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); -MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)"); -MODULE_ALIAS_CRYPTO("kw"); -MODULE_IMPORT_NS("CRYPTO_INTERNAL"); diff --git a/crypto/khazad.c b/crypto/khazad.c index 70cafe73f974..7ad338ca2c18 100644 --- a/crypto/khazad.c +++ b/crypto/khazad.c @@ -23,7 +23,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> -#include <asm/byteorder.h> +#include <linux/unaligned.h> #include <linux/types.h> #define KHAZAD_KEY_SIZE 16 @@ -757,14 +757,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *key = (const __be32 *)in_key; int r; const u64 *S = T7; u64 K2, K1; - /* key is supposed to be 32-bit aligned */ - K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]); - K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]); + K2 = get_unaligned_be64(&in_key[0]); + K1 = get_unaligned_be64(&in_key[8]); /* setup the encrypt key */ for (r = 0; r <= KHAZAD_ROUNDS; r++) { @@ -800,14 +798,12 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, } static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], - u8 *ciphertext, const u8 *plaintext) + u8 *dst, const u8 *src) { - const __be64 *src = (const __be64 *)plaintext; - __be64 *dst = (__be64 *)ciphertext; int r; u64 state; - state = be64_to_cpu(*src) ^ 
roundKey[0]; + state = get_unaligned_be64(src) ^ roundKey[0]; for (r = 1; r < KHAZAD_ROUNDS; r++) { state = T0[(int)(state >> 56) ] ^ @@ -831,7 +827,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ roundKey[KHAZAD_ROUNDS]; - *dst = cpu_to_be64(state); + put_unaligned_be64(state, dst); } static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) @@ -852,7 +848,6 @@ static struct crypto_alg khazad_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = KHAZAD_BLOCK_SIZE, .cra_ctxsize = sizeof (struct khazad_ctx), - .cra_alignmask = 7, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = KHAZAD_KEY_SIZE, diff --git a/crypto/proc.c b/crypto/proc.c index 56c7c78df297..522b27d90d29 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -47,13 +47,10 @@ static int c_show(struct seq_file *m, void *p) (alg->cra_flags & CRYPTO_ALG_TESTED) ? "passed" : "unknown"); seq_printf(m, "internal : %s\n", - (alg->cra_flags & CRYPTO_ALG_INTERNAL) ? - "yes" : "no"); - if (fips_enabled) { + str_yes_no(alg->cra_flags & CRYPTO_ALG_INTERNAL)); + if (fips_enabled) seq_printf(m, "fips : %s\n", - (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL) ? - "no" : "yes"); - } + str_no_yes(alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)); if (alg->cra_flags & CRYPTO_ALG_LARVAL) { seq_printf(m, "type : larval\n"); diff --git a/crypto/seed.c b/crypto/seed.c index d0506ade2a5f..d05d8ed909fa 100644 --- a/crypto/seed.c +++ b/crypto/seed.c @@ -13,7 +13,7 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> -#include <asm/byteorder.h> +#include <linux/unaligned.h> #define SEED_NUM_KCONSTANTS 16 #define SEED_KEY_SIZE 16 @@ -329,13 +329,12 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key, { struct seed_ctx *ctx = crypto_tfm_ctx(tfm); u32 *keyout = ctx->keysched; - const __be32 *key = (const __be32 *)in_key; u32 i, t0, t1, x1, x2, x3, x4; - x1 = be32_to_cpu(key[0]); - x2 = be32_to_cpu(key[1]); - x3 = be32_to_cpu(key[2]); - x4 = be32_to_cpu(key[3]); + x1 = get_unaligned_be32(&in_key[0]); + x2 = get_unaligned_be32(&in_key[4]); + x3 = get_unaligned_be32(&in_key[8]); + x4 = get_unaligned_be32(&in_key[12]); for (i = 0; i < SEED_NUM_KCONSTANTS; i++) { t0 = x1 + x3 - KC[i]; @@ -364,15 +363,13 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key, static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct seed_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; u32 x1, x2, x3, x4, t0, t1; const u32 *ks = ctx->keysched; - x1 = be32_to_cpu(src[0]); - x2 = be32_to_cpu(src[1]); - x3 = be32_to_cpu(src[2]); - x4 = be32_to_cpu(src[3]); + x1 = get_unaligned_be32(&in[0]); + x2 = get_unaligned_be32(&in[4]); + x3 = get_unaligned_be32(&in[8]); + x4 = get_unaligned_be32(&in[12]); OP(x1, x2, x3, x4, 0); OP(x3, x4, x1, x2, 2); @@ -391,10 +388,10 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) OP(x1, x2, x3, x4, 28); OP(x3, x4, x1, x2, 30); - dst[0] = cpu_to_be32(x3); - dst[1] = cpu_to_be32(x4); - dst[2] = cpu_to_be32(x1); - dst[3] = cpu_to_be32(x2); + put_unaligned_be32(x3, &out[0]); + put_unaligned_be32(x4, &out[4]); + put_unaligned_be32(x1, &out[8]); + put_unaligned_be32(x2, &out[12]); } /* decrypt a block of text */ @@ -402,15 +399,13 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct 
seed_ctx *ctx = crypto_tfm_ctx(tfm); - const __be32 *src = (const __be32 *)in; - __be32 *dst = (__be32 *)out; u32 x1, x2, x3, x4, t0, t1; const u32 *ks = ctx->keysched; - x1 = be32_to_cpu(src[0]); - x2 = be32_to_cpu(src[1]); - x3 = be32_to_cpu(src[2]); - x4 = be32_to_cpu(src[3]); + x1 = get_unaligned_be32(&in[0]); + x2 = get_unaligned_be32(&in[4]); + x3 = get_unaligned_be32(&in[8]); + x4 = get_unaligned_be32(&in[12]); OP(x1, x2, x3, x4, 30); OP(x3, x4, x1, x2, 28); @@ -429,10 +424,10 @@ static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) OP(x1, x2, x3, x4, 2); OP(x3, x4, x1, x2, 0); - dst[0] = cpu_to_be32(x3); - dst[1] = cpu_to_be32(x4); - dst[2] = cpu_to_be32(x1); - dst[3] = cpu_to_be32(x2); + put_unaligned_be32(x3, &out[0]); + put_unaligned_be32(x4, &out[4]); + put_unaligned_be32(x1, &out[8]); + put_unaligned_be32(x2, &out[12]); } @@ -443,7 +438,6 @@ static struct crypto_alg seed_alg = { .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = SEED_BLOCK_SIZE, .cra_ctxsize = sizeof(struct seed_ctx), - .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { diff --git a/crypto/sig.c b/crypto/sig.c index 5e1f1f739da2..dfc7cae90802 100644 --- a/crypto/sig.c +++ b/crypto/sig.c @@ -15,8 +15,6 @@ #include "internal.h" -#define CRYPTO_ALG_TYPE_SIG_MASK 0x0000000e - static void crypto_sig_exit_tfm(struct crypto_tfm *tfm) { struct crypto_sig *sig = __crypto_sig_tfm(tfm); @@ -73,7 +71,7 @@ static const struct crypto_type crypto_sig_type = { .report = crypto_sig_report, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, - .maskset = CRYPTO_ALG_TYPE_SIG_MASK, + .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_SIG, .tfmsize = offsetof(struct crypto_sig, base), }; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index f74e4d0d87a2..a9eb2dcf2898 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -17,7 +17,6 @@ #include <linux/cryptouser.h> #include <linux/err.h> #include <linux/kernel.h> -#include <linux/list.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/seq_file.h> @@ -29,19 +28,10 @@ #define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e enum { - SKCIPHER_WALK_PHYS = 1 << 0, - SKCIPHER_WALK_SLOW = 1 << 1, - SKCIPHER_WALK_COPY = 1 << 2, - SKCIPHER_WALK_DIFF = 1 << 3, - SKCIPHER_WALK_SLEEP = 1 << 4, -}; - -struct skcipher_walk_buffer { - struct list_head entry; - struct scatter_walk dst; - unsigned int len; - u8 *data; - u8 buffer[]; + SKCIPHER_WALK_SLOW = 1 << 0, + SKCIPHER_WALK_COPY = 1 << 1, + SKCIPHER_WALK_DIFF = 1 << 2, + SKCIPHER_WALK_SLEEP = 1 << 3, }; static const struct crypto_type crypto_skcipher_type; @@ -73,16 +63,6 @@ static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC; } -/* Get a spot of the specified length that does not straddle a page. - * The caller needs to ensure that there is enough space for this operation. 
- */ -static inline u8 *skcipher_get_spot(u8 *start, unsigned int len) -{ - u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); - - return max(start, end_page); -} - static inline struct skcipher_alg *__crypto_skcipher_alg( struct crypto_alg *alg) { @@ -91,30 +71,44 @@ static inline struct skcipher_alg *__crypto_skcipher_alg( static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) { - u8 *addr; + u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1); - addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1); - addr = skcipher_get_spot(addr, bsize); - scatterwalk_copychunks(addr, &walk->out, bsize, - (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1); + scatterwalk_copychunks(addr, &walk->out, bsize, 1); return 0; } -int skcipher_walk_done(struct skcipher_walk *walk, int err) +/** + * skcipher_walk_done() - finish one step of a skcipher_walk + * @walk: the skcipher_walk + * @res: number of bytes *not* processed (>= 0) from walk->nbytes, + * or a -errno value to terminate the walk due to an error + * + * This function cleans up after one step of walking through the source and + * destination scatterlists, and advances to the next step if applicable. + * walk->nbytes is set to the number of bytes available in the next step, + * walk->total is set to the new total number of bytes remaining, and + * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there + * is no more data, or if an error occurred (i.e. -errno return), then + * walk->nbytes and walk->total are set to 0 and all resources owned by the + * skcipher_walk are freed. + * + * Return: 0 or a -errno value. If @res was a -errno value then it will be + * returned, but other errors may occur too. + */ +int skcipher_walk_done(struct skcipher_walk *walk, int res) { - unsigned int n = walk->nbytes; - unsigned int nbytes = 0; + unsigned int n = walk->nbytes; /* num bytes processed this step */ + unsigned int total = 0; /* new total remaining */ if (!n) goto finish; - if (likely(err >= 0)) { - n -= err; - nbytes = walk->total - n; + if (likely(res >= 0)) { + n -= res; /* subtract num bytes *not* processed */ + total = walk->total - n; } - if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS | - SKCIPHER_WALK_SLOW | + if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF)))) { unmap_src: @@ -126,34 +120,36 @@ unmap_src: skcipher_map_dst(walk); memcpy(walk->dst.virt.addr, walk->page, n); skcipher_unmap_dst(walk); - } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) { - if (err > 0) { + } else { /* SKCIPHER_WALK_SLOW */ + if (res > 0) { /* * Didn't process all bytes. Either the algorithm is * broken, or this was the last step and it turned out * the message wasn't evenly divisible into blocks but * the algorithm requires it. */ - err = -EINVAL; - nbytes = 0; + res = -EINVAL; + total = 0; } else n = skcipher_done_slow(walk, n); } - if (err > 0) - err = 0; + if (res > 0) + res = 0; - walk->total = nbytes; + walk->total = total; walk->nbytes = 0; scatterwalk_advance(&walk->in, n); scatterwalk_advance(&walk->out, n); - scatterwalk_done(&walk->in, 0, nbytes); - scatterwalk_done(&walk->out, 1, nbytes); - - if (nbytes) { - crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ? 
- CRYPTO_TFM_REQ_MAY_SLEEP : 0); + scatterwalk_done(&walk->in, 0, total); + scatterwalk_done(&walk->out, 1, total); + + if (total) { + if (walk->flags & SKCIPHER_WALK_SLEEP) + cond_resched(); + walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | + SKCIPHER_WALK_DIFF); return skcipher_walk_next(walk); } @@ -162,9 +158,6 @@ finish: if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) goto out; - if (walk->flags & SKCIPHER_WALK_PHYS) - goto out; - if (walk->iv != walk->oiv) memcpy(walk->oiv, walk->iv, walk->ivsize); if (walk->buffer != walk->page) @@ -173,104 +166,29 @@ finish: free_page((unsigned long)walk->page); out: - return err; + return res; } EXPORT_SYMBOL_GPL(skcipher_walk_done); -void skcipher_walk_complete(struct skcipher_walk *walk, int err) -{ - struct skcipher_walk_buffer *p, *tmp; - - list_for_each_entry_safe(p, tmp, &walk->buffers, entry) { - u8 *data; - - if (err) - goto done; - - data = p->data; - if (!data) { - data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1); - data = skcipher_get_spot(data, walk->stride); - } - - scatterwalk_copychunks(data, &p->dst, p->len, 1); - - if (offset_in_page(p->data) + p->len + walk->stride > - PAGE_SIZE) - free_page((unsigned long)p->data); - -done: - list_del(&p->entry); - kfree(p); - } - - if (!err && walk->iv != walk->oiv) - memcpy(walk->oiv, walk->iv, walk->ivsize); - if (walk->buffer != walk->page) - kfree(walk->buffer); - if (walk->page) - free_page((unsigned long)walk->page); -} -EXPORT_SYMBOL_GPL(skcipher_walk_complete); - -static void skcipher_queue_write(struct skcipher_walk *walk, - struct skcipher_walk_buffer *p) -{ - p->dst = walk->out; - list_add_tail(&p->entry, &walk->buffers); -} - static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) { - bool phys = walk->flags & SKCIPHER_WALK_PHYS; unsigned alignmask = walk->alignmask; - struct skcipher_walk_buffer *p; - unsigned a; unsigned n; u8 *buffer; - void *v; - - if (!phys) { - if (!walk->buffer) - walk->buffer = walk->page; - buffer = walk->buffer; - if (buffer) - goto ok; - } - - /* Start with the minimum alignment of kmalloc. */ - a = crypto_tfm_ctx_alignment() - 1; - n = bsize; - - if (phys) { - /* Calculate the minimum alignment of p->buffer. */ - a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1; - n += sizeof(*p); - } - - /* Minimum size to align p->buffer by alignmask. */ - n += alignmask & ~a; - /* Minimum size to ensure p->buffer does not straddle a page. 
*/ - n += (bsize - 1) & ~(alignmask | a); - - v = kzalloc(n, skcipher_walk_gfp(walk)); - if (!v) - return skcipher_walk_done(walk, -ENOMEM); - - if (phys) { - p = v; - p->len = bsize; - skcipher_queue_write(walk, p); - buffer = p->buffer; - } else { - walk->buffer = v; - buffer = v; + if (!walk->buffer) + walk->buffer = walk->page; + buffer = walk->buffer; + if (!buffer) { + /* Min size for a buffer of bsize bytes aligned to alignmask */ + n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); + + buffer = kzalloc(n, skcipher_walk_gfp(walk)); + if (!buffer) + return skcipher_walk_done(walk, -ENOMEM); + walk->buffer = buffer; } - -ok: walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); - walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize); walk->src.virt.addr = walk->dst.virt.addr; scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); @@ -283,7 +201,6 @@ ok: static int skcipher_next_copy(struct skcipher_walk *walk) { - struct skcipher_walk_buffer *p; u8 *tmp = walk->page; skcipher_map_src(walk); @@ -292,24 +209,6 @@ static int skcipher_next_copy(struct skcipher_walk *walk) walk->src.virt.addr = tmp; walk->dst.virt.addr = tmp; - - if (!(walk->flags & SKCIPHER_WALK_PHYS)) - return 0; - - p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk)); - if (!p) - return -ENOMEM; - - p->data = walk->page; - p->len = walk->nbytes; - skcipher_queue_write(walk, p); - - if (offset_in_page(walk->page) + walk->nbytes + walk->stride > - PAGE_SIZE) - walk->page = NULL; - else - walk->page += walk->nbytes; - return 0; } @@ -317,16 +216,10 @@ static int skcipher_next_fast(struct skcipher_walk *walk) { unsigned long diff; - walk->src.phys.page = scatterwalk_page(&walk->in); - walk->src.phys.offset = offset_in_page(walk->in.offset); - walk->dst.phys.page = scatterwalk_page(&walk->out); - walk->dst.phys.offset = offset_in_page(walk->out.offset); - - if (walk->flags & SKCIPHER_WALK_PHYS) - return 0; - - diff = walk->src.phys.offset - walk->dst.phys.offset; - diff |= walk->src.virt.page - walk->dst.virt.page; + diff = offset_in_page(walk->in.offset) - + offset_in_page(walk->out.offset); + diff |= (u8 *)scatterwalk_page(&walk->in) - + (u8 *)scatterwalk_page(&walk->out); skcipher_map_src(walk); walk->dst.virt.addr = walk->src.virt.addr; @@ -343,10 +236,6 @@ static int skcipher_walk_next(struct skcipher_walk *walk) { unsigned int bsize; unsigned int n; - int err; - - walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | - SKCIPHER_WALK_DIFF); n = walk->total; bsize = min(walk->stride, max(n, walk->blocksize)); @@ -358,9 +247,9 @@ static int skcipher_walk_next(struct skcipher_walk *walk) return skcipher_walk_done(walk, -EINVAL); slow_path: - err = skcipher_next_slow(walk, bsize); - goto set_phys_lowmem; + return skcipher_next_slow(walk, bsize); } + walk->nbytes = n; if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { if (!walk->page) { @@ -370,58 +259,30 @@ slow_path: if (!walk->page) goto slow_path; } - - walk->nbytes = min_t(unsigned, n, - PAGE_SIZE - offset_in_page(walk->page)); walk->flags |= SKCIPHER_WALK_COPY; - err = skcipher_next_copy(walk); - goto set_phys_lowmem; + return skcipher_next_copy(walk); } - walk->nbytes = n; - return skcipher_next_fast(walk); - -set_phys_lowmem: - if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) { - walk->src.phys.page = virt_to_page(walk->src.virt.addr); - walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); - walk->src.phys.offset &= PAGE_SIZE - 1; - walk->dst.phys.offset &= PAGE_SIZE - 1; - } - return err; } static int 
skcipher_copy_iv(struct skcipher_walk *walk) { - unsigned a = crypto_tfm_ctx_alignment() - 1; unsigned alignmask = walk->alignmask; unsigned ivsize = walk->ivsize; - unsigned bs = walk->stride; - unsigned aligned_bs; + unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1); unsigned size; u8 *iv; - aligned_bs = ALIGN(bs, alignmask + 1); - - /* Minimum size to align buffer by alignmask. */ - size = alignmask & ~a; - - if (walk->flags & SKCIPHER_WALK_PHYS) - size += ivsize; - else { - size += aligned_bs + ivsize; - - /* Minimum size to ensure buffer does not straddle a page. */ - size += (bs - 1) & ~(alignmask | a); - } + /* Min size for a buffer of stride + ivsize, aligned to alignmask */ + size = aligned_stride + ivsize + + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); if (!walk->buffer) return -ENOMEM; - iv = PTR_ALIGN(walk->buffer, alignmask + 1); - iv = skcipher_get_spot(iv, bs) + aligned_bs; + iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride; walk->iv = memcpy(iv, walk->iv, walk->ivsize); return 0; @@ -444,16 +305,22 @@ static int skcipher_walk_first(struct skcipher_walk *walk) return skcipher_walk_next(walk); } -static int skcipher_walk_skcipher(struct skcipher_walk *walk, - struct skcipher_request *req) +int skcipher_walk_virt(struct skcipher_walk *walk, + struct skcipher_request *req, bool atomic) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + const struct skcipher_alg *alg = + crypto_skcipher_alg(crypto_skcipher_reqtfm(req)); + + might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); walk->total = req->cryptlen; walk->nbytes = 0; walk->iv = req->iv; walk->oiv = req->iv; + if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic) + walk->flags = SKCIPHER_WALK_SLEEP; + else + walk->flags = 0; if (unlikely(!walk->total)) return 0; @@ -461,13 +328,14 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); - walk->flags &= ~SKCIPHER_WALK_SLEEP; - walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? - SKCIPHER_WALK_SLEEP : 0; - - walk->blocksize = crypto_skcipher_blocksize(tfm); - walk->ivsize = crypto_skcipher_ivsize(tfm); - walk->alignmask = crypto_skcipher_alignmask(tfm); + /* + * Accessing 'alg' directly generates better code than using the + * crypto_skcipher_blocksize() and similar helper functions here, as it + * prevents the algorithm pointer from being repeatedly reloaded. + */ + walk->blocksize = alg->base.cra_blocksize; + walk->ivsize = alg->co.ivsize; + walk->alignmask = alg->base.cra_alignmask; if (alg->co.base.cra_type != &crypto_skcipher_type) walk->stride = alg->co.chunksize; @@ -476,50 +344,24 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, return skcipher_walk_first(walk); } - -int skcipher_walk_virt(struct skcipher_walk *walk, - struct skcipher_request *req, bool atomic) -{ - int err; - - might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); - - walk->flags &= ~SKCIPHER_WALK_PHYS; - - err = skcipher_walk_skcipher(walk, req); - - walk->flags &= atomic ? 
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e9e7dceb606e..e1a74cb2cfbe 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1738,10 +1738,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
 		ret = min(ret, tcrypt_test("hmac(rmd160)"));
 		break;
 
-	case 109:
-		ret = min(ret, tcrypt_test("vmac64(aes)"));
-		break;
-
 	case 111:
 		ret = min(ret, tcrypt_test("hmac(sha3-224)"));
 		break;
diff --git a/crypto/tea.c b/crypto/tea.c
index 896f863f3067..b315da8c89eb 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
 #include <linux/types.h>
 
 #define TEA_KEY_SIZE		16
@@ -43,12 +43,11 @@ static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 		      unsigned int key_len)
 {
 	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
-	const __le32 *key = (const __le32 *)in_key;
 
-	ctx->KEY[0] = le32_to_cpu(key[0]);
-	ctx->KEY[1] = le32_to_cpu(key[1]);
-	ctx->KEY[2] = le32_to_cpu(key[2]);
-	ctx->KEY[3] = le32_to_cpu(key[3]);
+	ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
+	ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
+	ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
+	ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
 
 	return 0;
 
@@ -59,11 +58,9 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	u32 y, z, n, sum = 0;
 	u32 k0, k1, k2, k3;
 	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
-	const __le32 *in = (const __le32 *)src;
-	__le32 *out = (__le32 *)dst;
 
-	y = le32_to_cpu(in[0]);
-	z = le32_to_cpu(in[1]);
+	y = get_unaligned_le32(&src[0]);
+	z = get_unaligned_le32(&src[4]);
 
 	k0 = ctx->KEY[0];
 	k1 = ctx->KEY[1];
@@ -78,8 +75,8 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 		z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
 	}
 
-	out[0] = cpu_to_le32(y);
-	out[1] = cpu_to_le32(z);
+	put_unaligned_le32(y, &dst[0]);
+	put_unaligned_le32(z, &dst[4]);
 }
 
 static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -87,11 +84,9 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	u32 y, z, n, sum;
 	u32 k0, k1, k2, k3;
 	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
-	const __le32 *in = (const __le32 *)src;
-	__le32 *out = (__le32 *)dst;
 
-	y = le32_to_cpu(in[0]);
-	z = le32_to_cpu(in[1]);
+	y = get_unaligned_le32(&src[0]);
+	z = get_unaligned_le32(&src[4]);
 
 	k0 = ctx->KEY[0];
 	k1 = ctx->KEY[1];
@@ -108,20 +103,19 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 		sum -= TEA_DELTA;
 	}
 
-	out[0] = cpu_to_le32(y);
-	out[1] = cpu_to_le32(z);
+	put_unaligned_le32(y, &dst[0]);
+	put_unaligned_le32(z, &dst[4]);
 }
 
 static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 		       unsigned int key_len)
 {
 	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
-	const __le32 *key = (const __le32 *)in_key;
 
-	ctx->KEY[0] = le32_to_cpu(key[0]);
-	ctx->KEY[1] = le32_to_cpu(key[1]);
-	ctx->KEY[2] = le32_to_cpu(key[2]);
-	ctx->KEY[3] = le32_to_cpu(key[3]);
+	ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
+	ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
+	ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
+	ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
 
 	return 0;
 
@@ -132,11 +126,9 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	u32 y, z, sum = 0;
 	u32 limit = XTEA_DELTA * XTEA_ROUNDS;
 	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
-	const __le32 *in = (const __le32 *)src;
-	__le32 *out = (__le32 *)dst;
 
-	y = le32_to_cpu(in[0]);
-	z = le32_to_cpu(in[1]);
+	y = get_unaligned_le32(&src[0]);
+	z = get_unaligned_le32(&src[4]);
 
 	while (sum != limit) {
 		y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
@@ -144,19 +136,17 @@ static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 		z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
 	}
 
-	out[0] = cpu_to_le32(y);
-	out[1] = cpu_to_le32(z);
+	put_unaligned_le32(y, &dst[0]);
+	put_unaligned_le32(z, &dst[4]);
 }
 
 static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	u32 y, z, sum;
 	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
-	const __le32 *in = (const __le32 *)src;
-	__le32 *out = (__le32 *)dst;
 
-	y = le32_to_cpu(in[0]);
-	z = le32_to_cpu(in[1]);
+	y = get_unaligned_le32(&src[0]);
+	z = get_unaligned_le32(&src[4]);
 
 	sum = XTEA_DELTA * XTEA_ROUNDS;
 
@@ -166,8 +156,8 @@ static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 		y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
 	}
 
-	out[0] = cpu_to_le32(y);
-	out[1] = cpu_to_le32(z);
+	put_unaligned_le32(y, &dst[0]);
+	put_unaligned_le32(z, &dst[4]);
 }
 
 
@@ -176,11 +166,9 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	u32 y, z, sum = 0;
 	u32 limit = XTEA_DELTA * XTEA_ROUNDS;
 	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
-	const __le32 *in = (const __le32 *)src;
-	__le32 *out = (__le32 *)dst;
 
-	y = le32_to_cpu(in[0]);
-	z = le32_to_cpu(in[1]);
+	y = get_unaligned_le32(&src[0]);
+	z = get_unaligned_le32(&src[4]);
 
 	while (sum != limit) {
 		y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
@@ -188,19 +176,17 @@ static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 		z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
 	}
 
-	out[0] = cpu_to_le32(y);
-	out[1] = cpu_to_le32(z);
+	put_unaligned_le32(y, &dst[0]);
+	put_unaligned_le32(z, &dst[4]);
 }
 
 static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	u32 y, z, sum;
 	struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
-	const __le32 *in = (const __le32 *)src;
-	__le32 *out = (__le32 *)dst;
 
-	y = le32_to_cpu(in[0]);
-	z = le32_to_cpu(in[1]);
+	y = get_unaligned_le32(&src[0]);
+	z = get_unaligned_le32(&src[4]);
 
 	sum = XTEA_DELTA * XTEA_ROUNDS;
 
@@ -210,8 +196,8 @@ static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 		y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
 	}
 
-	out[0] = cpu_to_le32(y);
-	out[1] = cpu_to_le32(z);
+	put_unaligned_le32(y, &dst[0]);
+	put_unaligned_le32(z, &dst[4]);
 }
 
 static struct crypto_alg tea_algs[3] = { {
@@ -220,7 +206,6 @@ static struct crypto_alg tea_algs[3] = { {
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		=	TEA_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof (struct tea_ctx),
-	.cra_alignmask		=	3,
 	.cra_module		=	THIS_MODULE,
 	.cra_u			=	{ .cipher = {
 	.cia_min_keysize	=	TEA_KEY_SIZE,
@@ -234,7 +219,6 @@ static struct crypto_alg tea_algs[3] = { {
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		=	XTEA_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof (struct xtea_ctx),
-	.cra_alignmask		=	3,
 	.cra_module		=	THIS_MODULE,
 	.cra_u			=	{ .cipher = {
 	.cia_min_keysize	=	XTEA_KEY_SIZE,
@@ -248,7 +232,6 @@ static struct crypto_alg tea_algs[3] = { {
 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		=	XTEA_BLOCK_SIZE,
 	.cra_ctxsize		=	sizeof (struct xtea_ctx),
-	.cra_alignmask		=	3,
 	.cra_module		=	THIS_MODULE,
 	.cra_u			=	{ .cipher = {
 	.cia_min_keysize	=	XTEA_KEY_SIZE,
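(The conversion above replaces casts to __le32 pointers, which required cra_alignmask = 3, with the unaligned accessors from <linux/unaligned.h>. A minimal sketch of the idiom, not part of the patch; toy_load_block_le() is a hypothetical helper:)

	#include <linux/types.h>
	#include <linux/unaligned.h>

	/* Load one 8-byte little-endian block from a byte pointer of any
	 * alignment; callers no longer need an alignmask. */
	static inline void toy_load_block_le(const u8 *src, u32 *y, u32 *z)
	{
		*y = get_unaligned_le32(src);		/* bytes 0..3 */
		*z = get_unaligned_le32(src + 4);	/* bytes 4..7 */
	}
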
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 1f5f48ab18c7..e61490ba4095 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2885,18 +2885,11 @@ static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec,
 	if (ivsize) {
 		if (WARN_ON(ivsize > MAX_IVLEN))
 			return -EINVAL;
-		if (vec->generates_iv && !enc)
-			memcpy(iv, vec->iv_out, ivsize);
-		else if (vec->iv)
+		if (vec->iv)
 			memcpy(iv, vec->iv, ivsize);
 		else
 			memset(iv, 0, ivsize);
 	} else {
-		if (vec->generates_iv) {
-			pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
-			       driver, vec_name);
-			return -EINVAL;
-		}
 		iv = NULL;
 	}
 
@@ -3133,10 +3126,6 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
 	if (noextratests)
 		return 0;
 
-	/* Keywrap isn't supported here yet as it handles its IV differently. */
-	if (strncmp(algname, "kw(", 3) == 0)
-		return 0;
-
 	init_rnd_state(&rng);
 
 	if (!generic_driver) { /* Use default naming convention? */
@@ -5409,13 +5398,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.fips_allowed = 1,
 		.test = alg_test_null,
 	}, {
-		.alg = "kw(aes)",
-		.test = alg_test_skcipher,
-		.fips_allowed = 1,
-		.suite = {
-			.cipher = __VECS(aes_kw_tv_template)
-		}
-	}, {
 		.alg = "lrw(aes)",
 		.generic_driver = "lrw(ecb(aes-generic))",
 		.test = alg_test_skcipher,
@@ -5750,12 +5732,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 			.hash = __VECS(streebog512_tv_template)
 		}
 	}, {
-		.alg = "vmac64(aes)",
-		.test = alg_test_hash,
-		.suite = {
-			.hash = __VECS(vmac64_aes_tv_template)
-		}
-	}, {
 		.alg = "wp256",
 		.test = alg_test_hash,
 		.suite = {
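(With generates_iv and iv_out gone, every skcipher test vector either supplies .iv explicitly or is run with an all-zero IV. The shape of an entry is sketched below for reference only; the byte values are placeholders, not a real vector — real entries must come from a reference implementation:)

	static const struct cipher_testvec example_tv_template[] = {
		{
			.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
				  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
			.klen	= 16,
			.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00"
				  "\x00\x00\x00\x00\x00\x00\x00\x00",
			.ptext	= "\x00\x00\x00\x00\x00\x00\x00\x00"
				  "\x00\x00\x00\x00\x00\x00\x00\x00",
			.ctext	= "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
				  "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", /* placeholder */
			.len	= 16,
		},
	};
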
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 430d33d9ac13..d754ab997186 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -59,8 +59,6 @@ struct hash_testvec {
  * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
  *	( e.g. test needs to fail due to a weak key )
  * @fips_skip: Skip the test vector in FIPS mode
- * @generates_iv: Encryption should ignore the given IV, and output @iv_out.
- *		  Decryption takes @iv_out.  Needed for AES Keywrap ("kw(aes)").
  * @setkey_error: Expected error from setkey()
  * @crypt_error: Expected error from encrypt() and decrypt()
  */
@@ -74,7 +72,6 @@ struct cipher_testvec {
 	unsigned short klen;
 	unsigned int len;
 	bool fips_skip;
-	bool generates_iv;
 	int setkey_error;
 	int crypt_error;
 };
@@ -8561,159 +8558,6 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = {
 	}
 };
 
-static const char vmac64_string1[144] = {
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'\x01', '\x01', '\x01', '\x01', '\x02', '\x03', '\x02', '\x02',
-	'\x02', '\x04', '\x01', '\x07', '\x04', '\x01', '\x04', '\x03',
-};
-
-static const char vmac64_string2[144] = {
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'a', 'b', 'c',
-};
-
-static const char vmac64_string3[144] = {
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
-	'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
-	'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
-	'a', 'b', 'c', 'a', 'b', 'c', 'a', 'b',
-	'c', 'a', 'b', 'c', 'a', 'b', 'c', 'a',
-	'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c',
-};
-
-static const char vmac64_string4[33] = {
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'b', 'c', 'e', 'f', 'i', 'j', 'l', 'm',
-	'o', 'p', 'r', 's', 't', 'u', 'w', 'x',
-	'z',
-};
-
-static const char vmac64_string5[143] = {
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'r', 'm', 'b', 't', 'c', 'o', 'l', 'k',
-	']', '%', '9', '2', '7', '!', 'A',
-};
-
-static const char vmac64_string6[145] = {
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
-	'p', 't', '*', '7', 'l', 'i', '!', '#',
-	'w', '0', 'z', '/', '4', 'A', 'n',
-};
-
-static const struct hash_testvec vmac64_aes_tv_template[] = {
-	{ /* draft-krovetz-vmac-01 test vector 1 */
-		.key	= "abcdefghijklmnop",
-		.ksize	= 16,
-		.plaintext = "\0\0\0\0\0\0\0\0bcdefghi",
-		.psize	= 16,
-		.digest	= "\x25\x76\xbe\x1c\x56\xd8\xb8\x1b",
-	}, { /* draft-krovetz-vmac-01 test vector 2 */
-		.key	= "abcdefghijklmnop",
-		.ksize	= 16,
-		.plaintext = "\0\0\0\0\0\0\0\0bcdefghiabc",
-		.psize	= 19,
-		.digest	= "\x2d\x37\x6c\xf5\xb1\x81\x3c\xe5",
-	}, { /* draft-krovetz-vmac-01 test vector 3 */
-		.key	= "abcdefghijklmnop",
-		.ksize	= 16,
-		.plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
-			     "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
-		.psize	= 64,
-		.digest	= "\xe8\x42\x1f\x61\xd5\x73\xd2\x98",
-	}, { /* draft-krovetz-vmac-01 test vector 4 */
-		.key	= "abcdefghijklmnop",
-		.ksize	= 16,
-		.plaintext = "\0\0\0\0\0\0\0\0bcdefghi"
-			     "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
-			     "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
-			     "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
-			     "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
-			     "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabc"
-			     "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc",
-		.psize	= 316,
-		.digest	= "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe",
-	}, {
-		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
-			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-		.ksize	= 16,
-		.plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
-			     "\x00\x00\x00\x00\x00\x00\x00\x00",
-		.psize	= 16,
-		.digest	= "\x54\x7b\xa4\x77\x35\x80\x58\x07",
-	}, {
-		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
-			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-		.ksize	= 16,
-		.plaintext = vmac64_string1,
-		.psize	= sizeof(vmac64_string1),
-		.digest	= "\xa1\x8c\x68\xae\xd3\x3c\xf5\xce",
-	}, {
-		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
-			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-		.ksize	= 16,
-		.plaintext = vmac64_string2,
-		.psize	= sizeof(vmac64_string2),
-		.digest	= "\x2d\x14\xbd\x81\x73\xb0\x27\xc9",
-	}, {
-		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
-			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
-		.ksize	= 16,
-		.plaintext = vmac64_string3,
-		.psize	= sizeof(vmac64_string3),
-		.digest	= "\x19\x0b\x47\x98\x8c\x95\x1a\x8d",
-	}, {
-		.key	= "abcdefghijklmnop",
-		.ksize	= 16,
-		.plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
-			     "\x00\x00\x00\x00\x00\x00\x00\x00",
-		.psize	= 16,
-		.digest	= "\x84\x8f\x55\x9e\x26\xa1\x89\x3b",
-	}, {
-		.key	= "abcdefghijklmnop",
-		.ksize	= 16,
-		.plaintext = vmac64_string1,
-		.psize	= sizeof(vmac64_string1),
-		.digest	= "\xc2\x74\x8d\xf6\xb0\xab\x5e\xab",
-	}, {
-		.key	= "abcdefghijklmnop",
-		.ksize	= 16,
-		.plaintext = vmac64_string2,
-		.psize	= sizeof(vmac64_string2),
-		.digest	= "\xdf\x09\x7b\x3d\x42\x68\x15\x11",
-	}, {
-		.key	= "abcdefghijklmnop",
-		.ksize	= 16,
-		.plaintext = vmac64_string3,
-		.psize	= sizeof(vmac64_string3),
-		.digest	= "\xd4\xfa\x8f\xed\xe1\x8f\x32\x8b",
-	}, {
-		.key	= "a09b5cd!f#07K\x00\x00\x00",
-		.ksize	= 16,
-		.plaintext = vmac64_string4,
-		.psize	= sizeof(vmac64_string4),
-		.digest	= "\x5f\xa1\x4e\x42\xea\x0f\xa5\xab",
-	}, {
-		.key	= "a09b5cd!f#07K\x00\x00\x00",
-		.ksize	= 16,
-		.plaintext = vmac64_string5,
-		.psize	= sizeof(vmac64_string5),
-		.digest	= "\x60\x67\xe8\x1d\xbc\x98\x31\x25",
-	}, {
-		.key	= "a09b5cd!f#07K\x00\x00\x00",
-		.ksize	= 16,
-		.plaintext = vmac64_string6,
-		.psize	= sizeof(vmac64_string6),
-		.digest	= "\x41\xeb\x65\x95\x47\x9b\xae\xc4",
-	},
-};
-
 /*
  * SHA384 HMAC test vectors from RFC4231
  */
@@ -24349,42 +24193,6 @@ static const struct aead_testvec aegis128_tv_template[] = {
 };
 
 /*
- * All key wrapping test vectors taken from
- * http://csrc.nist.gov/groups/STM/cavp/documents/mac/kwtestvectors.zip
- *
- * Note: as documented in keywrap.c, the ivout for encryption is the first
- * semiblock of the ciphertext from the test vector. For decryption, iv is
- * the first semiblock of the ciphertext.
- */
-static const struct cipher_testvec aes_kw_tv_template[] = {
-	{
-		.key	= "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
-			  "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
-		.klen	= 16,
-		.ptext	= "\x42\x13\x6d\x3c\x38\x4a\x3e\xea"
-			  "\xc9\x5a\x06\x6f\xd2\x8f\xed\x3f",
-		.ctext	= "\xf6\x85\x94\x81\x6f\x64\xca\xa3"
-			  "\xf5\x6f\xab\xea\x25\x48\xf5\xfb",
-		.len	= 16,
-		.iv_out	= "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d",
-		.generates_iv = true,
-	}, {
-		.key	= "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
-			  "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
-			  "\x03\x86\xf9\x32\x78\x6e\xf7\x96"
-			  "\x76\xfa\xfb\x90\xb8\x26\x3c\x5f",
-		.klen	= 32,
-		.ptext	= "\x0a\x25\x6b\xa7\x5c\xfa\x03\xaa"
-			  "\xa0\x2b\xa9\x42\x03\xf1\x5b\xaa",
-		.ctext	= "\xd3\x3d\x3d\x97\x7b\xf0\xa9\x15"
-			  "\x59\xf9\x9c\x8a\xcd\x29\x3d\x43",
-		.len	= 16,
-		.iv_out	= "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1",
-		.generates_iv = true,
-	},
-};
-
-/*
  * ANSI X9.31 Continuous Pseudo-Random Number Generator (AES mode)
  * test vectors, taken from Appendix B.2.9 and B.2.10:
  * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
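(The file removed below carries its own portable 128-bit arithmetic: MUL64() composes a 64x64->128-bit multiply from four 32x32 partial products, PMUL64() is the same with the top input bits known clear, and ADD128() propagates the carry. For reference only — this is standalone illustrative C, not kernel code — the same multiply with a compiler-provided 128-bit type:)

	#include <stdint.h>

	typedef unsigned __int128 u128;

	/* What MUL64(rh, rl, a, b) computes in the removed vmac.c. */
	static inline void mul64(uint64_t *rh, uint64_t *rl,
				 uint64_t a, uint64_t b)
	{
		u128 p = (u128)a * b;	/* full 128-bit product */

		*rh = (uint64_t)(p >> 64);
		*rl = (uint64_t)p;
	}
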
diff --git a/crypto/vmac.c b/crypto/vmac.c
deleted file mode 100644
index 2ea384645ecf..000000000000
--- a/crypto/vmac.c
+++ /dev/null
@@ -1,696 +0,0 @@
-/*
- * VMAC: Message Authentication Code using Universal Hashing
- *
- * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
- *
- * Copyright (c) 2009, Intel Corporation.
- * Copyright (c) 2018, Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-/*
- * Derived from:
- *	VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- *	This implementation is herby placed in the public domain.
- *	The authors offers no warranty. Use at your own risk.
- *	Last modified: 17 APR 08, 1700 PDT
- */
-
-#include <linux/unaligned.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <asm/byteorder.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/cipher.h>
-#include <crypto/internal/hash.h>
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN	64
-#define VMAC_KEY_SIZE	128/* Must be 128, 192 or 256 */
-#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES	128/* Must 2^i for any 3 < i < 13 Standard = 128*/
-#define VMAC_NONCEBYTES	16
-
-/* per-transform (per-key) context */
-struct vmac_tfm_ctx {
-	struct crypto_cipher *cipher;
-	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
-	u64 polykey[2*VMAC_TAG_LEN/64];
-	u64 l3key[2*VMAC_TAG_LEN/64];
-};
-
-/* per-request context */
-struct vmac_desc_ctx {
-	union {
-		u8 partial[VMAC_NHBYTES];	/* partial block */
-		__le64 partial_words[VMAC_NHBYTES / 8];
-	};
-	unsigned int partial_size;	/* size of the partial block */
-	bool first_block_processed;
-	u64 polytmp[2*VMAC_TAG_LEN/64];	/* running total of L2-hash */
-	union {
-		u8 bytes[VMAC_NONCEBYTES];
-		__be64 pads[VMAC_NONCEBYTES / 8];
-	} nonce;
-	unsigned int nonce_size; /* nonce bytes filled so far */
-};
-
-/*
- * Constants and masks
- */
-#define UINT64_C(x) x##ULL
-static const u64 p64   = UINT64_C(0xfffffffffffffeff);	/* 2^64 - 257 prime  */
-static const u64 m62   = UINT64_C(0x3fffffffffffffff);	/* 62-bit mask       */
-static const u64 m63   = UINT64_C(0x7fffffffffffffff);	/* 63-bit mask       */
-static const u64 m64   = UINT64_C(0xffffffffffffffff);	/* 64-bit mask       */
-static const u64 mpoly = UINT64_C(0x1fffffff1fffffff);	/* Poly key mask     */
-
-#define pe64_to_cpup le64_to_cpup		/* Prefer little endian */
-
-#ifdef __LITTLE_ENDIAN
-#define INDEX_HIGH 1
-#define INDEX_LOW 0
-#else
-#define INDEX_HIGH 0
-#define INDEX_LOW 1
-#endif
-
-/*
- * The following routines are used in this implementation. They are
- * written via macros to simulate zero-overhead call-by-reference.
- *
- * MUL64: 64x64->128-bit multiplication
- * PMUL64: assumes top bits cleared on inputs
- * ADD128: 128x128->128-bit addition
- */
-
-#define ADD128(rh, rl, ih, il)						\
-	do {								\
-		u64 _il = (il);						\
-		(rl) += (_il);						\
-		if ((rl) < (_il))					\
-			(rh)++;						\
-		(rh) += (ih);						\
-	} while (0)
-
-#define MUL32(i1, i2)	((u64)(u32)(i1)*(u32)(i2))
-
-#define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */	\
-	do {								\
-		u64 _i1 = (i1), _i2 = (i2);				\
-		u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2);	\
-		rh = MUL32(_i1>>32, _i2>>32);				\
-		rl = MUL32(_i1, _i2);					\
-		ADD128(rh, rl, (m >> 32), (m << 32));			\
-	} while (0)
-
-#define MUL64(rh, rl, i1, i2)						\
-	do {								\
-		u64 _i1 = (i1), _i2 = (i2);				\
-		u64 m1 = MUL32(_i1, _i2>>32);				\
-		u64 m2 = MUL32(_i1>>32, _i2);				\
-		rh = MUL32(_i1>>32, _i2>>32);				\
-		rl = MUL32(_i1, _i2);					\
-		ADD128(rh, rl, (m1 >> 32), (m1 << 32));			\
-		ADD128(rh, rl, (m2 >> 32), (m2 << 32));			\
-	} while (0)
-
-/*
- * For highest performance the L1 NH and L2 polynomial hashes should be
- * carefully implemented to take advantage of one's target architecture.
- * Here these two hash functions are defined multiple time; once for
- * 64-bit architectures, once for 32-bit SSE2 architectures, and once
- * for the rest (32-bit) architectures.
- * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
- * Optionally, nh_vmac_nhbytes can be defined (for multiples of
- * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
- * NH computations at once).
- */
-
-#ifdef CONFIG_64BIT
-
-#define nh_16(mp, kp, nw, rh, rl)					\
-	do {								\
-		int i; u64 th, tl;					\
-		rh = rl = 0;						\
-		for (i = 0; i < nw; i += 2) {				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
-				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
-			ADD128(rh, rl, th, tl);				\
-		}							\
-	} while (0)
-
-#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)				\
-	do {								\
-		int i; u64 th, tl;					\
-		rh1 = rl1 = rh = rl = 0;				\
-		for (i = 0; i < nw; i += 2) {				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
-				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
-			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
-				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
-			ADD128(rh1, rl1, th, tl);			\
-		}							\
-	} while (0)
-
-#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
-#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
-	do {								\
-		int i; u64 th, tl;					\
-		rh = rl = 0;						\
-		for (i = 0; i < nw; i += 8) {				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
-				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
-			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
-				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
-			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
-				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
-			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
-				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
-			ADD128(rh, rl, th, tl);				\
-		}							\
-	} while (0)
-
-#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)			\
-	do {								\
-		int i; u64 th, tl;					\
-		rh1 = rl1 = rh = rl = 0;				\
-		for (i = 0; i < nw; i += 8) {				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
-				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
-			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
-				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
-			ADD128(rh1, rl1, th, tl);			\
-			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
-				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
-			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4],	\
-				pe64_to_cpup((mp)+i+3)+(kp)[i+5]);	\
-			ADD128(rh1, rl1, th, tl);			\
-			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
-				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
-			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6],	\
-				pe64_to_cpup((mp)+i+5)+(kp)[i+7]);	\
-			ADD128(rh1, rl1, th, tl);			\
-			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
-				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
-			ADD128(rh, rl, th, tl);				\
-			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8],	\
-				pe64_to_cpup((mp)+i+7)+(kp)[i+9]);	\
-			ADD128(rh1, rl1, th, tl);			\
-		}							\
-	} while (0)
-#endif
-
-#define poly_step(ah, al, kh, kl, mh, ml)				\
-	do {								\
-		u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0;		\
-		/* compute ab*cd, put bd into result registers */	\
-		PMUL64(t3h, t3l, al, kh);				\
-		PMUL64(t2h, t2l, ah, kl);				\
-		PMUL64(t1h, t1l, ah, 2*kh);				\
-		PMUL64(ah, al, al, kl);					\
-		/* add 2 * ac to result */				\
-		ADD128(ah, al, t1h, t1l);				\
-		/* add together ad + bc */				\
-		ADD128(t2h, t2l, t3h, t3l);				\
-		/* now (ah,al), (t2l,2*t2h) need summing */		\
-		/* first add the high registers, carrying into t2h */	\
-		ADD128(t2h, ah, z, t2l);				\
-		/* double t2h and add top bit of ah */			\
-		t2h = 2 * t2h + (ah >> 63);				\
-		ah &= m63;						\
-		/* now add the low registers */				\
-		ADD128(ah, al, mh, ml);					\
-		ADD128(ah, al, z, t2h);					\
-	} while (0)
-
-#else /* !CONFIG_64BIT */
-
-#ifndef nh_16
-#define nh_16(mp, kp, nw, rh, rl)					\
-	do {								\
-		u64 t1, t2, m1, m2, t;					\
-		int i;							\
-		rh = rl = t = 0;					\
-		for (i = 0; i < nw; i += 2) {				\
-			t1 = pe64_to_cpup(mp+i) + kp[i];		\
-			t2 = pe64_to_cpup(mp+i+1) + kp[i+1];		\
-			m2 = MUL32(t1 >> 32, t2);			\
-			m1 = MUL32(t1, t2 >> 32);			\
-			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),	\
-				MUL32(t1, t2));				\
-			rh += (u64)(u32)(m1 >> 32)			\
-				+ (u32)(m2 >> 32);			\
-			t += (u64)(u32)m1 + (u32)m2;			\
-		}							\
-		ADD128(rh, rl, (t >> 32), (t << 32));			\
-	} while (0)
-#endif
-
-static void poly_step_func(u64 *ahi, u64 *alo,
-			   const u64 *kh, const u64 *kl,
-			   const u64 *mh, const u64 *ml)
-{
-#define a0 (*(((u32 *)alo)+INDEX_LOW))
-#define a1 (*(((u32 *)alo)+INDEX_HIGH))
-#define a2 (*(((u32 *)ahi)+INDEX_LOW))
-#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
-#define k0 (*(((u32 *)kl)+INDEX_LOW))
-#define k1 (*(((u32 *)kl)+INDEX_HIGH))
-#define k2 (*(((u32 *)kh)+INDEX_LOW))
-#define k3 (*(((u32 *)kh)+INDEX_HIGH))
-
-	u64 p, q, t;
-	u32 t2;
-
-	p = MUL32(a3, k3);
-	p += p;
-	p += *(u64 *)mh;
-	p += MUL32(a0, k2);
-	p += MUL32(a1, k1);
-	p += MUL32(a2, k0);
-	t = (u32)(p);
-	p >>= 32;
-	p += MUL32(a0, k3);
-	p += MUL32(a1, k2);
-	p += MUL32(a2, k1);
-	p += MUL32(a3, k0);
-	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
-	p >>= 31;
-	p += (u64)(((u32 *)ml)[INDEX_LOW]);
-	p += MUL32(a0, k0);
-	q = MUL32(a1, k3);
-	q += MUL32(a2, k2);
-	q += MUL32(a3, k1);
-	q += q;
-	p += q;
-	t2 = (u32)(p);
-	p >>= 32;
-	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
-	p += MUL32(a0, k1);
-	p += MUL32(a1, k0);
-	q = MUL32(a2, k3);
-	q += MUL32(a3, k2);
-	q += q;
-	p += q;
-	*(u64 *)(alo) = (p << 32) | t2;
-	p >>= 32;
-	*(u64 *)(ahi) = p + t;
-
-#undef a0
-#undef a1
-#undef a2
-#undef a3
-#undef k0
-#undef k1
-#undef k2
-#undef k3
-}
-
-#define poly_step(ah, al, kh, kl, mh, ml)				\
-	poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
-
-#endif  /* end of specialized NH and poly definitions */
-
-/* At least nh_16 is defined. Defined others as needed here */
-#ifndef nh_16_2
-#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)				\
-	do {								\
-		nh_16(mp, kp, nw, rh, rl);				\
-		nh_16(mp, ((kp)+2), nw, rh2, rl2);			\
-	} while (0)
-#endif
-#ifndef nh_vmac_nhbytes
-#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
-	nh_16(mp, kp, nw, rh, rl)
-#endif
-#ifndef nh_vmac_nhbytes_2
-#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)			\
-	do {								\
-		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
-		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2);		\
-	} while (0)
-#endif
-
-static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
-{
-	u64 rh, rl, t, z = 0;
-
-	/* fully reduce (p1,p2)+(len,0) mod p127 */
-	t = p1 >> 63;
-	p1 &= m63;
-	ADD128(p1, p2, len, t);
-	/* At this point, (p1,p2) is at most 2^127+(len<<64) */
-	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
-	ADD128(p1, p2, z, t);
-	p1 &= m63;
-
-	/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
-	t = p1 + (p2 >> 32);
-	t += (t >> 32);
-	t += (u32)t > 0xfffffffeu;
-	p1 += (t >> 32);
-	p2 += (p1 << 32);
-
-	/* compute (p1+k1)%p64 and (p2+k2)%p64 */
-	p1 += k1;
-	p1 += (0 - (p1 < k1)) & 257;
-	p2 += k2;
-	p2 += (0 - (p2 < k2)) & 257;
-
-	/* compute (p1+k1)*(p2+k2)%p64 */
-	MUL64(rh, rl, p1, p2);
-	t = rh >> 56;
-	ADD128(t, rl, z, rh);
-	rh <<= 8;
-	ADD128(t, rl, z, rh);
-	t += t << 8;
-	rl += t;
-	rl += (0 - (rl < t)) & 257;
-	rl += (0 - (rl > p64-1)) & 257;
-	return rl;
-}
-
-/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
-static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
-			 struct vmac_desc_ctx *dctx,
-			 const __le64 *mptr, unsigned int blocks)
-{
-	const u64 *kptr = tctx->nhkey;
-	const u64 pkh = tctx->polykey[0];
-	const u64 pkl = tctx->polykey[1];
-	u64 ch = dctx->polytmp[0];
-	u64 cl = dctx->polytmp[1];
-	u64 rh, rl;
-
-	if (!dctx->first_block_processed) {
-		dctx->first_block_processed = true;
-		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
-		rh &= m62;
-		ADD128(ch, cl, rh, rl);
-		mptr += (VMAC_NHBYTES/sizeof(u64));
-		blocks--;
-	}
-
-	while (blocks--) {
-		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
-		rh &= m62;
-		poly_step(ch, cl, pkh, pkl, rh, rl);
-		mptr += (VMAC_NHBYTES/sizeof(u64));
-	}
-
-	dctx->polytmp[0] = ch;
-	dctx->polytmp[1] = cl;
-}
-
-static int vmac_setkey(struct crypto_shash *tfm,
-		       const u8 *key, unsigned int keylen)
-{
-	struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
-	__be64 out[2];
-	u8 in[16] = { 0 };
-	unsigned int i;
-	int err;
-
-	if (keylen != VMAC_KEY_LEN)
-		return -EINVAL;
-
-	err = crypto_cipher_setkey(tctx->cipher, key, keylen);
-	if (err)
-		return err;
-
-	/* Fill nh key */
-	in[0] = 0x80;
-	for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
-		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
-		tctx->nhkey[i] = be64_to_cpu(out[0]);
-		tctx->nhkey[i+1] = be64_to_cpu(out[1]);
-		in[15]++;
-	}
-
-	/* Fill poly key */
-	in[0] = 0xC0;
-	in[15] = 0;
-	for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
-		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
-		tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
-		tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
-		in[15]++;
-	}
-
-	/* Fill ip key */
-	in[0] = 0xE0;
-	in[15] = 0;
-	for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
-		do {
-			crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
-			tctx->l3key[i] = be64_to_cpu(out[0]);
-			tctx->l3key[i+1] = be64_to_cpu(out[1]);
-			in[15]++;
-		} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
-	}
-
-	return 0;
-}
-
-static int vmac_init(struct shash_desc *desc)
-{
-	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
-	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
-
-	dctx->partial_size = 0;
-	dctx->first_block_processed = false;
-	memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
-	dctx->nonce_size = 0;
-	return 0;
-}
-
-static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
-{
-	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
-	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
-	unsigned int n;
-
-	/* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
-	if (dctx->nonce_size < VMAC_NONCEBYTES) {
-		n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
-		memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
-		dctx->nonce_size += n;
-		p += n;
-		len -= n;
-	}
-
-	if (dctx->partial_size) {
-		n = min(len, VMAC_NHBYTES - dctx->partial_size);
-		memcpy(&dctx->partial[dctx->partial_size], p, n);
-		dctx->partial_size += n;
-		p += n;
-		len -= n;
-		if (dctx->partial_size == VMAC_NHBYTES) {
-			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
-			dctx->partial_size = 0;
-		}
-	}
-
-	if (len >= VMAC_NHBYTES) {
-		n = round_down(len, VMAC_NHBYTES);
-		/* TODO: 'p' may be misaligned here */
-		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
-		p += n;
-		len -= n;
-	}
-
-	if (len) {
-		memcpy(dctx->partial, p, len);
-		dctx->partial_size = len;
-	}
-
-	return 0;
-}
-
-static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
-		       struct vmac_desc_ctx *dctx)
-{
-	unsigned int partial = dctx->partial_size;
-	u64 ch = dctx->polytmp[0];
-	u64 cl = dctx->polytmp[1];
-
-	/* L1 and L2-hash the final block if needed */
-	if (partial) {
-		/* Zero-pad to next 128-bit boundary */
-		unsigned int n = round_up(partial, 16);
-		u64 rh, rl;
-
-		memset(&dctx->partial[partial], 0, n - partial);
-		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
-		rh &= m62;
-		if (dctx->first_block_processed)
-			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
-				  rh, rl);
-		else
-			ADD128(ch, cl, rh, rl);
-	}
-
-	/* L3-hash the 128-bit output of L2-hash */
-	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
-}
-
-static int vmac_final(struct shash_desc *desc, u8 *out)
-{
-	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
-	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
-	int index;
-	u64 hash, pad;
-
-	if (dctx->nonce_size != VMAC_NONCEBYTES)
-		return -EINVAL;
-
-	/*
-	 * The VMAC specification requires a nonce at least 1 bit shorter than
-	 * the block cipher's block length, so we actually only accept a 127-bit
-	 * nonce.  We define the unused bit to be the first one and require that
-	 * it be 0, so the needed prepending of a 0 bit is implicit.
-	 */
-	if (dctx->nonce.bytes[0] & 0x80)
-		return -EINVAL;
-
-	/* Finish calculating the VHASH of the message */
-	hash = vhash_final(tctx, dctx);
-
-	/* Generate pseudorandom pad by encrypting the nonce */
-	BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
-	index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
-	dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
-	crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
-				  dctx->nonce.bytes);
-	pad = be64_to_cpu(dctx->nonce.pads[index]);
-
-	/* The VMAC is the sum of VHASH and the pseudorandom pad */
-	put_unaligned_be64(hash + pad, out);
-	return 0;
-}
-
-static int vmac_init_tfm(struct crypto_tfm *tfm)
-{
-	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-	struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
-	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
-	struct crypto_cipher *cipher;
-
-	cipher = crypto_spawn_cipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	tctx->cipher = cipher;
-	return 0;
-}
-
-static void vmac_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_cipher(tctx->cipher);
-}
-
-static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
-	struct shash_instance *inst;
-	struct crypto_cipher_spawn *spawn;
-	struct crypto_alg *alg;
-	u32 mask;
-	int err;
-
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
-	if (err)
-		return err;
-
-	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-	if (!inst)
-		return -ENOMEM;
-	spawn = shash_instance_ctx(inst);
-
-	err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
-				 crypto_attr_alg_name(tb[1]), 0, mask);
-	if (err)
-		goto err_free_inst;
-	alg = crypto_spawn_cipher_alg(spawn);
-
-	err = -EINVAL;
-	if (alg->cra_blocksize != VMAC_NONCEBYTES)
-		goto err_free_inst;
-
-	err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
-	if (err)
-		goto err_free_inst;
-
-	inst->alg.base.cra_priority = alg->cra_priority;
-	inst->alg.base.cra_blocksize = alg->cra_blocksize;
-
-	inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
-	inst->alg.base.cra_init = vmac_init_tfm;
-	inst->alg.base.cra_exit = vmac_exit_tfm;
-
-	inst->alg.descsize = sizeof(struct vmac_desc_ctx);
-	inst->alg.digestsize = VMAC_TAG_LEN / 8;
-	inst->alg.init = vmac_init;
-	inst->alg.update = vmac_update;
-	inst->alg.final = vmac_final;
-	inst->alg.setkey = vmac_setkey;
-
-	inst->free = shash_free_singlespawn_instance;
-
-	err = shash_register_instance(tmpl, inst);
-	if (err) {
-err_free_inst:
-		shash_free_singlespawn_instance(inst);
-	}
-	return err;
-}
-
-static struct crypto_template vmac64_tmpl = {
-	.name = "vmac64",
-	.create = vmac_create,
-	.module = THIS_MODULE,
-};
-
-static int __init vmac_module_init(void)
-{
-	return crypto_register_template(&vmac64_tmpl);
-}
-
-static void __exit vmac_module_exit(void)
-{
-	crypto_unregister_template(&vmac64_tmpl);
-}
-
-subsys_initcall(vmac_module_init);
-module_exit(vmac_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("VMAC hash algorithm");
-MODULE_ALIAS_CRYPTO("vmac64");
-MODULE_IMPORT_NS("CRYPTO_INTERNAL");
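(For reference, a sketch of how a caller might have computed a vmac64(aes) tag through the shash API before this removal. As the removed vmac_update() documents, the 16-byte nonce travels as the first VMAC_NONCEBYTES bytes of the message data, and the removed vmac_final() rejects nonces with the top bit set. Illustrative only; error handling is simplified and the function name is made up.)

	#include <crypto/hash.h>

	static int example_vmac64_tag(const u8 key[16], const u8 nonce[16],
				      const void *msg, unsigned int len,
				      u8 tag[8])
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		err = crypto_shash_setkey(tfm, key, 16);
		if (!err) {
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			/* nonce first, then the message, then the 8-byte tag */
			err = crypto_shash_init(desc) ?:
			      crypto_shash_update(desc, nonce, 16) ?:
			      crypto_shash_update(desc, msg, len) ?:
			      crypto_shash_final(desc, tag);
		}
		crypto_free_shash(tfm);
		return err;
	}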