| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-18 12:11:14 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-18 12:11:14 -0700 |
| commit | 8b53c76533aa4356602aea98f98a2f3b4051464c (patch) | |
| tree | ab10ba58e21501407f8108a6bb9003daa2176962 /lib | |
| parent | 6cfae0c26b21dce323fe8799b66cf4bc996e3565 (diff) | |
| parent | 9575d1a5c0780ea26ff8dd29c94a32be32ce3c85 (diff) | |
| download | linux-stable-8b53c76533aa4356602aea98f98a2f3b4051464c.tar.gz linux-stable-8b53c76533aa4356602aea98f98a2f3b4051464c.tar.bz2 linux-stable-8b53c76533aa4356602aea98f98a2f3b4051464c.zip | |
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Add the ability to abort a skcipher walk.

Algorithms:
- Fix XTS to actually do the stealing.
- Add library helpers for AES and DES for single-block users.
- Add library helpers for SHA256.
- Add new DES key verification helper.
- Add surrounding bits for ESSIV generator.
- Add accelerations for aegis128.
- Add test vectors for lzo-rle.

Drivers:
- Add i.MX8MQ support to caam.
- Add gcm/ccm/cfb/ofb aes support in inside-secure.
- Add ofb/cfb aes support in mediatek.
- Add HiSilicon ZIP accelerator support.

Others:
- Fix potential race condition in padata.
- Use unbound workqueues in padata"
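
The "library helpers for AES and DES for single-block users" item above is implemented by the new lib/crypto/aes.c and lib/crypto/des.c in the diff further down. A minimal known-answer self-test sketch of the AES interface follows: the aes_expandkey()/aes_encrypt()/aes_decrypt() signatures and constants come straight from the diff, while the module glue and the demo naming are illustrative only; the test vector is the standard FIPS-197 Appendix C AES-128 example.

```c
/*
 * Hypothetical self-test for the new single-block AES library API.
 * aes_expandkey()/aes_encrypt()/aes_decrypt() are taken from the
 * lib/crypto/aes.c hunks below; the module boilerplate is a sketch.
 */
#include <crypto/aes.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/string.h>

static int __init aes_lib_demo_init(void)
{
	/* FIPS-197 Appendix C AES-128 known-answer vector */
	static const u8 key[AES_KEYSIZE_128] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};
	static const u8 pt[AES_BLOCK_SIZE] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
	};
	static const u8 ct[AES_BLOCK_SIZE] = {
		0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
		0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a,
	};
	struct crypto_aes_ctx ctx;
	u8 out[AES_BLOCK_SIZE];
	int err;

	/* builds both the encryption and decryption key schedules */
	err = aes_expandkey(&ctx, key, sizeof(key));
	if (err)
		return err;

	aes_encrypt(&ctx, out, pt);
	if (memcmp(out, ct, sizeof(ct)))
		return -EINVAL;

	aes_decrypt(&ctx, out, ct);
	if (memcmp(out, pt, sizeof(pt)))
		return -EINVAL;

	/* wipe the expanded key material before returning */
	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}
module_init(aes_lib_demo_init);

MODULE_LICENSE("GPL");
```

The point of the library form is that a caller needing one block at a time can link against libaes.o directly instead of instantiating a full crypto API cipher transform.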
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (311 commits)
crypto: caam - Cast to long first before pointer conversion
crypto: ccree - enable CTS support in AES-XTS
crypto: inside-secure - Probe transform record cache RAM sizes
crypto: inside-secure - Base RD fetchcount on actual RD FIFO size
crypto: inside-secure - Base CD fetchcount on actual CD FIFO size
crypto: inside-secure - Enable extended algorithms on newer HW
crypto: inside-secure: Corrected configuration of EIP96_TOKEN_CTRL
crypto: inside-secure - Add EIP97/EIP197 and endianness detection
padata: remove cpu_index from the parallel_queue
padata: unbind parallel jobs from specific CPUs
padata: use separate workqueues for parallel and serial work
padata, pcrypt: take CPU hotplug lock internally in padata_alloc_possible
crypto: pcrypt - remove padata cpumask notifier
padata: make padata_do_parallel find alternate callback CPU
workqueue: require CPU hotplug read exclusion for apply_workqueue_attrs
workqueue: unconfine alloc/apply/free_workqueue_attrs()
padata: allocate workqueue internally
arm64: dts: imx8mq: Add CAAM node
random: Use wait_event_freezable() in add_hwgenerator_randomness()
crypto: ux500 - Fix COMPILE_TEST warnings
...
Diffstat (limited to 'lib')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | lib/crypto/Makefile | 9 |
| -rw-r--r-- | lib/crypto/aes.c | 356 |
| -rw-r--r-- | lib/crypto/des.c | 902 |
| -rw-r--r-- | lib/crypto/sha256.c (renamed from lib/sha256.c) | 150 |
| -rw-r--r-- | lib/mpi/longlong.h | 36 |
5 files changed, 1347 insertions, 106 deletions
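
The diffstat accounts for the three new library objects wired up in lib/crypto/Makefile plus the relocated SHA-256 code; per the hunks below, sha256_update()/sha256_final() (and new sha224 wrappers) become exported library functions. A one-shot digest with that interface might look like the sketch below — the demo_ name is hypothetical, and since sha256_init() is removed from the .c file in this series, the sketch assumes it remains available to callers (e.g. as a header inline elsewhere in the series).

```c
#include <crypto/sha.h>

/*
 * One-shot SHA-256 over a buffer using the relocated library code.
 * sha256_update()/sha256_final() are exported by lib/crypto/sha256.c
 * in the diff below; sha256_init() no longer lives in the .c file and
 * is assumed to be provided elsewhere in the series.
 */
static void demo_sha256_digest(const u8 *data, unsigned int len,
			       u8 digest[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, digest);	/* also zeroizes sctx */
}
```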
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 88195c34932d..cbe0b6a6450d 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -1,4 +1,13 @@ # SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o +libaes-y := aes.o + obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o libarc4-y := arc4.o + +obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o +libdes-y := des.o + +obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o +libsha256-y := sha256.o diff --git a/lib/crypto/aes.c b/lib/crypto/aes.c new file mode 100644 index 000000000000..827fe89922ff --- /dev/null +++ b/lib/crypto/aes.c @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017-2019 Linaro Ltd <ard.biesheuvel@linaro.org> + */ + +#include <crypto/aes.h> +#include <linux/crypto.h> +#include <linux/module.h> +#include <asm/unaligned.h> + +/* + * Emit the sbox as volatile const to prevent the compiler from doing + * constant folding on sbox references involving fixed indexes. + */ +static volatile const u8 __cacheline_aligned aes_sbox[] = { + 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, + 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, + 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, + 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, + 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, + 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, + 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, + 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, + 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, + 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, + 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, + 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, + 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, + 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, + 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, + 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, + 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, + 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, + 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, + 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, + 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, + 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, + 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, + 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, + 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, + 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, + 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, + 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, + 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, + 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, + 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, + 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16, +}; + +static volatile const u8 __cacheline_aligned aes_inv_sbox[] = { + 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, + 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, + 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, + 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, + 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, + 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, + 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, + 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, + 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, + 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, + 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, + 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, + 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, + 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, + 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, + 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, + 0x3a, 0x91, 0x11, 0x41, 0x4f, 
0x67, 0xdc, 0xea, + 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, + 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, + 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, + 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, + 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, + 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, + 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, + 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, + 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, + 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, + 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, + 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, + 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, + 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, + 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d, +}; + +extern const u8 crypto_aes_sbox[256] __alias(aes_sbox); +extern const u8 crypto_aes_inv_sbox[256] __alias(aes_inv_sbox); + +EXPORT_SYMBOL(crypto_aes_sbox); +EXPORT_SYMBOL(crypto_aes_inv_sbox); + +static u32 mul_by_x(u32 w) +{ + u32 x = w & 0x7f7f7f7f; + u32 y = w & 0x80808080; + + /* multiply by polynomial 'x' (0b10) in GF(2^8) */ + return (x << 1) ^ (y >> 7) * 0x1b; +} + +static u32 mul_by_x2(u32 w) +{ + u32 x = w & 0x3f3f3f3f; + u32 y = w & 0x80808080; + u32 z = w & 0x40404040; + + /* multiply by polynomial 'x^2' (0b100) in GF(2^8) */ + return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b; +} + +static u32 mix_columns(u32 x) +{ + /* + * Perform the following matrix multiplication in GF(2^8) + * + * | 0x2 0x3 0x1 0x1 | | x[0] | + * | 0x1 0x2 0x3 0x1 | | x[1] | + * | 0x1 0x1 0x2 0x3 | x | x[2] | + * | 0x3 0x1 0x1 0x2 | | x[3] | + */ + u32 y = mul_by_x(x) ^ ror32(x, 16); + + return y ^ ror32(x ^ y, 8); +} + +static u32 inv_mix_columns(u32 x) +{ + /* + * Perform the following matrix multiplication in GF(2^8) + * + * | 0xe 0xb 0xd 0x9 | | x[0] | + * | 0x9 0xe 0xb 0xd | | x[1] | + * | 0xd 0x9 0xe 0xb | x | x[2] | + * | 0xb 0xd 0x9 0xe | | x[3] | + * + * which can conveniently be reduced to + * + * | 0x2 0x3 0x1 0x1 | | 0x5 0x0 0x4 0x0 | | x[0] | + * | 0x1 0x2 0x3 0x1 | | 0x0 0x5 0x0 0x4 | | x[1] | + * | 0x1 0x1 0x2 0x3 | x | 0x4 0x0 0x5 0x0 | x | x[2] | + * | 0x3 0x1 0x1 0x2 | | 0x0 0x4 0x0 0x5 | | x[3] | + */ + u32 y = mul_by_x2(x); + + return mix_columns(x ^ y ^ ror32(y, 16)); +} + +static __always_inline u32 subshift(u32 in[], int pos) +{ + return (aes_sbox[in[pos] & 0xff]) ^ + (aes_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^ + (aes_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^ + (aes_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24); +} + +static __always_inline u32 inv_subshift(u32 in[], int pos) +{ + return (aes_inv_sbox[in[pos] & 0xff]) ^ + (aes_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^ + (aes_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^ + (aes_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24); +} + +static u32 subw(u32 in) +{ + return (aes_sbox[in & 0xff]) ^ + (aes_sbox[(in >> 8) & 0xff] << 8) ^ + (aes_sbox[(in >> 16) & 0xff] << 16) ^ + (aes_sbox[(in >> 24) & 0xff] << 24); +} + +/** + * aes_expandkey - Expands the AES key as described in FIPS-197 + * @ctx: The location where the computed key will be stored. + * @in_key: The supplied key. + * @key_len: The length of the supplied key. + * + * Returns 0 on success. The function fails only if an invalid key size (or + * pointer) is supplied. + * The expanded key size is 240 bytes (max of 14 rounds with a unique 16 bytes + * key schedule plus a 16 bytes key which is used before the first round). 
+ * The decryption key is prepared for the "Equivalent Inverse Cipher" as + * described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is + * for the initial combination, the second slot for the first round and so on. + */ +int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, + unsigned int key_len) +{ + u32 kwords = key_len / sizeof(u32); + u32 rc, i, j; + int err; + + err = aes_check_keylen(key_len); + if (err) + return err; + + ctx->key_length = key_len; + + for (i = 0; i < kwords; i++) + ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32)); + + for (i = 0, rc = 1; i < 10; i++, rc = mul_by_x(rc)) { + u32 *rki = ctx->key_enc + (i * kwords); + u32 *rko = rki + kwords; + + rko[0] = ror32(subw(rki[kwords - 1]), 8) ^ rc ^ rki[0]; + rko[1] = rko[0] ^ rki[1]; + rko[2] = rko[1] ^ rki[2]; + rko[3] = rko[2] ^ rki[3]; + + if (key_len == AES_KEYSIZE_192) { + if (i >= 7) + break; + rko[4] = rko[3] ^ rki[4]; + rko[5] = rko[4] ^ rki[5]; + } else if (key_len == AES_KEYSIZE_256) { + if (i >= 6) + break; + rko[4] = subw(rko[3]) ^ rki[4]; + rko[5] = rko[4] ^ rki[5]; + rko[6] = rko[5] ^ rki[6]; + rko[7] = rko[6] ^ rki[7]; + } + } + + /* + * Generate the decryption keys for the Equivalent Inverse Cipher. + * This involves reversing the order of the round keys, and applying + * the Inverse Mix Columns transformation to all but the first and + * the last one. + */ + ctx->key_dec[0] = ctx->key_enc[key_len + 24]; + ctx->key_dec[1] = ctx->key_enc[key_len + 25]; + ctx->key_dec[2] = ctx->key_enc[key_len + 26]; + ctx->key_dec[3] = ctx->key_enc[key_len + 27]; + + for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) { + ctx->key_dec[i] = inv_mix_columns(ctx->key_enc[j]); + ctx->key_dec[i + 1] = inv_mix_columns(ctx->key_enc[j + 1]); + ctx->key_dec[i + 2] = inv_mix_columns(ctx->key_enc[j + 2]); + ctx->key_dec[i + 3] = inv_mix_columns(ctx->key_enc[j + 3]); + } + + ctx->key_dec[i] = ctx->key_enc[0]; + ctx->key_dec[i + 1] = ctx->key_enc[1]; + ctx->key_dec[i + 2] = ctx->key_enc[2]; + ctx->key_dec[i + 3] = ctx->key_enc[3]; + + return 0; +} +EXPORT_SYMBOL(aes_expandkey); + +/** + * aes_encrypt - Encrypt a single AES block + * @ctx: Context struct containing the key schedule + * @out: Buffer to store the ciphertext + * @in: Buffer containing the plaintext + */ +void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in) +{ + const u32 *rkp = ctx->key_enc + 4; + int rounds = 6 + ctx->key_length / 4; + u32 st0[4], st1[4]; + int round; + + st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in); + st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4); + st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8); + st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12); + + /* + * Force the compiler to emit data independent Sbox references, + * by xoring the input with Sbox values that are known to add up + * to zero. This pulls the entire Sbox into the D-cache before any + * data dependent lookups are done. 
+ */ + st0[0] ^= aes_sbox[ 0] ^ aes_sbox[ 64] ^ aes_sbox[134] ^ aes_sbox[195]; + st0[1] ^= aes_sbox[16] ^ aes_sbox[ 82] ^ aes_sbox[158] ^ aes_sbox[221]; + st0[2] ^= aes_sbox[32] ^ aes_sbox[ 96] ^ aes_sbox[160] ^ aes_sbox[234]; + st0[3] ^= aes_sbox[48] ^ aes_sbox[112] ^ aes_sbox[186] ^ aes_sbox[241]; + + for (round = 0;; round += 2, rkp += 8) { + st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0]; + st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1]; + st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2]; + st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3]; + + if (round == rounds - 2) + break; + + st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4]; + st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5]; + st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6]; + st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7]; + } + + put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out); + put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4); + put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8); + put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12); +} +EXPORT_SYMBOL(aes_encrypt); + +/** + * aes_decrypt - Decrypt a single AES block + * @ctx: Context struct containing the key schedule + * @out: Buffer to store the plaintext + * @in: Buffer containing the ciphertext + */ +void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in) +{ + const u32 *rkp = ctx->key_dec + 4; + int rounds = 6 + ctx->key_length / 4; + u32 st0[4], st1[4]; + int round; + + st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in); + st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4); + st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8); + st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12); + + /* + * Force the compiler to emit data independent Sbox references, + * by xoring the input with Sbox values that are known to add up + * to zero. This pulls the entire Sbox into the D-cache before any + * data dependent lookups are done. + */ + st0[0] ^= aes_inv_sbox[ 0] ^ aes_inv_sbox[ 64] ^ aes_inv_sbox[129] ^ aes_inv_sbox[200]; + st0[1] ^= aes_inv_sbox[16] ^ aes_inv_sbox[ 83] ^ aes_inv_sbox[150] ^ aes_inv_sbox[212]; + st0[2] ^= aes_inv_sbox[32] ^ aes_inv_sbox[ 96] ^ aes_inv_sbox[160] ^ aes_inv_sbox[236]; + st0[3] ^= aes_inv_sbox[48] ^ aes_inv_sbox[112] ^ aes_inv_sbox[187] ^ aes_inv_sbox[247]; + + for (round = 0;; round += 2, rkp += 8) { + st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0]; + st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1]; + st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2]; + st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3]; + + if (round == rounds - 2) + break; + + st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4]; + st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5]; + st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6]; + st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7]; + } + + put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out); + put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4); + put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8); + put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12); +} +EXPORT_SYMBOL(aes_decrypt); + +MODULE_DESCRIPTION("Generic AES library"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/lib/crypto/des.c b/lib/crypto/des.c new file mode 100644 index 000000000000..ef5bb8822aba --- /dev/null +++ b/lib/crypto/des.c @@ -0,0 +1,902 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Cryptographic API. + * + * DES & Triple DES EDE Cipher Algorithms. 
+ * + * Copyright (c) 2005 Dag Arne Osvik <da@osvik.no> + */ + +#include <linux/bitops.h> +#include <linux/compiler.h> +#include <linux/crypto.h> +#include <linux/errno.h> +#include <linux/fips.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <asm/unaligned.h> + +#include <crypto/des.h> +#include <crypto/internal/des.h> + +#define ROL(x, r) ((x) = rol32((x), (r))) +#define ROR(x, r) ((x) = ror32((x), (r))) + +/* Lookup tables for key expansion */ + +static const u8 pc1[256] = { + 0x00, 0x00, 0x40, 0x04, 0x10, 0x10, 0x50, 0x14, + 0x04, 0x40, 0x44, 0x44, 0x14, 0x50, 0x54, 0x54, + 0x02, 0x02, 0x42, 0x06, 0x12, 0x12, 0x52, 0x16, + 0x06, 0x42, 0x46, 0x46, 0x16, 0x52, 0x56, 0x56, + 0x80, 0x08, 0xc0, 0x0c, 0x90, 0x18, 0xd0, 0x1c, + 0x84, 0x48, 0xc4, 0x4c, 0x94, 0x58, 0xd4, 0x5c, + 0x82, 0x0a, 0xc2, 0x0e, 0x92, 0x1a, 0xd2, 0x1e, + 0x86, 0x4a, 0xc6, 0x4e, 0x96, 0x5a, 0xd6, 0x5e, + 0x20, 0x20, 0x60, 0x24, 0x30, 0x30, 0x70, 0x34, + 0x24, 0x60, 0x64, 0x64, 0x34, 0x70, 0x74, 0x74, + 0x22, 0x22, 0x62, 0x26, 0x32, 0x32, 0x72, 0x36, + 0x26, 0x62, 0x66, 0x66, 0x36, 0x72, 0x76, 0x76, + 0xa0, 0x28, 0xe0, 0x2c, 0xb0, 0x38, 0xf0, 0x3c, + 0xa4, 0x68, 0xe4, 0x6c, 0xb4, 0x78, 0xf4, 0x7c, + 0xa2, 0x2a, 0xe2, 0x2e, 0xb2, 0x3a, 0xf2, 0x3e, + 0xa6, 0x6a, 0xe6, 0x6e, 0xb6, 0x7a, 0xf6, 0x7e, + 0x08, 0x80, 0x48, 0x84, 0x18, 0x90, 0x58, 0x94, + 0x0c, 0xc0, 0x4c, 0xc4, 0x1c, 0xd0, 0x5c, 0xd4, + 0x0a, 0x82, 0x4a, 0x86, 0x1a, 0x92, 0x5a, 0x96, + 0x0e, 0xc2, 0x4e, 0xc6, 0x1e, 0xd2, 0x5e, 0xd6, + 0x88, 0x88, 0xc8, 0x8c, 0x98, 0x98, 0xd8, 0x9c, + 0x8c, 0xc8, 0xcc, 0xcc, 0x9c, 0xd8, 0xdc, 0xdc, + 0x8a, 0x8a, 0xca, 0x8e, 0x9a, 0x9a, 0xda, 0x9e, + 0x8e, 0xca, 0xce, 0xce, 0x9e, 0xda, 0xde, 0xde, + 0x28, 0xa0, 0x68, 0xa4, 0x38, 0xb0, 0x78, 0xb4, + 0x2c, 0xe0, 0x6c, 0xe4, 0x3c, 0xf0, 0x7c, 0xf4, + 0x2a, 0xa2, 0x6a, 0xa6, 0x3a, 0xb2, 0x7a, 0xb6, + 0x2e, 0xe2, 0x6e, 0xe6, 0x3e, 0xf2, 0x7e, 0xf6, + 0xa8, 0xa8, 0xe8, 0xac, 0xb8, 0xb8, 0xf8, 0xbc, + 0xac, 0xe8, 0xec, 0xec, 0xbc, 0xf8, 0xfc, 0xfc, + 0xaa, 0xaa, 0xea, 0xae, 0xba, 0xba, 0xfa, 0xbe, + 0xae, 0xea, 0xee, 0xee, 0xbe, 0xfa, 0xfe, 0xfe +}; + +static const u8 rs[256] = { + 0x00, 0x00, 0x80, 0x80, 0x02, 0x02, 0x82, 0x82, + 0x04, 0x04, 0x84, 0x84, 0x06, 0x06, 0x86, 0x86, + 0x08, 0x08, 0x88, 0x88, 0x0a, 0x0a, 0x8a, 0x8a, + 0x0c, 0x0c, 0x8c, 0x8c, 0x0e, 0x0e, 0x8e, 0x8e, + 0x10, 0x10, 0x90, 0x90, 0x12, 0x12, 0x92, 0x92, + 0x14, 0x14, 0x94, 0x94, 0x16, 0x16, 0x96, 0x96, + 0x18, 0x18, 0x98, 0x98, 0x1a, 0x1a, 0x9a, 0x9a, + 0x1c, 0x1c, 0x9c, 0x9c, 0x1e, 0x1e, 0x9e, 0x9e, + 0x20, 0x20, 0xa0, 0xa0, 0x22, 0x22, 0xa2, 0xa2, + 0x24, 0x24, 0xa4, 0xa4, 0x26, 0x26, 0xa6, 0xa6, + 0x28, 0x28, 0xa8, 0xa8, 0x2a, 0x2a, 0xaa, 0xaa, + 0x2c, 0x2c, 0xac, 0xac, 0x2e, 0x2e, 0xae, 0xae, + 0x30, 0x30, 0xb0, 0xb0, 0x32, 0x32, 0xb2, 0xb2, + 0x34, 0x34, 0xb4, 0xb4, 0x36, 0x36, 0xb6, 0xb6, + 0x38, 0x38, 0xb8, 0xb8, 0x3a, 0x3a, 0xba, 0xba, + 0x3c, 0x3c, 0xbc, 0xbc, 0x3e, 0x3e, 0xbe, 0xbe, + 0x40, 0x40, 0xc0, 0xc0, 0x42, 0x42, 0xc2, 0xc2, + 0x44, 0x44, 0xc4, 0xc4, 0x46, 0x46, 0xc6, 0xc6, + 0x48, 0x48, 0xc8, 0xc8, 0x4a, 0x4a, 0xca, 0xca, + 0x4c, 0x4c, 0xcc, 0xcc, 0x4e, 0x4e, 0xce, 0xce, + 0x50, 0x50, 0xd0, 0xd0, 0x52, 0x52, 0xd2, 0xd2, + 0x54, 0x54, 0xd4, 0xd4, 0x56, 0x56, 0xd6, 0xd6, + 0x58, 0x58, 0xd8, 0xd8, 0x5a, 0x5a, 0xda, 0xda, + 0x5c, 0x5c, 0xdc, 0xdc, 0x5e, 0x5e, 0xde, 0xde, + 0x60, 0x60, 0xe0, 0xe0, 0x62, 0x62, 0xe2, 0xe2, + 0x64, 0x64, 0xe4, 0xe4, 0x66, 0x66, 0xe6, 0xe6, + 0x68, 0x68, 0xe8, 0xe8, 0x6a, 0x6a, 0xea, 0xea, + 0x6c, 0x6c, 
0xec, 0xec, 0x6e, 0x6e, 0xee, 0xee, + 0x70, 0x70, 0xf0, 0xf0, 0x72, 0x72, 0xf2, 0xf2, + 0x74, 0x74, 0xf4, 0xf4, 0x76, 0x76, 0xf6, 0xf6, + 0x78, 0x78, 0xf8, 0xf8, 0x7a, 0x7a, 0xfa, 0xfa, + 0x7c, 0x7c, 0xfc, 0xfc, 0x7e, 0x7e, 0xfe, 0xfe +}; + +static const u32 pc2[1024] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00040000, 0x00000000, 0x04000000, 0x00100000, + 0x00400000, 0x00000008, 0x00000800, 0x40000000, + 0x00440000, 0x00000008, 0x04000800, 0x40100000, + 0x00000400, 0x00000020, 0x08000000, 0x00000100, + 0x00040400, 0x00000020, 0x0c000000, 0x00100100, + 0x00400400, 0x00000028, 0x08000800, 0x40000100, + 0x00440400, 0x00000028, 0x0c000800, 0x40100100, + 0x80000000, 0x00000010, 0x00000000, 0x00800000, + 0x80040000, 0x00000010, 0x04000000, 0x00900000, + 0x80400000, 0x00000018, 0x00000800, 0x40800000, + 0x80440000, 0x00000018, 0x04000800, 0x40900000, + 0x80000400, 0x00000030, 0x08000000, 0x00800100, + 0x80040400, 0x00000030, 0x0c000000, 0x00900100, + 0x80400400, 0x00000038, 0x08000800, 0x40800100, + 0x80440400, 0x00000038, 0x0c000800, 0x40900100, + 0x10000000, 0x00000000, 0x00200000, 0x00001000, + 0x10040000, 0x00000000, 0x04200000, 0x00101000, + 0x10400000, 0x00000008, 0x00200800, 0x40001000, + 0x10440000, 0x00000008, 0x04200800, 0x40101000, + 0x10000400, 0x00000020, 0x08200000, 0x00001100, + 0x10040400, 0x00000020, 0x0c200000, 0x00101100, + 0x10400400, 0x00000028, 0x08200800, 0x40001100, + 0x10440400, 0x00000028, 0x0c200800, 0x40101100, + 0x90000000, 0x00000010, 0x00200000, 0x00801000, + 0x90040000, 0x00000010, 0x04200000, 0x00901000, + 0x90400000, 0x00000018, 0x00200800, 0x40801000, + 0x90440000, 0x00000018, 0x04200800, 0x40901000, + 0x90000400, 0x00000030, 0x08200000, 0x00801100, + 0x90040400, 0x00000030, 0x0c200000, 0x00901100, + 0x90400400, 0x00000038, 0x08200800, 0x40801100, + 0x90440400, 0x00000038, 0x0c200800, 0x40901100, + 0x00000200, 0x00080000, 0x00000000, 0x00000004, + 0x00040200, 0x00080000, 0x04000000, 0x00100004, + 0x00400200, 0x00080008, 0x00000800, 0x40000004, + 0x00440200, 0x00080008, 0x04000800, 0x40100004, + 0x00000600, 0x00080020, 0x08000000, 0x00000104, + 0x00040600, 0x00080020, 0x0c000000, 0x00100104, + 0x00400600, 0x00080028, 0x08000800, 0x40000104, + 0x00440600, 0x00080028, 0x0c000800, 0x40100104, + 0x80000200, 0x00080010, 0x00000000, 0x00800004, + 0x80040200, 0x00080010, 0x04000000, 0x00900004, + 0x80400200, 0x00080018, 0x00000800, 0x40800004, + 0x80440200, 0x00080018, 0x04000800, 0x40900004, + 0x80000600, 0x00080030, 0x08000000, 0x00800104, + 0x80040600, 0x00080030, 0x0c000000, 0x00900104, + 0x80400600, 0x00080038, 0x08000800, 0x40800104, + 0x80440600, 0x00080038, 0x0c000800, 0x40900104, + 0x10000200, 0x00080000, 0x00200000, 0x00001004, + 0x10040200, 0x00080000, 0x04200000, 0x00101004, + 0x10400200, 0x00080008, 0x00200800, 0x40001004, + 0x10440200, 0x00080008, 0x04200800, 0x40101004, + 0x10000600, 0x00080020, 0x08200000, 0x00001104, + 0x10040600, 0x00080020, 0x0c200000, 0x00101104, + 0x10400600, 0x00080028, 0x08200800, 0x40001104, + 0x10440600, 0x00080028, 0x0c200800, 0x40101104, + 0x90000200, 0x00080010, 0x00200000, 0x00801004, + 0x90040200, 0x00080010, 0x04200000, 0x00901004, + 0x90400200, 0x00080018, 0x00200800, 0x40801004, + 0x90440200, 0x00080018, 0x04200800, 0x40901004, + 0x90000600, 0x00080030, 0x08200000, 0x00801104, + 0x90040600, 0x00080030, 0x0c200000, 0x00901104, + 0x90400600, 0x00080038, 0x08200800, 0x40801104, + 0x90440600, 0x00080038, 0x0c200800, 0x40901104, + 0x00000002, 0x00002000, 0x20000000, 0x00000001, + 0x00040002, 0x00002000, 
0x24000000, 0x00100001, + 0x00400002, 0x00002008, 0x20000800, 0x40000001, + 0x00440002, 0x00002008, 0x24000800, 0x40100001, + 0x00000402, 0x00002020, 0x28000000, 0x00000101, + 0x00040402, 0x00002020, 0x2c000000, 0x00100101, + 0x00400402, 0x00002028, 0x28000800, 0x40000101, + 0x00440402, 0x00002028, 0x2c000800, 0x40100101, + 0x80000002, 0x00002010, 0x20000000, 0x00800001, + 0x80040002, 0x00002010, 0x24000000, 0x00900001, + 0x80400002, 0x00002018, 0x20000800, 0x40800001, + 0x80440002, 0x00002018, 0x24000800, 0x40900001, + 0x80000402, 0x00002030, 0x28000000, 0x00800101, + 0x80040402, 0x00002030, 0x2c000000, 0x00900101, + 0x80400402, 0x00002038, 0x28000800, 0x40800101, + 0x80440402, 0x00002038, 0x2c000800, 0x40900101, + 0x10000002, 0x00002000, 0x20200000, 0x00001001, + 0x10040002, 0x00002000, 0x24200000, 0x00101001, + 0x10400002, 0x00002008, 0x20200800, 0x40001001, + 0x10440002, 0x00002008, 0x24200800, 0x40101001, + 0x10000402, 0x00002020, 0x28200000, 0x00001101, + 0x10040402, 0x00002020, 0x2c200000, 0x00101101, + 0x10400402, 0x00002028, 0x28200800, 0x40001101, + 0x10440402, 0x00002028, 0x2c200800, 0x40101101, + 0x90000002, 0x00002010, 0x20200000, 0x00801001, + 0x90040002, 0x00002010, 0x24200000, 0x00901001, + 0x90400002, 0x00002018, 0x20200800, 0x40801001, + 0x90440002, 0x00002018, 0x24200800, 0x40901001, + 0x90000402, 0x00002030, 0x28200000, 0x00801101, + 0x90040402, 0x00002030, 0x2c200000, 0x00901101, + 0x90400402, 0x00002038, 0x28200800, 0x40801101, + 0x90440402, 0x00002038, 0x2c200800, 0x40901101, + 0x00000202, 0x00082000, 0x20000000, 0x00000005, + 0x00040202, 0x00082000, 0x24000000, 0x00100005, + 0x00400202, 0x00082008, 0x20000800, 0x40000005, + 0x00440202, 0x00082008, 0x24000800, 0x40100005, + 0x00000602, 0x00082020, 0x28000000, 0x00000105, + 0x00040602, 0x00082020, 0x2c000000, 0x00100105, + 0x00400602, 0x00082028, 0x28000800, 0x40000105, + 0x00440602, 0x00082028, 0x2c000800, 0x40100105, + 0x80000202, 0x00082010, 0x20000000, 0x00800005, + 0x80040202, 0x00082010, 0x24000000, 0x00900005, + 0x80400202, 0x00082018, 0x20000800, 0x40800005, + 0x80440202, 0x00082018, 0x24000800, 0x40900005, + 0x80000602, 0x00082030, 0x28000000, 0x00800105, + 0x80040602, 0x00082030, 0x2c000000, 0x00900105, + 0x80400602, 0x00082038, 0x28000800, 0x40800105, + 0x80440602, 0x00082038, 0x2c000800, 0x40900105, + 0x10000202, 0x00082000, 0x20200000, 0x00001005, + 0x10040202, 0x00082000, 0x24200000, 0x00101005, + 0x10400202, 0x00082008, 0x20200800, 0x40001005, + 0x10440202, 0x00082008, 0x24200800, 0x40101005, + 0x10000602, 0x00082020, 0x28200000, 0x00001105, + 0x10040602, 0x00082020, 0x2c200000, 0x00101105, + 0x10400602, 0x00082028, 0x28200800, 0x40001105, + 0x10440602, 0x00082028, 0x2c200800, 0x40101105, + 0x90000202, 0x00082010, 0x20200000, 0x00801005, + 0x90040202, 0x00082010, 0x24200000, 0x00901005, + 0x90400202, 0x00082018, 0x20200800, 0x40801005, + 0x90440202, 0x00082018, 0x24200800, 0x40901005, + 0x90000602, 0x00082030, 0x28200000, 0x00801105, + 0x90040602, 0x00082030, 0x2c200000, 0x00901105, + 0x90400602, 0x00082038, 0x28200800, 0x40801105, + 0x90440602, 0x00082038, 0x2c200800, 0x40901105, + + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000008, 0x00080000, 0x10000000, + 0x02000000, 0x00000000, 0x00000080, 0x00001000, + 0x02000000, 0x00000008, 0x00080080, 0x10001000, + 0x00004000, 0x00000000, 0x00000040, 0x00040000, + 0x00004000, 0x00000008, 0x00080040, 0x10040000, + 0x02004000, 0x00000000, 0x000000c0, 0x00041000, + 0x02004000, 0x00000008, 0x000800c0, 0x10041000, + 0x00020000, 0x00008000, 
0x08000000, 0x00200000, + 0x00020000, 0x00008008, 0x08080000, 0x10200000, + 0x02020000, 0x00008000, 0x08000080, 0x00201000, + 0x02020000, 0x00008008, 0x08080080, 0x10201000, + 0x00024000, 0x00008000, 0x08000040, 0x00240000, + 0x00024000, 0x00008008, 0x08080040, 0x10240000, + 0x02024000, 0x00008000, 0x080000c0, 0x00241000, + 0x02024000, 0x00008008, 0x080800c0, 0x10241000, + 0x00000000, 0x01000000, 0x00002000, 0x00000020, + 0x00000000, 0x01000008, 0x00082000, 0x10000020, + 0x02000000, 0x01000000, 0x00002080, 0x00001020, + 0x02000000, 0x01000008, 0x00082080, 0x10001020, + 0x00004000, 0x01000000, 0x00002040, 0x00040020, + 0x00004000, 0x01000008, 0x00082040, 0x10040020, + 0x02004000, 0x01000000, 0x000020c0, 0x00041020, + 0x02004000, 0x01000008, 0x000820c0, 0x10041020, + 0x00020000, 0x01008000, 0x08002000, 0x00200020, + 0x00020000, 0x01008008, 0x08082000, 0x10200020, + 0x02020000, 0x01008000, 0x08002080, 0x00201020, + 0x02020000, 0x01008008, 0x08082080, 0x10201020, + 0x00024000, 0x01008000, 0x08002040, 0x00240020, + 0x00024000, 0x01008008, 0x08082040, 0x10240020, + 0x02024000, 0x01008000, 0x080020c0, 0x00241020, + 0x02024000, 0x01008008, 0x080820c0, 0x10241020, + 0x00000400, 0x04000000, 0x00100000, 0x00000004, + 0x00000400, 0x04000008, 0x00180000, 0x10000004, + 0x02000400, 0x04000000, 0x00100080, 0x00001004, + 0x02000400, 0x04000008, 0x00180080, 0x10001004, + 0x00004400, 0x04000000, 0x00100040, 0x00040004, + 0x00004400, 0x04000008, 0x00180040, 0x10040004, + 0x02004400, 0x04000000, 0x001000c0, 0x00041004, + 0x02004400, 0x04000008, 0x001800c0, 0x10041004, + 0x00020400, 0x04008000, 0x08100000, 0x00200004, + 0x00020400, 0x04008008, 0x08180000, 0x10200004, + 0x02020400, 0x04008000, 0x08100080, 0x00201004, + 0x02020400, 0x04008008, 0x08180080, 0x10201004, + 0x00024400, 0x04008000, 0x08100040, 0x00240004, + 0x00024400, 0x04008008, 0x08180040, 0x10240004, + 0x02024400, 0x04008000, 0x081000c0, 0x00241004, + 0x02024400, 0x04008008, 0x081800c0, 0x10241004, + 0x00000400, 0x05000000, 0x00102000, 0x00000024, + 0x00000400, 0x05000008, 0x00182000, 0x10000024, + 0x02000400, 0x05000000, 0x00102080, 0x00001024, + 0x02000400, 0x05000008, 0x00182080, 0x10001024, + 0x00004400, 0x05000000, 0x00102040, 0x00040024, + 0x00004400, 0x05000008, 0x00182040, 0x10040024, + 0x02004400, 0x05000000, 0x001020c0, 0x00041024, + 0x02004400, 0x05000008, 0x001820c0, 0x10041024, + 0x00020400, 0x05008000, 0x08102000, 0x00200024, + 0x00020400, 0x05008008, 0x08182000, 0x10200024, + 0x02020400, 0x05008000, 0x08102080, 0x00201024, + 0x02020400, 0x05008008, 0x08182080, 0x10201024, + 0x00024400, 0x05008000, 0x08102040, 0x00240024, + 0x00024400, 0x05008008, 0x08182040, 0x10240024, + 0x02024400, 0x05008000, 0x081020c0, 0x00241024, + 0x02024400, 0x05008008, 0x081820c0, 0x10241024, + 0x00000800, 0x00010000, 0x20000000, 0x00000010, + 0x00000800, 0x00010008, 0x20080000, 0x10000010, + 0x02000800, 0x00010000, 0x20000080, 0x00001010, + 0x02000800, 0x00010008, 0x20080080, 0x10001010, + 0x00004800, 0x00010000, 0x20000040, 0x00040010, + 0x00004800, 0x00010008, 0x20080040, 0x10040010, + 0x02004800, 0x00010000, 0x200000c0, 0x00041010, + 0x02004800, 0x00010008, 0x200800c0, 0x10041010, + 0x00020800, 0x00018000, 0x28000000, 0x00200010, + 0x00020800, 0x00018008, 0x28080000, 0x10200010, + 0x02020800, 0x00018000, 0x28000080, 0x00201010, + 0x02020800, 0x00018008, 0x28080080, 0x10201010, + 0x00024800, 0x00018000, 0x28000040, 0x00240010, + 0x00024800, 0x00018008, 0x28080040, 0x10240010, + 0x02024800, 0x00018000, 0x280000c0, 0x00241010, + 0x02024800, 0x00018008, 
0x280800c0, 0x10241010, + 0x00000800, 0x01010000, 0x20002000, 0x00000030, + 0x00000800, 0x01010008, 0x20082000, 0x10000030, + 0x02000800, 0x01010000, 0x20002080, 0x00001030, + 0x02000800, 0x01010008, 0x20082080, 0x10001030, + 0x00004800, 0x01010000, 0x20002040, 0x00040030, + 0x00004800, 0x01010008, 0x20082040, 0x10040030, + 0x02004800, 0x01010000, 0x200020c0, 0x00041030, + 0x02004800, 0x01010008, 0x200820c0, 0x10041030, + 0x00020800, 0x01018000, 0x28002000, 0x00200030, + 0x00020800, 0x01018008, 0x28082000, 0x10200030, + 0x02020800, 0x01018000, 0x28002080, 0x00201030, + 0x02020800, 0x01018008, 0x28082080, 0x10201030, + 0x00024800, 0x01018000, 0x28002040, 0x00240030, + 0x00024800, 0x01018008, 0x28082040, 0x10240030, + 0x02024800, 0x01018000, 0x280020c0, 0x00241030, + 0x02024800, 0x01018008, 0x280820c0, 0x10241030, + 0x00000c00, 0x04010000, 0x20100000, 0x00000014, + 0x00000c00, 0x04010008, 0x20180000, 0x10000014, + 0x02000c00, 0x04010000, 0x20100080, 0x00001014, + 0x02000c00, 0x04010008, 0x20180080, 0x10001014, + 0x00004c00, 0x04010000, 0x20100040, 0x00040014, + 0x00004c00, 0x04010008, 0x20180040, 0x10040014, + 0x02004c00, 0x04010000, 0x201000c0, 0x00041014, + 0x02004c00, 0x04010008, 0x201800c0, 0x10041014, + 0x00020c00, 0x04018000, 0x28100000, 0x00200014, + 0x00020c00, 0x04018008, 0x28180000, 0x10200014, + 0x02020c00, 0x04018000, 0x28100080, 0x00201014, + 0x02020c00, 0x04018008, 0x28180080, 0x10201014, + 0x00024c00, 0x04018000, 0x28100040, 0x00240014, + 0x00024c00, 0x04018008, 0x28180040, 0x10240014, + 0x02024c00, 0x04018000, 0x281000c0, 0x00241014, + 0x02024c00, 0x04018008, 0x281800c0, 0x10241014, + 0x00000c00, 0x05010000, 0x20102000, 0x00000034, + 0x00000c00, 0x05010008, 0x20182000, 0x10000034, + 0x02000c00, 0x05010000, 0x20102080, 0x00001034, + 0x02000c00, 0x05010008, 0x20182080, 0x10001034, + 0x00004c00, 0x05010000, 0x20102040, 0x00040034, + 0x00004c00, 0x05010008, 0x20182040, 0x10040034, + 0x02004c00, 0x05010000, 0x201020c0, 0x00041034, + 0x02004c00, 0x05010008, 0x201820c0, 0x10041034, + 0x00020c00, 0x05018000, 0x28102000, 0x00200034, + 0x00020c00, 0x05018008, 0x28182000, 0x10200034, + 0x02020c00, 0x05018000, 0x28102080, 0x00201034, + 0x02020c00, 0x05018008, 0x28182080, 0x10201034, + 0x00024c00, 0x05018000, 0x28102040, 0x00240034, + 0x00024c00, 0x05018008, 0x28182040, 0x10240034, + 0x02024c00, 0x05018000, 0x281020c0, 0x00241034, + 0x02024c00, 0x05018008, 0x281820c0, 0x10241034 +}; + +/* S-box lookup tables */ + +static const u32 S1[64] = { + 0x01010400, 0x00000000, 0x00010000, 0x01010404, + 0x01010004, 0x00010404, 0x00000004, 0x00010000, + 0x00000400, 0x01010400, 0x01010404, 0x00000400, + 0x01000404, 0x01010004, 0x01000000, 0x00000004, + 0x00000404, 0x01000400, 0x01000400, 0x00010400, + 0x00010400, 0x01010000, 0x01010000, 0x01000404, + 0x00010004, 0x01000004, 0x01000004, 0x00010004, + 0x00000000, 0x00000404, 0x00010404, 0x01000000, + 0x00010000, 0x01010404, 0x00000004, 0x01010000, + 0x01010400, 0x01000000, 0x01000000, 0x00000400, + 0x01010004, 0x00010000, 0x00010400, 0x01000004, + 0x00000400, 0x00000004, 0x01000404, 0x00010404, + 0x01010404, 0x00010004, 0x01010000, 0x01000404, + 0x01000004, 0x00000404, 0x00010404, 0x01010400, + 0x00000404, 0x01000400, 0x01000400, 0x00000000, + 0x00010004, 0x00010400, 0x00000000, 0x01010004 +}; + +static const u32 S2[64] = { + 0x80108020, 0x80008000, 0x00008000, 0x00108020, + 0x00100000, 0x00000020, 0x80100020, 0x80008020, + 0x80000020, 0x80108020, 0x80108000, 0x80000000, + 0x80008000, 0x00100000, 0x00000020, 0x80100020, + 0x00108000, 0x00100020, 
0x80008020, 0x00000000, + 0x80000000, 0x00008000, 0x00108020, 0x80100000, + 0x00100020, 0x80000020, 0x00000000, 0x00108000, + 0x00008020, 0x80108000, 0x80100000, 0x00008020, + 0x00000000, 0x00108020, 0x80100020, 0x00100000, + 0x80008020, 0x80100000, 0x80108000, 0x00008000, + 0x80100000, 0x80008000, 0x00000020, 0x80108020, + 0x00108020, 0x00000020, 0x00008000, 0x80000000, + 0x00008020, 0x80108000, 0x00100000, 0x80000020, + 0x00100020, 0x80008020, 0x80000020, 0x00100020, + 0x00108000, 0x00000000, 0x80008000, 0x00008020, + 0x80000000, 0x80100020, 0x80108020, 0x00108000 +}; + +static const u32 S3[64] = { + 0x00000208, 0x08020200, 0x00000000, 0x08020008, + 0x08000200, 0x00000000, 0x00020208, 0x08000200, + 0x00020008, 0x08000008, 0x08000008, 0x00020000, + 0x08020208, 0x00020008, 0x08020000, 0x00000208, + 0x08000000, 0x00000008, 0x08020200, 0x00000200, + 0x00020200, 0x08020000, 0x08020008, 0x00020208, + 0x08000208, 0x00020200, 0x00020000, 0x08000208, + 0x00000008, 0x08020208, 0x00000200, 0x08000000, + 0x08020200, 0x08000000, 0x00020008, 0x00000208, + 0x00020000, 0x08020200, 0x08000200, 0x00000000, + 0x00000200, 0x00020008, 0x08020208, 0x08000200, + 0x08000008, 0x00000200, 0x00000000, 0x08020008, + 0x08000208, 0x00020000, 0x08000000, 0x08020208, + 0x00000008, 0x00020208, 0x00020200, 0x08000008, + 0x08020000, 0x08000208, 0x00000208, 0x08020000, + 0x00020208, 0x00000008, 0x08020008, 0x00020200 +}; + +static const u32 S4[64] = { + 0x00802001, 0x00002081, 0x00002081, 0x00000080, + 0x00802080, 0x00800081, 0x00800001, 0x00002001, + 0x00000000, 0x00802000, 0x00802000, 0x00802081, + 0x00000081, 0x00000000, 0x00800080, 0x00800001, + 0x00000001, 0x00002000, 0x00800000, 0x00802001, + 0x00000080, 0x00800000, 0x00002001, 0x00002080, + 0x00800081, 0x00000001, 0x00002080, 0x00800080, + 0x00002000, 0x00802080, 0x00802081, 0x00000081, + 0x00800080, 0x00800001, 0x00802000, 0x00802081, + 0x00000081, 0x00000000, 0x00000000, 0x00802000, + 0x00002080, 0x00800080, 0x00800081, 0x00000001, + 0x00802001, 0x00002081, 0x00002081, 0x00000080, + 0x00802081, 0x00000081, 0x00000001, 0x00002000, + 0x00800001, 0x00002001, 0x00802080, 0x00800081, + 0x00002001, 0x00002080, 0x00800000, 0x00802001, + 0x00000080, 0x00800000, 0x00002000, 0x00802080 +}; + +static const u32 S5[64] = { + 0x00000100, 0x02080100, 0x02080000, 0x42000100, + 0x00080000, 0x00000100, 0x40000000, 0x02080000, + 0x40080100, 0x00080000, 0x02000100, 0x40080100, + 0x42000100, 0x42080000, 0x00080100, 0x40000000, + 0x02000000, 0x40080000, 0x40080000, 0x00000000, + 0x40000100, 0x42080100, 0x42080100, 0x02000100, + 0x42080000, 0x40000100, 0x00000000, 0x42000000, + 0x02080100, 0x02000000, 0x42000000, 0x00080100, + 0x00080000, 0x42000100, 0x00000100, 0x02000000, + 0x40000000, 0x02080000, 0x42000100, 0x40080100, + 0x02000100, 0x40000000, 0x42080000, 0x02080100, + 0x40080100, 0x00000100, 0x02000000, 0x42080000, + 0x42080100, 0x00080100, 0x42000000, 0x42080100, + 0x02080000, 0x00000000, 0x40080000, 0x42000000, + 0x00080100, 0x02000100, 0x40000100, 0x00080000, + 0x00000000, 0x40080000, 0x02080100, 0x40000100 +}; + +static const u32 S6[64] = { + 0x20000010, 0x20400000, 0x00004000, 0x20404010, + 0x20400000, 0x00000010, 0x20404010, 0x00400000, + 0x20004000, 0x00404010, 0x00400000, 0x20000010, + 0x00400010, 0x20004000, 0x20000000, 0x00004010, + 0x00000000, 0x00400010, 0x20004010, 0x00004000, + 0x00404000, 0x20004010, 0x00000010, 0x20400010, + 0x20400010, 0x00000000, 0x00404010, 0x20404000, + 0x00004010, 0x00404000, 0x20404000, 0x20000000, + 0x20004000, 0x00000010, 0x20400010, 
0x00404000, + 0x20404010, 0x00400000, 0x00004010, 0x20000010, + 0x00400000, 0x20004000, 0x20000000, 0x00004010, + 0x20000010, 0x20404010, 0x00404000, 0x20400000, + 0x00404010, 0x20404000, 0x00000000, 0x20400010, + 0x00000010, 0x00004000, 0x20400000, 0x00404010, + 0x00004000, 0x00400010, 0x20004010, 0x00000000, + 0x20404000, 0x20000000, 0x00400010, 0x20004010 +}; + +static const u32 S7[64] = { + 0x00200000, 0x04200002, 0x04000802, 0x00000000, + 0x00000800, 0x04000802, 0x00200802, 0x04200800, + 0x04200802, 0x00200000, 0x00000000, 0x04000002, + 0x00000002, 0x04000000, 0x04200002, 0x00000802, + 0x04000800, 0x00200802, 0x00200002, 0x04000800, + 0x04000002, 0x04200000, 0x04200800, 0x00200002, + 0x04200000, 0x00000800, 0x00000802, 0x04200802, + 0x00200800, 0x00000002, 0x04000000, 0x00200800, + 0x04000000, 0x00200800, 0x00200000, 0x04000802, + 0x04000802, 0x04200002, 0x04200002, 0x00000002, + 0x00200002, 0x04000000, 0x04000800, 0x00200000, + 0x04200800, 0x00000802, 0x00200802, 0x04200800, + 0x00000802, 0x04000002, 0x04200802, 0x04200000, + 0x00200800, 0x00000000, 0x00000002, 0x04200802, + 0x00000000, 0x00200802, 0x04200000, 0x00000800, + 0x04000002, 0x04000800, 0x00000800, 0x00200002 +}; + +static const u32 S8[64] = { + 0x10001040, 0x00001000, 0x00040000, 0x10041040, + 0x10000000, 0x10001040, 0x00000040, 0x10000000, + 0x00040040, 0x10040000, 0x10041040, 0x00041000, + 0x10041000, 0x00041040, 0x00001000, 0x00000040, + 0x10040000, 0x10000040, 0x10001000, 0x00001040, + 0x00041000, 0x00040040, 0x10040040, 0x10041000, + 0x00001040, 0x00000000, 0x00000000, 0x10040040, + 0x10000040, 0x10001000, 0x00041040, 0x00040000, + 0x00041040, 0x00040000, 0x10041000, 0x00001000, + 0x00000040, 0x10040040, 0x00001000, 0x00041040, + 0x10001000, 0x00000040, 0x10000040, 0x10040000, + 0x10040040, 0x10000000, 0x00040000, 0x10001040, + 0x00000000, 0x10041040, 0x00040040, 0x10000040, + 0x10040000, 0x10001000, 0x10001040, 0x00000000, + 0x10041040, 0x00041000, 0x00041000, 0x00001040, + 0x00001040, 0x00040040, 0x10000000, 0x10041000 +}; + +/* Encryption components: IP, FP, and round function */ + +#define IP(L, R, T) \ + ROL(R, 4); \ + T = L; \ + L ^= R; \ + L &= 0xf0f0f0f0; \ + R ^= L; \ + L ^= T; \ + ROL(R, 12); \ + T = L; \ + L ^= R; \ + L &= 0xffff0000; \ + R ^= L; \ + L ^= T; \ + ROR(R, 14); \ + T = L; \ + L ^= R; \ + L &= 0xcccccccc; \ + R ^= L; \ + L ^= T; \ + ROL(R, 6); \ + T = L; \ + L ^= R; \ + L &= 0xff00ff00; \ + R ^= L; \ + L ^= T; \ + ROR(R, 7); \ + T = L; \ + L ^= R; \ + L &= 0xaaaaaaaa; \ + R ^= L; \ + L ^= T; \ + ROL(L, 1); + +#define FP(L, R, T) \ + ROR(L, 1); \ + T = L; \ + L ^= R; \ + L &= 0xaaaaaaaa; \ + R ^= L; \ + L ^= T; \ + ROL(R, 7); \ + T = L; \ + L ^= R; \ + L &= 0xff00ff00; \ + R ^= L; \ + L ^= T; \ + ROR(R, 6); \ + T = L; \ + L ^= R; \ + L &= 0xcccccccc; \ + R ^= L; \ + L ^= T; \ + ROL(R, 14); \ + T = L; \ + L ^= R; \ + L &= 0xffff0000; \ + R ^= L; \ + L ^= T; \ + ROR(R, 12); \ + T = L; \ + L ^= R; \ + L &= 0xf0f0f0f0; \ + R ^= L; \ + L ^= T; \ + ROR(R, 4); + +#define ROUND(L, R, A, B, K, d) \ + B = K[0]; A = K[1]; K += d; \ + B ^= R; A ^= R; \ + B &= 0x3f3f3f3f; ROR(A, 4); \ + L ^= S8[0xff & B]; A &= 0x3f3f3f3f; \ + L ^= S6[0xff & (B >> 8)]; B >>= 16; \ + L ^= S7[0xff & A]; \ + L ^= S5[0xff & (A >> 8)]; A >>= 16; \ + L ^= S4[0xff & B]; \ + L ^= S2[0xff & (B >> 8)]; \ + L ^= S3[0xff & A]; \ + L ^= S1[0xff & (A >> 8)]; + +/* + * PC2 lookup tables are organized as 2 consecutive sets of 4 interleaved + * tables of 128 elements. 
One set is for C_i and the other for D_i, while + * the 4 interleaved tables correspond to four 7-bit subsets of C_i or D_i. + * + * After PC1 each of the variables a,b,c,d contains a 7 bit subset of C_i + * or D_i in bits 7-1 (bit 0 being the least significant). + */ + +#define T1(x) pt[2 * (x) + 0] +#define T2(x) pt[2 * (x) + 1] +#define T3(x) pt[2 * (x) + 2] +#define T4(x) pt[2 * (x) + 3] + +#define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) + +/* + * Encryption key expansion + * + * RFC2451: Weak key checks SHOULD be performed. + * + * FIPS 74: + * + * Keys having duals are keys which produce all zeros, all ones, or + * alternating zero-one patterns in the C and D registers after Permuted + * Choice 1 has operated on the key. + * + */ +static unsigned long des_ekey(u32 *pe, const u8 *k) +{ + /* K&R: long is at least 32 bits */ + unsigned long a, b, c, d, w; + const u32 *pt = pc2; + + d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d]; + c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c]; + b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; + a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; + + pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; + pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; + pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; + pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a); + + /* Check if first half is weak */ + w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); + + /* Skip to next table set */ + pt += 512; + + d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1]; + c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1]; + b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; + a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; + + /* Check if second half is weak */ + w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); + + pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; + pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; + pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; + pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a); + + /* Fixup: 2413 5768 
-> 1357 2468 */ + for (d = 0; d < 16; ++d) { + a = pe[2 * d]; + b = pe[2 * d + 1]; + c = a ^ b; + c &= 0xffff0000; + a ^= c; + b ^= c; + ROL(b, 18); + pe[2 * d] = a; + pe[2 * d + 1] = b; + } + + /* Zero if weak key */ + return w; +} + +int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen) +{ + if (keylen != DES_KEY_SIZE) + return -EINVAL; + + return des_ekey(ctx->expkey, key) ? 0 : -ENOKEY; +} +EXPORT_SYMBOL_GPL(des_expand_key); + +/* + * Decryption key expansion + * + * No weak key checking is performed, as this is only used by triple DES + * + */ +static void dkey(u32 *pe, const u8 *k) +{ + /* K&R: long is at least 32 bits */ + unsigned long a, b, c, d; + const u32 *pt = pc2; + + d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d]; + c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c]; + b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; + a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; + + pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d]; + pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c]; + pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b]; + pe[15 * 2] = DES_PC2(b, c, d, a); + + /* Skip to next table set */ + pt += 512; + + d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1]; + c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1]; + b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; + a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; + + pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; + pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; + pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; + pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; + pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; + pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; + pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; + pe[15 * 2 + 1] = DES_PC2(b, c, d, a); + + /* Fixup: 2413 5768 -> 1357 2468 */ + for (d = 0; d < 16; ++d) { + a = pe[2 * d]; + b = pe[2 * d + 1]; + c = a ^ b; + c &= 0xffff0000; + a ^= c; + b ^= c; + ROL(b, 18); + pe[2 * d] = a; + pe[2 * d + 1] = b; + } +} + +void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src) +{ + const u32 *K = ctx->expkey; + u32 L, R, A, B; + int i; + + L = get_unaligned_le32(src); + R = get_unaligned_le32(src + 4); + + IP(L, R, A); + for (i = 0; i < 8; i++) { + 
ROUND(L, R, A, B, K, 2); + ROUND(R, L, A, B, K, 2); + } + FP(R, L, A); + + put_unaligned_le32(R, dst); + put_unaligned_le32(L, dst + 4); +} +EXPORT_SYMBOL_GPL(des_encrypt); + +void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src) +{ + const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2; + u32 L, R, A, B; + int i; + + L = get_unaligned_le32(src); + R = get_unaligned_le32(src + 4); + + IP(L, R, A); + for (i = 0; i < 8; i++) { + ROUND(L, R, A, B, K, -2); + ROUND(R, L, A, B, K, -2); + } + FP(R, L, A); + + put_unaligned_le32(R, dst); + put_unaligned_le32(L, dst + 4); +} +EXPORT_SYMBOL_GPL(des_decrypt); + +int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key, + unsigned int keylen) +{ + u32 *pe = ctx->expkey; + int err; + + if (keylen != DES3_EDE_KEY_SIZE) + return -EINVAL; + + err = des3_ede_verify_key(key, keylen, true); + if (err && err != -ENOKEY) + return err; + + des_ekey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; + dkey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; + des_ekey(pe, key); + + return err; +} +EXPORT_SYMBOL_GPL(des3_ede_expand_key); + +void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src) +{ + const u32 *K = dctx->expkey; + u32 L, R, A, B; + int i; + + L = get_unaligned_le32(src); + R = get_unaligned_le32(src + 4); + + IP(L, R, A); + for (i = 0; i < 8; i++) { + ROUND(L, R, A, B, K, 2); + ROUND(R, L, A, B, K, 2); + } + for (i = 0; i < 8; i++) { + ROUND(R, L, A, B, K, 2); + ROUND(L, R, A, B, K, 2); + } + for (i = 0; i < 8; i++) { + ROUND(L, R, A, B, K, 2); + ROUND(R, L, A, B, K, 2); + } + FP(R, L, A); + + put_unaligned_le32(R, dst); + put_unaligned_le32(L, dst + 4); +} +EXPORT_SYMBOL_GPL(des3_ede_encrypt); + +void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src) +{ + const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2; + u32 L, R, A, B; + int i; + + L = get_unaligned_le32(src); + R = get_unaligned_le32(src + 4); + + IP(L, R, A); + for (i = 0; i < 8; i++) { + ROUND(L, R, A, B, K, -2); + ROUND(R, L, A, B, K, -2); + } + for (i = 0; i < 8; i++) { + ROUND(R, L, A, B, K, -2); + ROUND(L, R, A, B, K, -2); + } + for (i = 0; i < 8; i++) { + ROUND(L, R, A, B, K, -2); + ROUND(R, L, A, B, K, -2); + } + FP(R, L, A); + + put_unaligned_le32(R, dst); + put_unaligned_le32(L, dst + 4); +} +EXPORT_SYMBOL_GPL(des3_ede_decrypt); + +MODULE_LICENSE("GPL"); diff --git a/lib/sha256.c b/lib/crypto/sha256.c index d9af148d4349..66cb04b0cf4e 100644 --- a/lib/sha256.c +++ b/lib/crypto/sha256.c @@ -12,9 +12,11 @@ */ #include <linux/bitops.h> -#include <linux/sha256.h> +#include <linux/export.h> +#include <linux/module.h> #include <linux/string.h> -#include <asm/byteorder.h> +#include <crypto/sha.h> +#include <asm/unaligned.h> static inline u32 Ch(u32 x, u32 y, u32 z) { @@ -33,7 +35,7 @@ static inline u32 Maj(u32 x, u32 y, u32 z) static inline void LOAD_OP(int I, u32 *W, const u8 *input) { - W[I] = __be32_to_cpu(((__be32 *)(input))[I]); + W[I] = get_unaligned_be32((__u32 *)input + I); } static inline void BLEND_OP(int I, u32 *W) @@ -92,131 +94,116 @@ static void sha256_transform(u32 *state, const u8 *input) t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14]; t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; t1 = g + 
e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40]; - t2 = e0(a) + Maj(a, b, c); d += t1; h 
= t1+t2; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; + t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; + t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; + t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; + t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; + t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; + t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; + t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; + t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; state[0] += a; 
state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; /* clear any sensitive info... */ a = b = c = d = e = f = g = h = t1 = t2 = 0; - memset(W, 0, 64 * sizeof(u32)); -} - -int sha256_init(struct sha256_state *sctx) -{ - sctx->state[0] = SHA256_H0; - sctx->state[1] = SHA256_H1; - sctx->state[2] = SHA256_H2; - sctx->state[3] = SHA256_H3; - sctx->state[4] = SHA256_H4; - sctx->state[5] = SHA256_H5; - sctx->state[6] = SHA256_H6; - sctx->state[7] = SHA256_H7; - sctx->count = 0; - - return 0; + memzero_explicit(W, 64 * sizeof(u32)); } int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) @@ -248,8 +235,15 @@ int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) return 0; } +EXPORT_SYMBOL(sha256_update); -int sha256_final(struct sha256_state *sctx, u8 *out) +int sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len) +{ + return sha256_update(sctx, data, len); +} +EXPORT_SYMBOL(sha224_update); + +static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words) { __be32 *dst = (__be32 *)out; __be64 bits; @@ -269,11 +263,25 @@ int sha256_final(struct sha256_state *sctx, u8 *out) sha256_update(sctx, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_be32(sctx->state[i]); + for (i = 0; i < digest_words; i++) + put_unaligned_be32(sctx->state[i], &dst[i]); /* Zeroize sensitive information. */ memset(sctx, 0, sizeof(*sctx)); return 0; } + +int sha256_final(struct sha256_state *sctx, u8 *out) +{ + return __sha256_final(sctx, out, 8); +} +EXPORT_SYMBOL(sha256_final); + +int sha224_final(struct sha256_state *sctx, u8 *out) +{ + return __sha256_final(sctx, out, 7); +} +EXPORT_SYMBOL(sha224_final); + +MODULE_LICENSE("GPL"); diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 3bb6260d8f42..2dceaca27489 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -639,30 +639,12 @@ do { \ ************** MIPS ***************** ***************************************/ #if defined(__mips__) && W_TYPE_SIZE == 32 -#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) #define umul_ppmm(w1, w0, u, v) \ do { \ UDItype __ll = (UDItype)(u) * (v); \ w1 = __ll >> 32; \ w0 = __ll; \ } while (0) -#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 -#define umul_ppmm(w1, w0, u, v) \ - __asm__ ("multu %2,%3" \ - : "=l" ((USItype)(w0)), \ - "=h" ((USItype)(w1)) \ - : "d" ((USItype)(u)), \ - "d" ((USItype)(v))) -#else -#define umul_ppmm(w1, w0, u, v) \ - __asm__ ("multu %2,%3\n" \ - "mflo %0\n" \ - "mfhi %1" \ - : "=d" ((USItype)(w0)), \ - "=d" ((USItype)(w1)) \ - : "d" ((USItype)(u)), \ - "d" ((USItype)(v))) -#endif #define UMUL_TIME 10 #define UDIV_TIME 100 #endif /* __mips__ */ @@ -687,7 +669,7 @@ do { \ : "d" ((UDItype)(u)), \ "d" ((UDItype)(v))); \ } while (0) -#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) +#else #define umul_ppmm(w1, w0, u, v) \ do { \ typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ @@ -695,22 +677,6 @@ do { \ w1 = __ll >> 64; \ w0 = __ll; \ } while (0) -#elif __GNUC__ > 2 || __GNUC_MINOR__ >= 7 -#define umul_ppmm(w1, w0, u, v) \ - __asm__ ("dmultu %2,%3" \ - : "=l" ((UDItype)(w0)), \ - "=h" ((UDItype)(w1)) \ - : "d" ((UDItype)(u)), \ - "d" ((UDItype)(v))) -#else -#define umul_ppmm(w1, w0, u, v) \ - __asm__ ("dmultu %2,%3\n" \ - "mflo %0\n" \ - "mfhi %1" \ - : "=d" ((UDItype)(w0)), \ - "=d" ((UDItype)(w1)) \ - : "d" ((UDItype)(u)), \ - "d" ((UDItype)(v))) #endif #define UMUL_TIME 20 
#define UDIV_TIME 140
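
The DES key-expansion helpers above fold the weak-key check into their return value: des_expand_key() returns 0 for a usable key, -EINVAL for a wrong length, and -ENOKEY when the schedule was built but the key is weak, while des3_ede_expand_key() passes the non-fatal -ENOKEY from des3_ede_verify_key() through so callers can decide policy themselves. A caller sketch follows, with hypothetical naming — only the des_* identifiers and constants come from the code above.

```c
#include <crypto/des.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Expand a single-DES key, optionally tolerating weak keys.
 * Per des_expand_key() above, -ENOKEY means the schedule was built
 * but the key is one of the known weak keys; the allow_weak policy
 * knob and the demo_ name are hypothetical.
 */
static int demo_des_setkey(struct des_ctx *ctx, const u8 *key,
			   bool allow_weak)
{
	int err = des_expand_key(ctx, key, DES_KEY_SIZE);

	if (err == -ENOKEY && allow_weak)
		return 0;	/* schedule is usable; caller accepts the risk */

	return err;
}
```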