author     Linus Torvalds <torvalds@linux-foundation.org>    2014-12-13 13:33:26 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-12-13 13:33:26 -0800
commit     e3aa91a7cb21a595169b20c64f63ca39a91a0c43
tree       6a92a2e595629949a45336c770c2408abba8444d    /arch/arm64
parent     78a45c6f067824cf5d0a9fedea7339ac2e28603c
parent     8606813a6c8997fd3bb805186056d78670eb86ca
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
- The crypto API is now documented :)
- Disallow arbitrary module loading through crypto API.
- Allow get request with empty driver name through crypto_user.
- Allow speed testing of arbitrary hash functions.
- Add caam support for ctr(aes), gcm(aes) and their derivatives.
- nx now supports concurrent hashing properly.
- Add sahara support for SHA1/256.
- Add ARM64 version of CRC32.
- Misc fixes.
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (77 commits)
crypto: tcrypt - Allow speed testing of arbitrary hash functions
crypto: af_alg - add user space interface for AEAD
crypto: qat - fix problem with coalescing enable logic
crypto: sahara - add support for SHA1/256
crypto: sahara - replace tasklets with kthread
crypto: sahara - add support for i.MX53
crypto: sahara - fix spinlock initialization
crypto: arm - replace memset by memzero_explicit
crypto: powerpc - replace memset by memzero_explicit
crypto: sha - replace memset by memzero_explicit
crypto: sparc - replace memset by memzero_explicit
crypto: algif_skcipher - initialize upon init request
crypto: algif_skcipher - removed unneeded code
crypto: algif_skcipher - Fixed blocking recvmsg
crypto: drbg - use memzero_explicit() for clearing sensitive data
crypto: drbg - use MODULE_ALIAS_CRYPTO
crypto: include crypto- module prefix in template
crypto: user - add MODULE_ALIAS
crypto: sha-mb - remove a bogus NULL check
crytpo: qat - Fix 64 bytes requests
...
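
Several of the entries above replace memset() with memzero_explicit() when wiping sensitive buffers. As a hedged illustration only (the helper below is hypothetical and not taken from any patch in this pull), the pattern looks like this: a plain memset() on a stack buffer that is about to go out of scope can be removed by the compiler as a dead store, while memzero_explicit() adds a barrier so the wipe is guaranteed to happen.

#include <linux/string.h>	/* memzero_explicit() */
#include <linux/types.h>

/* Hypothetical example: wipe key material held in an on-stack buffer. */
static void example_wipe_key(void)
{
	u8 tmp_key[32];

	/* ... derive and use tmp_key ... */

	/* memset(tmp_key, 0, sizeof(tmp_key)) could be optimized away here. */
	memzero_explicit(tmp_key, sizeof(tmp_key));
}
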
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/crypto/Kconfig            |   4
-rw-r--r--  arch/arm64/crypto/Makefile           |   4
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c  |   2
-rw-r--r--  arch/arm64/crypto/aes-glue.c         |   8
-rw-r--r--  arch/arm64/crypto/crc32-arm64.c      | 274
5 files changed, 287 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index a38b02ce5f9a..2cf32e9887e1 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -49,4 +49,8 @@ config CRYPTO_AES_ARM64_NEON_BLK
 	select CRYPTO_AES
 	select CRYPTO_ABLK_HELPER
 
+config CRYPTO_CRC32_ARM64
+	tristate "CRC32 and CRC32C using optional ARMv8 instructions"
+	depends on ARM64
+	select CRYPTO_HASH
 endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index a3f935fde975..5720608c50b1 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -34,5 +34,9 @@ AFLAGS_aes-neon.o	:= -DINTERLEAVE=4
 
 CFLAGS_aes-glue-ce.o	:= -DUSE_V8_CRYPTO_EXTENSIONS
 
+obj-$(CONFIG_CRYPTO_CRC32_ARM64) += crc32-arm64.o
+
+CFLAGS_crc32-arm64.o	:= -mcpu=generic+crc
+
 $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
 	$(call if_changed_rule,cc_o_c)
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 0ac73b838fa3..6c348df5bf36 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -296,4 +296,4 @@ module_exit(aes_mod_exit);
 MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ccm(aes)");
+MODULE_ALIAS_CRYPTO("ccm(aes)");
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 801aae32841f..b1b5b893eb20 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -44,10 +44,10 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
 #define aes_xts_encrypt		neon_aes_xts_encrypt
 #define aes_xts_decrypt		neon_aes_xts_decrypt
 MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
-MODULE_ALIAS("ecb(aes)");
-MODULE_ALIAS("cbc(aes)");
-MODULE_ALIAS("ctr(aes)");
-MODULE_ALIAS("xts(aes)");
+MODULE_ALIAS_CRYPTO("ecb(aes)");
+MODULE_ALIAS_CRYPTO("cbc(aes)");
+MODULE_ALIAS_CRYPTO("ctr(aes)");
+MODULE_ALIAS_CRYPTO("xts(aes)");
 #endif
 
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c
new file mode 100644
index 000000000000..9499199924ae
--- /dev/null
+++ b/arch/arm64/crypto/crc32-arm64.c
@@ -0,0 +1,274 @@
+/*
+ * crc32-arm64.c - CRC32 and CRC32C using optional ARMv8 instructions
+ *
+ * Module based on crypto/crc32c_generic.c
+ *
+ * CRC32 loop taken from Ed Nevill's Hadoop CRC patch
+ * http://mail-archives.apache.org/mod_mbox/hadoop-common-dev/201406.mbox/%3C1403687030.3355.19.camel%40localhost.localdomain%3E
+ *
+ * Using inline assembly instead of intrinsics in order to be backwards
+ * compatible with older compilers.
+ *
+ * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/unaligned/access_ok.h>
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+MODULE_AUTHOR("Yazen Ghannam <yazen.ghannam@linaro.org>");
+MODULE_DESCRIPTION("CRC32 and CRC32C using optional ARMv8 instructions");
+MODULE_LICENSE("GPL v2");
+
+#define CRC32X(crc, value) __asm__("crc32x %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32W(crc, value) __asm__("crc32w %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32H(crc, value) __asm__("crc32h %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32B(crc, value) __asm__("crc32b %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CX(crc, value) __asm__("crc32cx %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CW(crc, value) __asm__("crc32cw %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CH(crc, value) __asm__("crc32ch %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CB(crc, value) __asm__("crc32cb %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+
+static u32 crc32_arm64_le_hw(u32 crc, const u8 *p, unsigned int len)
+{
+	s64 length = len;
+
+	while ((length -= sizeof(u64)) >= 0) {
+		CRC32X(crc, get_unaligned_le64(p));
+		p += sizeof(u64);
+	}
+
+	/* The following is more efficient than the straight loop */
+	if (length & sizeof(u32)) {
+		CRC32W(crc, get_unaligned_le32(p));
+		p += sizeof(u32);
+	}
+	if (length & sizeof(u16)) {
+		CRC32H(crc, get_unaligned_le16(p));
+		p += sizeof(u16);
+	}
+	if (length & sizeof(u8))
+		CRC32B(crc, *p);
+
+	return crc;
+}
+
+static u32 crc32c_arm64_le_hw(u32 crc, const u8 *p, unsigned int len)
+{
+	s64 length = len;
+
+	while ((length -= sizeof(u64)) >= 0) {
+		CRC32CX(crc, get_unaligned_le64(p));
+		p += sizeof(u64);
+	}
+
+	/* The following is more efficient than the straight loop */
+	if (length & sizeof(u32)) {
+		CRC32CW(crc, get_unaligned_le32(p));
+		p += sizeof(u32);
+	}
+	if (length & sizeof(u16)) {
+		CRC32CH(crc, get_unaligned_le16(p));
+		p += sizeof(u16);
+	}
+	if (length & sizeof(u8))
+		CRC32CB(crc, *p);
+
+	return crc;
+}
+
+#define CHKSUM_BLOCK_SIZE	1
+#define CHKSUM_DIGEST_SIZE	4
+
+struct chksum_ctx {
+	u32 key;
+};
+
+struct chksum_desc_ctx {
+	u32 crc;
+};
+
+static int chksum_init(struct shash_desc *desc)
+{
+	struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	ctx->crc = mctx->key;
+
+	return 0;
+}
+
+/*
+ * Setting the seed allows arbitrary accumulators and flexible XOR policy
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
+			 unsigned int keylen)
+{
+	struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
+
+	if (keylen != sizeof(mctx->key)) {
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	mctx->key = get_unaligned_le32(key);
+	return 0;
+}
+
+static int chksum_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int length)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	ctx->crc = crc32_arm64_le_hw(ctx->crc, data, length);
+	return 0;
+}
+
+static int chksumc_update(struct shash_desc *desc, const u8 *data,
+			  unsigned int length)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	ctx->crc = crc32c_arm64_le_hw(ctx->crc, data, length);
+	return 0;
+}
+
+static int chksum_final(struct shash_desc *desc, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	put_unaligned_le32(~ctx->crc, out);
+	return 0;
+}
+
+static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
+{
+	put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
+	return 0;
+}
+
+static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
+{
+	put_unaligned_le32(~crc32c_arm64_le_hw(crc, data, len), out);
+	return 0;
+}
+
+static int chksum_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	return __chksum_finup(ctx->crc, data, len, out);
+}
+
+static int chksumc_finup(struct shash_desc *desc, const u8 *data,
+			 unsigned int len, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	return __chksumc_finup(ctx->crc, data, len, out);
+}
+
+static int chksum_digest(struct shash_desc *desc, const u8 *data,
+			 unsigned int length, u8 *out)
+{
+	struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+	return __chksum_finup(mctx->key, data, length, out);
+}
+
+static int chksumc_digest(struct shash_desc *desc, const u8 *data,
+			  unsigned int length, u8 *out)
+{
+	struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+	return __chksumc_finup(mctx->key, data, length, out);
+}
+
+static int crc32_cra_init(struct crypto_tfm *tfm)
+{
+	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+
+	mctx->key = ~0;
+	return 0;
+}
+
+static struct shash_alg crc32_alg = {
+	.digestsize		= CHKSUM_DIGEST_SIZE,
+	.setkey			= chksum_setkey,
+	.init			= chksum_init,
+	.update			= chksum_update,
+	.final			= chksum_final,
+	.finup			= chksum_finup,
+	.digest			= chksum_digest,
+	.descsize		= sizeof(struct chksum_desc_ctx),
+	.base			= {
+		.cra_name		= "crc32",
+		.cra_driver_name	= "crc32-arm64-hw",
+		.cra_priority		= 300,
+		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
+		.cra_alignmask		= 0,
+		.cra_ctxsize		= sizeof(struct chksum_ctx),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= crc32_cra_init,
+	}
+};
+
+static struct shash_alg crc32c_alg = {
+	.digestsize		= CHKSUM_DIGEST_SIZE,
+	.setkey			= chksum_setkey,
+	.init			= chksum_init,
+	.update			= chksumc_update,
+	.final			= chksum_final,
+	.finup			= chksumc_finup,
+	.digest			= chksumc_digest,
+	.descsize		= sizeof(struct chksum_desc_ctx),
+	.base			= {
+		.cra_name		= "crc32c",
+		.cra_driver_name	= "crc32c-arm64-hw",
+		.cra_priority		= 300,
+		.cra_blocksize		= CHKSUM_BLOCK_SIZE,
+		.cra_alignmask		= 0,
+		.cra_ctxsize		= sizeof(struct chksum_ctx),
+		.cra_module		= THIS_MODULE,
+		.cra_init		= crc32_cra_init,
+	}
+};
+
+static int __init crc32_mod_init(void)
+{
+	int err;
+
+	err = crypto_register_shash(&crc32_alg);
+
+	if (err)
+		return err;
+
+	err = crypto_register_shash(&crc32c_alg);
+
+	if (err) {
+		crypto_unregister_shash(&crc32_alg);
+		return err;
+	}
+
+	return 0;
+}
+
+static void __exit crc32_mod_exit(void)
+{
+	crypto_unregister_shash(&crc32_alg);
+	crypto_unregister_shash(&crc32c_alg);
+}
+
+module_cpu_feature_match(CRC32, crc32_mod_init);
+module_exit(crc32_mod_exit);
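
For context, a minimal sketch of how an in-kernel caller might exercise the newly registered "crc32" shash through the crypto API. This demo module is hypothetical and not part of the patch; function names are invented, and error handling is trimmed to the essentials. With CONFIG_CRYPTO_CRC32_ARM64 enabled and the CPU advertising the CRC32 feature, the allocation below should resolve to the "crc32-arm64-hw" driver thanks to its cra_priority of 300.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init crc32_demo_init(void)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	u8 out[4];
	int err;

	/* Picks the highest-priority provider of "crc32" */
	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* shash_desc is followed by descsize bytes of per-request state */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;

	err = crypto_shash_digest(desc, "123456789", 9, out);
	if (!err)
		pr_info("crc32(\"123456789\") = %*phN\n", 4, out);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}

static void __exit crc32_demo_exit(void)
{
}

module_init(crc32_demo_init);
module_exit(crc32_demo_exit);
MODULE_LICENSE("GPL");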