Diffstat (limited to 'drivers/crypto/talitos.c')
 -rw-r--r--  drivers/crypto/talitos.c | 699
 1 file changed, 652 insertions(+), 47 deletions(-)
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dc558a097311..6a0f59d1fc5c 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1,7 +1,7 @@
 /*
  * talitos - Freescale Integrated Security Engine (SEC) device driver
  *
- * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ * Copyright (c) 2008-2010 Freescale Semiconductor, Inc.
  *
  * Scatterlist Crypto API glue code copied from files with the following:
  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
@@ -43,9 +43,12 @@
 #include <crypto/aes.h>
 #include <crypto/des.h>
 #include <crypto/sha.h>
+#include <crypto/md5.h>
 #include <crypto/aead.h>
 #include <crypto/authenc.h>
 #include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
 
 #include "talitos.h"
@@ -65,6 +68,13 @@ struct talitos_ptr {
 	__be32 ptr;	/* address */
 };
 
+static const struct talitos_ptr zero_entry = {
+	.len = 0,
+	.j_extent = 0,
+	.eptr = 0,
+	.ptr = 0
+};
+
 /* descriptor */
 struct talitos_desc {
 	__be32 hdr;			/* header high bits */
@@ -146,6 +156,7 @@ struct talitos_private {
 /* .features flag */
 #define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
 #define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
+#define TALITOS_FTR_SHA224_HWINIT 0x00000004
 
 static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
 {
@@ -692,7 +703,7 @@ static void talitos_unregister_rng(struct device *dev)
 #define TALITOS_MAX_KEY_SIZE		64
 #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 
-#define MD5_DIGEST_SIZE   16
+#define MD5_BLOCK_SIZE    64
 
 struct talitos_ctx {
 	struct device *dev;
@@ -705,6 +716,23 @@ struct talitos_ctx {
 	unsigned int authsize;
 };
 
+#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
+#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
+
+struct talitos_ahash_req_ctx {
+	u64 count;
+	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+	unsigned int hw_context_size;
+	u8 buf[HASH_MAX_BLOCK_SIZE];
+	u8 bufnext[HASH_MAX_BLOCK_SIZE];
+	unsigned int swinit;
+	unsigned int first;
+	unsigned int last;
+	unsigned int to_hash_later;
+	struct scatterlist bufsl[2];
+	struct scatterlist *psrc;
+};
+
 static int aead_setauthsize(struct crypto_aead *authenc,
 			    unsigned int authsize)
 {
@@ -821,10 +849,14 @@ static void talitos_sg_unmap(struct device *dev,
 		else
 			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 
-		if (edesc->dst_is_chained)
-			talitos_unmap_sg_chain(dev, dst, DMA_FROM_DEVICE);
-		else
-			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+		if (dst) {
+			if (edesc->dst_is_chained)
+				talitos_unmap_sg_chain(dev, dst,
+						       DMA_FROM_DEVICE);
+			else
+				dma_unmap_sg(dev, dst, dst_nents,
+					     DMA_FROM_DEVICE);
+		}
 	} else if (edesc->src_is_chained)
 		talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
@@ -1114,12 +1146,67 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
 	return sg_nents;
 }
 
+/**
+ * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
+ * @sgl:	The SG list
+ * @nents:	Number of SG entries
+ * @buf:	Where to copy to
+ * @buflen:	The number of bytes to copy
+ * @skip:	The number of bytes to skip before copying.
+ *		Note: skip + buflen should equal SG total size.
+ *
+ * Returns the number of copied bytes.
+ *
+ **/
+static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
+				    void *buf, size_t buflen, unsigned int skip)
+{
+	unsigned int offset = 0;
+	unsigned int boffset = 0;
+	struct sg_mapping_iter miter;
+	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+	size_t total_buffer = buflen + skip;
+
+	sg_flags |= SG_MITER_FROM_SG;
+
+	sg_miter_start(&miter, sgl, nents, sg_flags);
+
+	local_irq_save(flags);
+
+	while (sg_miter_next(&miter) && offset < total_buffer) {
+		unsigned int len;
+		unsigned int ignore;
+
+		if ((offset + miter.length) > skip) {
+			if (offset < skip) {
+				/* Copy part of this segment */
+				ignore = skip - offset;
+				len = miter.length - ignore;
+				memcpy(buf + boffset, miter.addr + ignore, len);
+			} else {
+				/* Copy all of this segment */
+				len = miter.length;
+				memcpy(buf + boffset, miter.addr, len);
+			}
+			boffset += len;
+		}
+		offset += miter.length;
+	}
+
+	sg_miter_stop(&miter);
+
+	local_irq_restore(flags);
+	return boffset;
+}
+
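For orientation, this helper is used by ahash_process_req() further down to park a trailing partial block for a later request. A minimal caller sketch, assuming the simple case where no bytes are carried in from a previous update (the real code also folds in a carried-in index; variable names follow the driver):

	/* Save the last 'to_hash_later' bytes of an nbytes-long scatterlist. */
	unsigned int to_hash_later = nbytes & (blocksize - 1);
	int nents = sg_count(areq->src, nbytes, &chained);

	sg_copy_end_to_buffer(areq->src, nents, req_ctx->bufnext,
			      to_hash_later,		/* buflen: bytes to keep  */
			      nbytes - to_hash_later);	/* skip: bytes hashed now */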
 /*
  * allocate and map the extended descriptor
  */
 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 						 struct scatterlist *src,
 						 struct scatterlist *dst,
+						 int hash_result,
 						 unsigned int cryptlen,
 						 unsigned int authsize,
 						 int icv_stashing,
@@ -1139,11 +1226,16 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
 	src_nents = (src_nents == 1) ? 0 : src_nents;
 
-	if (dst == src) {
-		dst_nents = src_nents;
+	if (hash_result) {
+		dst_nents = 0;
 	} else {
-		dst_nents = sg_count(dst, cryptlen + authsize, &dst_chained);
-		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+		if (dst == src) {
+			dst_nents = src_nents;
+		} else {
+			dst_nents = sg_count(dst, cryptlen + authsize,
+					     &dst_chained);
+			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
+		}
 	}
 
 	/*
@@ -1172,8 +1264,10 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	edesc->src_is_chained = src_chained;
 	edesc->dst_is_chained = dst_chained;
 	edesc->dma_len = dma_len;
-	edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
-					     edesc->dma_len, DMA_BIDIRECTIONAL);
+	if (dma_len)
+		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+						     edesc->dma_len,
+						     DMA_BIDIRECTIONAL);
 
 	return edesc;
 }
@@ -1184,7 +1278,7 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 
-	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
 				   areq->cryptlen, ctx->authsize, icv_stashing,
 				   areq->base.flags);
 }
@@ -1441,8 +1535,8 @@ static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 
-	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, areq->nbytes,
-				   0, 0, areq->base.flags);
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
+				   areq->nbytes, 0, 0, areq->base.flags);
 }
 
 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
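The new "if (dma_len)" guard above pairs with the sizing logic earlier in talitos_edesc_alloc(), which this patch leaves unchanged. Quoted approximately from the surrounding function, as a sketch rather than an exact excerpt:

	/*
	 * With hash_result set and a single-segment source, src_nents and
	 * dst_nents are both 0, so dma_len stays 0, no link table is
	 * allocated, and the dma_map_single() call is now skipped.
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2) *
				 sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}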
@@ -1478,15 +1572,329 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
 	return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
 }
 
+static void common_nonsnoop_hash_unmap(struct device *dev,
+				       struct talitos_edesc *edesc,
+				       struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+
+	/* When using hashctx-in, must unmap it. */
+	if (edesc->desc.ptr[1].len)
+		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
+					 DMA_TO_DEVICE);
+
+	if (edesc->desc.ptr[2].len)
+		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
+					 DMA_TO_DEVICE);
+
+	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
+
+	if (edesc->dma_len)
+		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+				 DMA_BIDIRECTIONAL);
+
+}
+
+static void ahash_done(struct device *dev,
+		       struct talitos_desc *desc, void *context,
+		       int err)
+{
+	struct ahash_request *areq = context;
+	struct talitos_edesc *edesc =
+		 container_of(desc, struct talitos_edesc, desc);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	if (!req_ctx->last && req_ctx->to_hash_later) {
+		/* Position any partial block for next update/final/finup */
+		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
+	}
+	common_nonsnoop_hash_unmap(dev, edesc, areq);
+
+	kfree(edesc);
+
+	areq->base.complete(&areq->base, err);
+}
+
+static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+				struct ahash_request *areq, unsigned int length,
+				void (*callback) (struct device *dev,
+						  struct talitos_desc *desc,
+						  void *context, int error))
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct device *dev = ctx->dev;
+	struct talitos_desc *desc = &edesc->desc;
+	int sg_count, ret;
+
+	/* first DWORD empty */
+	desc->ptr[0] = zero_entry;
+
+	/* hash context in */
+	if (!req_ctx->first || req_ctx->swinit) {
+		map_single_talitos_ptr(dev, &desc->ptr[1],
+				       req_ctx->hw_context_size,
+				       (char *)req_ctx->hw_context, 0,
+				       DMA_TO_DEVICE);
+		req_ctx->swinit = 0;
+	} else {
+		desc->ptr[1] = zero_entry;
+		/* Indicate next op is not the first. */
+		req_ctx->first = 0;
+	}
+
+	/* HMAC key */
+	if (ctx->keylen)
+		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
+				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
+	else
+		desc->ptr[2] = zero_entry;
+
+	/*
+	 * data in
+	 */
+	desc->ptr[3].len = cpu_to_be16(length);
+	desc->ptr[3].j_extent = 0;
+
+	sg_count = talitos_map_sg(dev, req_ctx->psrc,
+				  edesc->src_nents ? : 1,
+				  DMA_TO_DEVICE,
+				  edesc->src_is_chained);
+
+	if (sg_count == 1) {
+		to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
+	} else {
+		sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
+					  &edesc->link_tbl[0]);
+		if (sg_count > 1) {
+			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
+			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
+			dma_sync_single_for_device(ctx->dev,
+						   edesc->dma_link_tbl,
+						   edesc->dma_len,
+						   DMA_BIDIRECTIONAL);
+		} else {
+			/* Only one segment now, so no link tbl needed */
+			to_talitos_ptr(&desc->ptr[3],
+				       sg_dma_address(req_ctx->psrc));
+		}
+	}
+
+	/* fifth DWORD empty */
+	desc->ptr[4] = zero_entry;
+
+	/* hash/HMAC out -or- hash context out */
+	if (req_ctx->last)
+		map_single_talitos_ptr(dev, &desc->ptr[5],
+				       crypto_ahash_digestsize(tfm),
+				       areq->result, 0, DMA_FROM_DEVICE);
+	else
+		map_single_talitos_ptr(dev, &desc->ptr[5],
+				       req_ctx->hw_context_size,
+				       req_ctx->hw_context, 0, DMA_FROM_DEVICE);
+
+	/* last DWORD empty */
+	desc->ptr[6] = zero_entry;
+
+	ret = talitos_submit(dev, desc, callback, areq);
+	if (ret != -EINPROGRESS) {
+		common_nonsnoop_hash_unmap(dev, edesc, areq);
+		kfree(edesc);
+	}
+	return ret;
+}
+
+static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
+					       unsigned int nbytes)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
+				   nbytes, 0, 0, areq->base.flags);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	/* Initialize the context */
+	req_ctx->count = 0;
+	req_ctx->first = 1; /* first indicates h/w must init its context */
+	req_ctx->swinit = 0; /* assume h/w init of context */
+	req_ctx->hw_context_size =
+		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+
+	return 0;
+}
+
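Taken together, common_nonsnoop_hash() above programs the seven descriptor pointers as follows; this is only a summary of the code just shown, not additional driver logic:

	/*
	 * SEC descriptor pointer usage for a hash operation:
	 *
	 *   ptr[0]  unused (zero_entry)
	 *   ptr[1]  hash context in (skipped on the first op unless swinit)
	 *   ptr[2]  HMAC key (only when ctx->keylen != 0)
	 *   ptr[3]  data in (direct pointer, or link-table jump)
	 *   ptr[4]  unused (zero_entry)
	 *   ptr[5]  digest out on the last op, else hash context out
	 *   ptr[6]  unused (zero_entry)
	 */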
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	ahash_init(areq);
+	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
+
+	req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
+	req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
+	req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
+	req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
+	req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
+	req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
+	req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
+	req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
+
+	/* init 64-bit count */
+	req_ctx->hw_context[8] = 0;
+	req_ctx->hw_context[9] = 0;
+
+	return 0;
+}
+
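The trick works because SHA-224 is SHA-256 run from a different initial value, with the 32-byte result truncated to 28 bytes. A software analogue, illustrative only: sha256_compress() is a hypothetical compression function (not a kernel API), the input is assumed already padded, and the real algorithm also emits the state words big-endian; SHA224_H0..H7, SHA224_DIGEST_SIZE and SHA256_BLOCK_SIZE are real constants from <crypto/sha.h>:

	extern void sha256_compress(u32 state[8], const u8 *block); /* hypothetical */

	static void sha224_via_sha256(const u8 *blocks, unsigned int nblocks,
				      u8 *digest)
	{
		u32 state[8] = {
			SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
			SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
		};
		unsigned int i;

		for (i = 0; i < nblocks; i++)
			sha256_compress(state, blocks + i * SHA256_BLOCK_SIZE);

		memcpy(digest, state, SHA224_DIGEST_SIZE);	/* 28 of 32 bytes */
	}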
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct talitos_edesc *edesc;
+	unsigned int blocksize =
+			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+	unsigned int nbytes_to_hash;
+	unsigned int to_hash_later;
+	unsigned int index;
+	int chained;
+
+	index = req_ctx->count & (blocksize - 1);
+	req_ctx->count += nbytes;
+
+	if (!req_ctx->last && (index + nbytes) < blocksize) {
+		/* Buffer the partial block */
+		sg_copy_to_buffer(areq->src,
+				  sg_count(areq->src, nbytes, &chained),
+				  req_ctx->buf + index, nbytes);
+		return 0;
+	}
+
+	if (index) {
+		/* partial block from previous update; chain it in. */
+		sg_init_table(req_ctx->bufsl, (nbytes) ? 2 : 1);
+		sg_set_buf(req_ctx->bufsl, req_ctx->buf, index);
+		if (nbytes)
+			scatterwalk_sg_chain(req_ctx->bufsl, 2,
+					     areq->src);
+		req_ctx->psrc = req_ctx->bufsl;
+	} else {
+		req_ctx->psrc = areq->src;
+	}
+	nbytes_to_hash = index + nbytes;
+	if (!req_ctx->last) {
+		to_hash_later = (nbytes_to_hash & (blocksize - 1));
+		if (to_hash_later) {
+			int nents;
+			/* Must copy to_hash_later bytes from the end
+			 * to bufnext (a partial block) for later.
+			 */
+			nents = sg_count(areq->src, nbytes, &chained);
+			sg_copy_end_to_buffer(areq->src, nents,
+					      req_ctx->bufnext,
+					      to_hash_later,
+					      nbytes - to_hash_later);
+
+			/* Adjust count for what will be hashed now */
+			nbytes_to_hash -= to_hash_later;
+		}
+		req_ctx->to_hash_later = to_hash_later;
+	}
+
+	/* allocate extended descriptor */
+	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	edesc->desc.hdr = ctx->desc_hdr_template;
+
+	/* On last one, request SEC to pad; otherwise continue */
+	if (req_ctx->last)
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
+	else
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
+
+	/* request SEC to INIT hash. */
+	if (req_ctx->first && !req_ctx->swinit)
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+
+	/* When the tfm context has a keylen, it's an HMAC.
+	 * A first or last (ie. not middle) descriptor must request HMAC.
+	 */
+	if (ctx->keylen && (req_ctx->first || req_ctx->last))
+		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
+				    ahash_done);
+}
+
+static int ahash_update(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 0;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_final(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, 0);
+}
+
+static int ahash_finup(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
+static int ahash_digest(struct ahash_request *areq)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+	ahash->init(areq);
+	req_ctx->last = 1;
+
+	return ahash_process_req(areq, areq->nbytes);
+}
+
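To make the buffering concrete: with SHA-256 (64-byte blocks), an update of 100 bytes on a fresh state gives index = 0, nbytes_to_hash = 100 and to_hash_later = 100 mod 64 = 36, so 64 bytes are hashed now and 36 are parked in bufnext, which ahash_done() then copies into buf for the next request. From the caller's side the registered algorithms are driven through the standard kernel ahash API; a minimal synchronous-style sketch (illustrative names, error paths trimmed, and a real caller must supply a completion callback and wait when -EINPROGRESS is returned, since these transforms are CRYPTO_ALG_ASYNC):

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int sha256_digest_example(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		int err;

		/* Resolves to sha256-talitos when the h/w driver wins on priority. */
		tfm = crypto_alloc_ahash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}

		/* data must be in linear, DMA-able memory (e.g. kmalloc'd). */
		sg_init_one(&sg, data, len);
		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_crypt(req, &sg, out, len);

		err = crypto_ahash_digest(req);	/* may return -EINPROGRESS */

		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return err;
	}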
 struct talitos_alg_template {
-	struct crypto_alg alg;
+	u32 type;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg hash;
+	} alg;
 	__be32 desc_hdr_template;
 };
 
 static struct talitos_alg_template driver_algs[] = {
 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(sha1),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
 			.cra_blocksize = AES_BLOCK_SIZE,
@@ -1511,8 +1919,8 @@ static struct talitos_alg_template driver_algs[] = {
 				     DESC_HDR_MODE1_MDEU_PAD |
 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1538,8 +1946,8 @@ static struct talitos_alg_template driver_algs[] = {
 				     DESC_HDR_MODE1_MDEU_PAD |
 				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(sha256),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
 			.cra_blocksize = AES_BLOCK_SIZE,
@@ -1564,8 +1972,8 @@ static struct talitos_alg_template driver_algs[] = {
 				     DESC_HDR_MODE1_MDEU_PAD |
 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1591,8 +1999,8 @@ static struct talitos_alg_template driver_algs[] = {
 				     DESC_HDR_MODE1_MDEU_PAD |
 				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(md5),cbc(aes))",
 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
 			.cra_blocksize = AES_BLOCK_SIZE,
@@ -1617,8 +2025,8 @@ static struct talitos_alg_template driver_algs[] = {
 				     DESC_HDR_MODE1_MDEU_PAD |
 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_AEAD,
+		.alg.crypto = {
 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
 			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1645,8 +2053,8 @@ static struct talitos_alg_template driver_algs[] = {
 				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
 	},
 	/* ABLKCIPHER algorithms. */
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
 			.cra_name = "cbc(aes)",
 			.cra_driver_name = "cbc-aes-talitos",
 			.cra_blocksize = AES_BLOCK_SIZE,
@@ -1667,8 +2075,8 @@ static struct talitos_alg_template driver_algs[] = {
 				     DESC_HDR_SEL0_AESU |
 				     DESC_HDR_MODE0_AESU_CBC,
 	},
-	{
-		.alg = {
+	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.alg.crypto = {
 			.cra_name = "cbc(des3_ede)",
 			.cra_driver_name = "cbc-3des-talitos",
 			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1689,14 +2097,140 @@ static struct talitos_alg_template driver_algs[] = {
 			     DESC_HDR_SEL0_DEU |
 			     DESC_HDR_MODE0_DEU_CBC |
 			     DESC_HDR_MODE0_DEU_3DES,
-	}
+	},
+	/* AHASH algorithms. */
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = MD5_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "md5-talitos",
+				.cra_blocksize = MD5_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_MD5,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "sha1-talitos",
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA1,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha224",
+				.cra_driver_name = "sha224-talitos",
+				.cra_blocksize = SHA224_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA224,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha256",
+				.cra_driver_name = "sha256-talitos",
+				.cra_blocksize = SHA256_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUA |
+				     DESC_HDR_MODE0_MDEU_SHA256,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha384",
+				.cra_driver_name = "sha384-talitos",
+				.cra_blocksize = SHA384_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUB |
+				     DESC_HDR_MODE0_MDEUB_SHA384,
+	},
+	{	.type = CRYPTO_ALG_TYPE_AHASH,
+		.alg.hash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.base = {
+				.cra_name = "sha512",
+				.cra_driver_name = "sha512-talitos",
+				.cra_blocksize = SHA512_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
+					     CRYPTO_ALG_ASYNC,
+				.cra_type = &crypto_ahash_type
+			}
+		},
+		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+				     DESC_HDR_SEL0_MDEUB |
+				     DESC_HDR_MODE0_MDEUB_SHA512,
+	},
 };
 
 struct talitos_crypto_alg {
 	struct list_head entry;
 	struct device *dev;
-	__be32 desc_hdr_template;
-	struct crypto_alg crypto_alg;
+	struct talitos_alg_template algt;
 };
 
 static int talitos_cra_init(struct crypto_tfm *tfm)
@@ -1705,13 +2239,28 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
 	struct talitos_crypto_alg *talitos_alg;
 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	talitos_alg = container_of(alg, struct talitos_crypto_alg, crypto_alg);
+	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+		talitos_alg = container_of(__crypto_ahash_alg(alg),
+					   struct talitos_crypto_alg,
+					   algt.alg.hash);
+	else
+		talitos_alg = container_of(alg, struct talitos_crypto_alg,
+					   algt.alg.crypto);
 
 	/* update context with ptr to dev */
 	ctx->dev = talitos_alg->dev;
 
 	/* copy descriptor header template value */
-	ctx->desc_hdr_template = talitos_alg->desc_hdr_template;
+	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
+
+	return 0;
+}
+
+static int talitos_cra_init_aead(struct crypto_tfm *tfm)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	talitos_cra_init(tfm);
 
 	/* random first IV */
 	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
@@ -1719,6 +2268,19 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
 	return 0;
 }
 
+static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
+{
+	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	talitos_cra_init(tfm);
+
+	ctx->keylen = 0;
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct talitos_ahash_req_ctx));
+
+	return 0;
+}
+
 /*
  * given the alg's descriptor header template, determine whether descriptor
  * type and primary/secondary execution units required match the hw
@@ -1747,7 +2309,15 @@ static int talitos_remove(struct of_device *ofdev)
 	int i;
 
 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
-		crypto_unregister_alg(&t_alg->crypto_alg);
+		switch (t_alg->algt.type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		case CRYPTO_ALG_TYPE_AEAD:
+			crypto_unregister_alg(&t_alg->algt.alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&t_alg->algt.alg.hash);
+			break;
+		}
 		list_del(&t_alg->entry);
 		kfree(t_alg);
 	}
@@ -1781,6 +2351,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 					struct talitos_alg_template *template)
 {
+	struct talitos_private *priv = dev_get_drvdata(dev);
 	struct talitos_crypto_alg *t_alg;
 	struct crypto_alg *alg;
 
@@ -1788,16 +2359,36 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 	if (!t_alg)
 		return ERR_PTR(-ENOMEM);
 
-	alg = &t_alg->crypto_alg;
-	*alg = template->alg;
+	t_alg->algt = *template;
+
+	switch (t_alg->algt.type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		alg = &t_alg->algt.alg.crypto;
+		alg->cra_init = talitos_cra_init;
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		alg = &t_alg->algt.alg.crypto;
+		alg->cra_init = talitos_cra_init_aead;
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		alg = &t_alg->algt.alg.hash.halg.base;
+		alg->cra_init = talitos_cra_init_ahash;
+		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
+		    !strcmp(alg->cra_name, "sha224")) {
+			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
+			t_alg->algt.desc_hdr_template =
+					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+					DESC_HDR_SEL0_MDEUA |
+					DESC_HDR_MODE0_MDEU_SHA256;
+		}
+		break;
+	}
 
 	alg->cra_module = THIS_MODULE;
-	alg->cra_init = talitos_cra_init;
 	alg->cra_priority = TALITOS_CRA_PRIORITY;
 	alg->cra_alignmask = 0;
 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
 
-	t_alg->desc_hdr_template = template->desc_hdr_template;
 	t_alg->dev = dev;
 
 	return t_alg;
@@ -1877,7 +2468,8 @@ static int talitos_probe(struct of_device *ofdev,
 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
 
 	if (of_device_is_compatible(np, "fsl,sec2.1"))
-		priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
+		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
+				  TALITOS_FTR_SHA224_HWINIT;
 
 	priv->chan = kzalloc(sizeof(struct talitos_channel) *
 			     priv->num_channels, GFP_KERNEL);
@@ -1931,6 +2523,7 @@ static int talitos_probe(struct of_device *ofdev,
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
 			struct talitos_crypto_alg *t_alg;
+			char *name = NULL;
 
 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
 			if (IS_ERR(t_alg)) {
@@ -1938,15 +2531,27 @@ static int talitos_probe(struct of_device *ofdev,
 				goto err_out;
 			}
 
-			err = crypto_register_alg(&t_alg->crypto_alg);
+			switch (t_alg->algt.type) {
+			case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			case CRYPTO_ALG_TYPE_AEAD:
+				err = crypto_register_alg(
+						&t_alg->algt.alg.crypto);
+				name = t_alg->algt.alg.crypto.cra_driver_name;
+				break;
+			case CRYPTO_ALG_TYPE_AHASH:
+				err = crypto_register_ahash(
+						&t_alg->algt.alg.hash);
+				name =
+				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
+				break;
+			}
 			if (err) {
 				dev_err(dev, "%s alg registration failed\n",
-					t_alg->crypto_alg.cra_driver_name);
+					name);
 				kfree(t_alg);
 			} else {
 				list_add_tail(&t_alg->entry, &priv->alg_list);
-				dev_info(dev, "%s\n",
-					 t_alg->crypto_alg.cra_driver_name);
+				dev_info(dev, "%s\n", name);
 			}
 		}
 	}