author     Linus Torvalds <torvalds@linux-foundation.org>	2019-07-08 20:57:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2019-07-08 20:57:08 -0700
commit     4d2fa8b44b891f0da5ceda3e5a1402ccf0ab6f26 (patch)
tree       cbb763ec5e74cfbaac6ce53df277883cb78a8a1a /drivers/crypto/caam/caamalg_qi2.c
parent     8b68150883ca466a23e90902dd4113b22e692f04 (diff)
parent     f3880a23564e3172437285ebcb5b8a124539fdae (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Here is the crypto update for 5.3:
API:
- Test shash interface directly in testmgr
- cra_driver_name is now mandatory
Algorithms:
- Replace arc4 crypto_cipher with library helper
- Implement 5 way interleave for ECB, CBC and CTR on arm64
- Add xxhash
- Add continuous self-test on noise source to drbg
- Update jitter RNG
Drivers:
- Add support for SHA204A random number generator
- Add support for 7211 in iproc-rng200
- Fix fuzz test failures in inside-secure
- Fix fuzz test failures in talitos
- Fix fuzz test failures in qat"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (143 commits)
crypto: stm32/hash - remove interruptible condition for dma
crypto: stm32/hash - Fix hmac issue more than 256 bytes
crypto: stm32/crc32 - rename driver file
crypto: amcc - remove memset after dma_alloc_coherent
crypto: ccp - Switch to SPDX license identifiers
crypto: ccp - Validate the the error value used to index error messages
crypto: doc - Fix formatting of new crypto engine content
crypto: doc - Add parameter documentation
crypto: arm64/aes-ce - implement 5 way interleave for ECB, CBC and CTR
crypto: arm64/aes-ce - add 5 way interleave routines
crypto: talitos - drop icv_ool
crypto: talitos - fix hash on SEC1.
crypto: talitos - move struct talitos_edesc into talitos.h
lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
crypto: asymmetric_keys - select CRYPTO_HASH where needed
crypto: serpent - mark __serpent_setkey_sbox noinline
crypto: testmgr - dynamically allocate crypto_shash
crypto: testmgr - dynamically allocate testvec_config
crypto: talitos - eliminate unneeded 'done' functions at build time
...
Diffstat (limited to 'drivers/crypto/caam/caamalg_qi2.c')
-rw-r--r--	drivers/crypto/caam/caamalg_qi2.c	202
1 file changed, 120 insertions(+), 82 deletions(-)
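The diff below leans on a pad_sg_nents() helper that is defined elsewhere in the caam driver, not in this file. Per the in-diff comments ("HW reads 4 S/G entries at a time"), its assumed behavior is to round an S/G entry count up to a multiple of 4 so the engine's burst reads never run past the end of a table; a sketch of that assumed shape:

#include <linux/kernel.h>	/* ALIGN() */

/*
 * Assumed shape of the helper used throughout the hunks below: pad an
 * S/G entry count to a multiple of 4, the engine's read-burst size.
 */
static inline int pad_sg_nents(int sg_nents)
{
	return ALIGN(sg_nents, 4);
}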
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 2b2980a8a9b9..06bf32c32cbd 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /*
  * Copyright 2015-2016 Freescale Semiconductor Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2019 NXP
  */
 
 #include "compat.h"
@@ -140,7 +140,8 @@ static struct caam_request *to_caam_req(struct crypto_async_request *areq)
 static void caam_unmap(struct device *dev, struct scatterlist *src,
 		       struct scatterlist *dst, int src_nents,
 		       int dst_nents, dma_addr_t iv_dma, int ivsize,
-		       dma_addr_t qm_sg_dma, int qm_sg_bytes)
+		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
+		       int qm_sg_bytes)
 {
 	if (dst != src) {
 		if (src_nents)
@@ -152,7 +153,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
 	}
 
 	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 
 	if (qm_sg_bytes)
 		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
@@ -371,6 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	dma_addr_t qm_sg_dma, iv_dma = 0;
 	int ivsize = 0;
@@ -387,23 +389,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	}
 
 	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-							(-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(dst_nents);
 		}
@@ -434,13 +434,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			mapped_dst_nents = 0;
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
@@ -460,9 +460,25 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	/*
 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
 	 * Input is not contiguous.
+	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+	 * the end of the table by allocating more S/G entries. Logic:
+	 * if (src != dst && output S/G)
+	 *      pad output S/G, if needed
+	 * else if (src == dst && S/G)
+	 *      overlapping S/Gs; pad one of them
+	 * else if (input S/G) ...
+	 *      pad input S/G, if needed
 	 */
-	qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
-		      (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
+	if (mapped_dst_nents > 1)
+		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
+	else if ((req->src == req->dst) && (mapped_src_nents > 1))
+		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
+				  1 + !!ivsize +
+				  pad_sg_nents(mapped_src_nents));
+	else
+		qm_sg_nents = pad_sg_nents(qm_sg_nents);
+
 	sg_table = &edesc->sgt[0];
 	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
 	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -470,7 +486,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_nents, ivsize);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -485,7 +501,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		if (dma_mapping_error(dev, iv_dma)) {
 			dev_err(dev, "unable to map IV\n");
 			caam_unmap(dev, req->src, req->dst, src_nents,
-				   dst_nents, 0, 0, 0, 0);
+				   dst_nents, 0, 0, DMA_NONE, 0, 0);
 			qi_cache_free(edesc);
 			return ERR_PTR(-ENOMEM);
 		}
@@ -509,7 +525,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
 		dev_err(dev, "unable to map assoclen\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -520,19 +536,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
 		qm_sg_index++;
 	}
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
 	qm_sg_index += mapped_src_nents;
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 qm_sg_index, 0);
+		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, qm_sg_dma)) {
 		dev_err(dev, "unable to map S/G table\n");
 		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -559,6 +574,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
 					  (1 + !!ivsize) * sizeof(*sg_table));
 		}
+	} else if (!mapped_dst_nents) {
+		/*
+		 * crypto engine requires the output entry to be present when
+		 * "frame list" FD is used.
+		 * Since engine does not support FMT=2'b11 (unused entry type),
+		 * leaving out_fle zeroized is the best option.
+		 */
+		goto skip_out_fle;
 	} else if (mapped_dst_nents == 1) {
 		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
 		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
@@ -570,6 +593,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 	dpaa2_fl_set_len(out_fle, out_len);
 
+skip_out_fle:
 	return edesc;
 }
@@ -1077,14 +1101,26 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	qm_sg_ents = 1 + mapped_src_nents;
 	dst_sg_idx = qm_sg_ents;
 
-	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+	/*
+	 * Input, output HW S/G tables: [IV, src][dst, IV]
+	 * IV entries point to the same buffer
+	 * If src == dst, S/G entries are reused (S/G tables overlap)
+	 *
+	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+	 * the end of the table by allocating more S/G entries.
+	 */
+	if (req->src != req->dst)
+		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
+	else
+		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
+
 	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
 	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
 		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
 		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_ents, ivsize);
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1093,7 +1129,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	if (unlikely(!edesc)) {
 		dev_err(dev, "could not allocate extended descriptor\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1102,11 +1138,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	iv = (u8 *)(sg_table + qm_sg_ents);
 	memcpy(iv, req->iv, ivsize);
 
-	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, iv_dma)) {
 		dev_err(dev, "unable to map IV\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
+			   0, DMA_NONE, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1117,18 +1153,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
 
-	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 dst_sg_idx, 0);
+	if (req->src != req->dst)
+		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
+
+	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
+			 ivsize, 0);
 
 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
 					  DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
 		dev_err(dev, "unable to map S/G table\n");
 		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
+			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1136,23 +1174,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
 	dpaa2_fl_set_final(in_fle, true);
 	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
-	dpaa2_fl_set_len(out_fle, req->cryptlen);
+	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
 
 	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
 	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
 
-	if (req->src == req->dst) {
-		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+
+	if (req->src == req->dst)
 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
 				  sizeof(*sg_table));
-	} else if (mapped_dst_nents > 1) {
-		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+	else
 		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
 				  dst_sg_idx * sizeof(*sg_table));
-	} else {
-		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
-	}
 
 	return edesc;
 }
@@ -1164,7 +1198,8 @@ static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
 	int ivsize = crypto_aead_ivsize(aead);
 
 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
 	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 }
 
@@ -1175,7 +1210,8 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 
 	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
+		   edesc->qm_sg_bytes);
 }
 
 static void aead_encrypt_done(void *cbk_ctx, u32 status)
@@ -1324,7 +1360,7 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
 	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
-	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+	caam_dump_sg("dst @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
@@ -1332,10 +1368,10 @@ static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
 
 	/*
 	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block. This is used e.g. by the CTS mode.
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
 	 */
-	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
-				 ivsize, 0);
+	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
 
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
@@ -1362,11 +1398,19 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
 	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
 			     edesc->src_nents > 1 ? 100 : ivsize, 1);
-	caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+	caam_dump_sg("dst @" __stringify(__LINE__)": ",
 		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
 		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
 
 	skcipher_unmap(ctx->dev, edesc, req);
+
+	/*
+	 * The crypto API expects us to set the IV (req->iv) to the last
+	 * ciphertext block (CBC mode) or last counter (CTR mode).
+	 * This is used e.g. by the CTS mode.
+	 */
+	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
+
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
 }
@@ -1405,7 +1449,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct caam_request *caam_req = skcipher_request_ctx(req);
-	int ivsize = crypto_skcipher_ivsize(skcipher);
 	int ret;
 
 	/* allocate extended descriptor */
@@ -1413,13 +1456,6 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-	/*
-	 * The crypto API expects us to set the IV (req->iv) to the last
-	 * ciphertext block.
-	 */
-	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
-				 ivsize, 0);
-
 	caam_req->flc = &ctx->flc[DECRYPT];
 	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
 	caam_req->cbk = skcipher_decrypt_done;
@@ -3380,9 +3416,9 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -3409,7 +3445,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 		edesc->src_nents = src_nents;
 		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
-		qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
+		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
 			      sizeof(*sg_table);
 		sg_table = &edesc->sgt[0];
@@ -3423,7 +3459,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 			goto unmap_ctx;
 
 		if (mapped_nents) {
-			sg_to_qm_sg_last(req->src, mapped_nents,
+			sg_to_qm_sg_last(req->src, src_len,
 					 sg_table + qm_sg_src_index, 0);
 			if (*next_buflen)
 				scatterwalk_map_and_copy(next_buf, req->src,
@@ -3494,7 +3530,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		      GFP_KERNEL : GFP_ATOMIC;
 	int buflen = *current_buflen(state);
-	int qm_sg_bytes, qm_sg_src_index;
+	int qm_sg_bytes;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
 	struct dpaa2_sg_entry *sg_table;
@@ -3505,8 +3541,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	if (!edesc)
 		return -ENOMEM;
 
-	qm_sg_src_index = 1 + (buflen ? 1 : 0);
-	qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
+	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3518,7 +3553,7 @@ static int ahash_final_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap_ctx;
 
-	dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
+	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -3599,7 +3634,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
 
 	edesc->src_nents = src_nents;
 	qm_sg_src_index = 1 + (buflen ? 1 : 0);
-	qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
+	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
+		      sizeof(*sg_table);
 	sg_table = &edesc->sgt[0];
 
 	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
@@ -3611,7 +3647,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap_ctx;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -3696,8 +3732,8 @@ static int ahash_digest(struct ahash_request *req)
 		int qm_sg_bytes;
 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
 
-		qm_sg_bytes = mapped_nents * sizeof(*sg_table);
-		sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
+		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
 						  qm_sg_bytes, DMA_TO_DEVICE);
 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -3840,9 +3876,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - *next_buflen);
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -3868,14 +3904,15 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		}
 
 		edesc->src_nents = src_nents;
-		qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
+		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
+			      sizeof(*sg_table);
 		sg_table = &edesc->sgt[0];
 
 		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
 		if (ret)
 			goto unmap_ctx;
 
-		sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
 
 		if (*next_buflen)
 			scatterwalk_map_and_copy(next_buf, req->src,
@@ -3987,14 +4024,14 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	}
 
 	edesc->src_nents = src_nents;
-	qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
+	qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
 	sg_table = &edesc->sgt[0];
 
 	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
 	if (ret)
 		goto unmap;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -4064,9 +4101,9 @@ static int ahash_update_first(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -4101,8 +4138,9 @@ static int ahash_update_first(struct ahash_request *req)
 		if (mapped_nents > 1) {
 			int qm_sg_bytes;
 
-			sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
-			qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
+			qm_sg_bytes = pad_sg_nents(mapped_nents) *
+				      sizeof(*sg_table);
 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
 							  qm_sg_bytes,
 							  DMA_TO_DEVICE);
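A note on the skcipher IV rework visible above: the IV buffer, stored right after the HW S/G table inside the edesc, is now mapped DMA_BIDIRECTIONAL, and a second IV entry is appended after the dst entries ([IV, src][dst, IV]), so the engine writes the output IV back to host memory itself. The completion callbacks then recover req->iv with a plain memcpy instead of a scatterwalk over req->dst, which also fixes in-place decryption, where the last ciphertext block is already overwritten by completion time. A sketch of the layout this relies on (struct and function names below are abridged/hypothetical, not the driver's own):

#include <crypto/skcipher.h>
#include <soc/fsl/dpaa2-fd.h>
#include <linux/string.h>

/*
 * Abridged edesc layout assumed by the hunks above: the IV sits
 * immediately past the (padded) HW S/G table, so once the engine
 * completes, the output IV is readable at sgt[0] + qm_sg_bytes.
 */
struct skcipher_edesc_sketch {
	int qm_sg_bytes;		/* size of the padded S/G table */
	struct dpaa2_sg_entry sgt[];	/* HW S/G table; IV follows it */
};

static void skcipher_copy_out_iv(struct skcipher_request *req,
				 struct skcipher_edesc_sketch *edesc,
				 int ivsize)
{
	/* the engine wrote the output IV right past the S/G table */
	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);
}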