author	Thara Gopinath <thara.gopinath@linaro.org>	2021-05-20 22:20:23 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2021-07-14 16:55:52 +0200
commit	addcb6bb584aef558ecab13c45bec0cd1bc3efb3 (patch)
tree	81dbfb0bac12e6107fe021d4b97f6d8801f0cb2f /drivers/crypto
parent	d000c598db1d2eb24401a10236c68326208df7cd (diff)
crypto: qce: skcipher: Fix incorrect sg count for dma transfers
[ Upstream commit 1339a7c3ba05137a2d2fe75f602311bbfc6fab33 ]

Use the sg count returned by dma_map_sg to call into
dmaengine_prep_slave_sg rather than using the original sg count.
dma_map_sg can merge consecutive sglist entries, thus making the
original sg count wrong. This fixes memory corruption issues observed
while testing encryption/decryption of large messages using the
libkcapi framework.

The patch has been tested further by running the full suite of
tcrypt.ko tests, including fuzz tests.

Signed-off-by: Thara Gopinath <thara.gopinath@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
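To make the fix concrete, below is a minimal, self-contained sketch of the
pattern the patch enforces. It is not code from the qce driver; the function
name example_prep_tx and its parameters are illustrative only. The point is
that dma_map_sg() may coalesce adjacent sglist entries, so the count it
returns, not the count the table was originally built with, is what belongs
in dmaengine_prep_slave_sg().

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Illustrative only: map an sglist for a mem-to-device slave transfer and
 * build the descriptor with the *mapped* entry count.
 */
static int example_prep_tx(struct device *dev, struct dma_chan *chan,
			   struct scatterlist *sgl, int nents)
{
	struct dma_async_tx_descriptor *desc;
	int mapped;

	/* dma_map_sg() returns the number of mapped entries (0 on failure). */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -EIO;

	/* Use 'mapped', which can be smaller than 'nents' after merging. */
	desc = dmaengine_prep_slave_sg(chan, sgl, mapped, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		/* Unmapping still uses the original 'nents'. */
		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
		return -EIO;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

Note the asymmetry: dma_unmap_sg() takes the same nents that was passed to
dma_map_sg(), while the dmaengine prep call takes the mapped count; using the
original count in the prep call is exactly the mismatch the patch removes.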
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/qce/skcipher.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index a2d3da0ad95f..5a6559131eac 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -71,7 +71,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
 	struct scatterlist *sg;
 	bool diff_dst;
 	gfp_t gfp;
-	int ret;
+	int dst_nents, src_nents, ret;
 
 	rctx->iv = req->iv;
 	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
@@ -122,21 +122,22 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
 	sg_mark_end(sg);
 	rctx->dst_sg = rctx->dst_tbl.sgl;
 
-	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
-	if (ret < 0)
+	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+	if (dst_nents < 0)
 		goto error_free;
 
 	if (diff_dst) {
-		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
-		if (ret < 0)
+		src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+		if (src_nents < 0)
 			goto error_unmap_dst;
 		rctx->src_sg = req->src;
 	} else {
 		rctx->src_sg = rctx->dst_sg;
+		src_nents = dst_nents - 1;
 	}
 
-	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
-			       rctx->dst_sg, rctx->dst_nents,
+	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
+			       rctx->dst_sg, dst_nents,
 			       qce_skcipher_done, async_req);
 	if (ret)
 		goto error_unmap_src;