author     Horia Geantă <horia.geanta@nxp.com>    2019-05-03 17:17:38 +0300
committer  Herbert Xu <herbert@gondor.apana.org.au>    2019-05-23 14:01:03 +0800
commit     a5e5c13398f353bb7ebbe913a7bb0c2a77b2ae10 (patch)
tree       81a97f828dbecd401d927245d0d966d0dae54ca2  /drivers/crypto/caam/caamalg_qi.c
parent     dcd9c76e5a183af4f793beb5141efcd260b8d09f (diff)
crypto: caam - fix S/G table passing page boundary
According to the CAAM RM:
- the crypto engine reads 4 S/G entries (64 bytes) at a time, even if the S/G table has fewer entries
- it's the responsibility of the user / programmer to make sure this HW behaviour has no side effects

The drivers currently do not take care of this, leading to IOMMU faults when the S/G table ends close to a page boundary - since only one page is DMA mapped, while CAAM's DMA engine accesses two pages.

Fix this by rounding up the number of allocated S/G table entries to a multiple of 4. Note that in case of two *contiguous* S/G tables, only the last table might need extra entries.

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
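The hunks below call a pad_sg_nents() helper that is defined elsewhere in the driver and not shown in this file's diff. A minimal standalone C sketch of the idea, assuming the helper simply rounds the entry count up to the next multiple of 4 as the commit message describes; the PAD_SG_ALIGN macro is a hypothetical stand-in for the kernel's ALIGN() and is used here only for illustration:

#include <stdio.h>

/*
 * Round up to a multiple of 4; a standalone stand-in for the kernel's
 * ALIGN(x, 4), used here only for illustration.
 */
#define PAD_SG_ALIGN(x)	(((x) + 3) & ~3)

/*
 * Sketch of the padding helper: the CAAM DMA engine fetches S/G entries
 * in 4-entry (64-byte) bursts, so the allocated table must cover the
 * whole last burst even if fewer entries are actually used.
 */
static inline int pad_sg_nents(int sg_nents)
{
	return PAD_SG_ALIGN(sg_nents);
}

int main(void)
{
	/*
	 * Example from the aead path: 1 (assoclen) + 1 (IV) + 3 source
	 * entries = 5 entries; the hardware reads these as two 4-entry
	 * bursts, so 8 entries must be allocated to stay within mapped
	 * memory.
	 */
	int qm_sg_ents = 1 + 1 + 3;

	printf("requested %d, allocated %d\n",
	       qm_sg_ents, pad_sg_nents(qm_sg_ents)); /* 5 -> 8 */
	return 0;
}

With this rounding, an S/G table that would otherwise end one to three entries short of a 64-byte burst boundary no longer lets the DMA engine read past the DMA-mapped page.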
Diffstat (limited to 'drivers/crypto/caam/caamalg_qi.c')
-rw-r--r--   drivers/crypto/caam/caamalg_qi.c | 40
1 file changed, 36 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 116cbc81fa8d..4ccaa4b7936c 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -4,7 +4,7 @@
* Based on caamalg.c
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
*/
#include "compat.h"
@@ -1019,9 +1019,24 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
/*
* Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
* Input is not contiguous.
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (src != dst && output S/G)
+ * pad output S/G, if needed
+ * else if (src == dst && S/G)
+ * overlapping S/Gs; pad one of them
+ * else if (input S/G) ...
+ * pad input S/G, if needed
*/
- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
+ if (mapped_dst_nents > 1)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents);
+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
+ qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
+ 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
+ else
+ qm_sg_ents = pad_sg_nents(qm_sg_ents);
+
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
@@ -1276,7 +1291,24 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ /*
+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
+ * the end of the table by allocating more S/G entries. Logic:
+ * if (src != dst && output S/G)
+ * pad output S/G, if needed
+ * else if (src == dst && S/G)
+ * overlapping S/Gs; pad one of them
+ * else if (input S/G) ...
+ * pad input S/G, if needed
+ */
+ if (mapped_dst_nents > 1)
+ qm_sg_ents += pad_sg_nents(mapped_dst_nents);
+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
+ qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
+ 1 + pad_sg_nents(mapped_src_nents));
+ else
+ qm_sg_ents = pad_sg_nents(qm_sg_ents);
+
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {