author	Giovanni Cabiddu <giovanni.cabiddu@intel.com>	2022-05-09 14:34:12 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-07-29 17:25:28 +0200
commit	6e8606e7ae401251f74c91423fb5bb8e5d11a0c8 (patch)
tree	c1cb150d96982711ba22b69ab0c66daefa0b21e8 /drivers/crypto
parent	a843925e0287eebb4aa808666bf22c664dfe4c53 (diff)
crypto: qat - remove dma_free_coherent() for RSA
[ Upstream commit 3dfaf0071ed74d7a9c6b3c9ea4df7a6f8e423c2a ]

After commit f5ff79fddf0e ("dma-mapping: remove CONFIG_DMA_REMAP"), if the algorithms are enabled, the driver crashes with a BUG_ON while executing vunmap() in the context of a tasklet. This is due to the fact that the function dma_free_coherent() cannot be called in an interrupt context (see Documentation/core-api/dma-api-howto.rst).

The functions qat_rsa_enc() and qat_rsa_dec() allocate memory with dma_alloc_coherent() if the source or the destination buffers are made of multiple flat buffers or of a size that is not compatible with the hardware. This memory is then freed with dma_free_coherent() in the context of a tasklet invoked to handle the response for the corresponding request.

Replace the dma_alloc_coherent() allocations in qat_rsa_enc() and qat_rsa_dec() with kmalloc() + dma_map_single().

Cc: stable@vger.kernel.org
Fixes: a990532023b9 ("crypto: qat - Add support for RSA algorithm")
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Adam Guerin <adam.guerin@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
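The core of the fix: a coherent DMA buffer must be released with dma_free_coherent(), which may end up calling vunmap() and therefore cannot run in the response tasklet, whereas plain kernel memory with a streaming mapping can be unmapped and freed safely from atomic context. Below is a minimal sketch of the old and new allocation patterns, assuming a generic struct device *dev and a key-sized buffer; old_alloc(), new_alloc() and new_free() are hypothetical helpers for illustration, not functions from the driver. Note the patch uses kzalloc() (zeroed memory) rather than bare kmalloc(), and kfree_sensitive() on release.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Old pattern: buffer and DMA handle come from the coherent allocator.
 * Releasing this buffer requires dma_free_coherent(), which must not
 * be called from interrupt context. */
static void *old_alloc(struct device *dev, size_t key_sz, dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, key_sz, dma, GFP_KERNEL);
}

/* New pattern: plain, zeroed kernel memory plus a streaming mapping. */
static void *new_alloc(struct device *dev, size_t key_sz, dma_addr_t *dma)
{
	void *buf = kzalloc(key_sz, GFP_KERNEL);

	if (!buf)
		return NULL;

	*dma = dma_map_single(dev, buf, key_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kfree_sensitive(buf);
		return NULL;
	}
	return buf;
}

/* The new teardown is safe in the tasklet: unmap the streaming
 * mapping, then free with kfree_sensitive(), which zeroes the buffer
 * first so no key material is left behind. */
static void new_free(struct device *dev, void *buf, size_t key_sz,
		     dma_addr_t dma)
{
	dma_unmap_single(dev, dma, key_sz, DMA_TO_DEVICE);
	kfree_sensitive(buf);
}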
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/qat/qat_common/qat_asym_algs.c	137
1 file changed, 60 insertions(+), 77 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 2bc02c75398e..b31372bddb96 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -529,25 +529,22 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
- if (req->src_align)
- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
- req->in.rsa.enc.m);
- else
- dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
- DMA_TO_DEVICE);
+ kfree_sensitive(req->src_align);
+
+ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+ DMA_TO_DEVICE);
areq->dst_len = req->ctx.rsa->key_sz;
if (req->dst_align) {
scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
areq->dst_len, 1);
- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
- req->out.rsa.enc.c);
- } else {
- dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
- DMA_FROM_DEVICE);
+ kfree_sensitive(req->dst_align);
}
+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+ DMA_FROM_DEVICE);
+
dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
dma_unmap_single(dev, req->phy_out,
@@ -664,6 +661,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ u8 *vaddr;
int ret;
if (unlikely(!ctx->n || !ctx->e))
@@ -701,40 +699,39 @@ static int qat_rsa_enc(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
- req->src_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
- return ret;
-
+ vaddr = sg_virt(req->src);
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.enc.m,
- GFP_KERNEL);
+ qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
0, req->src_len, 0);
+ vaddr = qat_req->src_align;
}
- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
- qat_req->dst_align = NULL;
- qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
- goto unmap_src;
+ qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+ goto unmap_src;
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+ qat_req->dst_align = NULL;
+ vaddr = sg_virt(req->dst);
} else {
- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.enc.c,
- GFP_KERNEL);
+ qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
-
+ vaddr = qat_req->dst_align;
}
+
+ qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+ goto unmap_dst;
+
qat_req->in.rsa.in_tab[3] = 0;
qat_req->out.rsa.out_tab[1] = 0;
qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
@@ -772,21 +769,15 @@ unmap_in_params:
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
unmap_dst:
- if (qat_req->dst_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.rsa.enc.c);
- else
- if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
- dma_unmap_single(dev, qat_req->out.rsa.enc.c,
- ctx->key_sz, DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+ dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+ ctx->key_sz, DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.rsa.enc.m);
- else
- if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
- dma_unmap_single(dev, qat_req->in.rsa.enc.m,
- ctx->key_sz, DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+ dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
return ret;
}
@@ -799,6 +790,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ u8 *vaddr;
int ret;
if (unlikely(!ctx->n || !ctx->d))
@@ -846,40 +838,37 @@ static int qat_rsa_dec(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
- req->dst_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
- return ret;
-
+ vaddr = sg_virt(req->src);
} else {
int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->in.rsa.dec.c,
- GFP_KERNEL);
+ qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
0, req->src_len, 0);
+ vaddr = qat_req->src_align;
}
- if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
- qat_req->dst_align = NULL;
- qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
- goto unmap_src;
+ qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+ goto unmap_src;
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+ qat_req->dst_align = NULL;
+ vaddr = sg_virt(req->dst);
} else {
- qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
- &qat_req->out.rsa.dec.m,
- GFP_KERNEL);
+ qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
-
+ vaddr = qat_req->dst_align;
}
+ qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+ goto unmap_dst;
if (ctx->crt_mode)
qat_req->in.rsa.in_tab[6] = 0;
@@ -925,21 +914,15 @@ unmap_in_params:
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
unmap_dst:
- if (qat_req->dst_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.rsa.dec.m);
- else
- if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
- dma_unmap_single(dev, qat_req->out.rsa.dec.m,
- ctx->key_sz, DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+ dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+ ctx->key_sz, DMA_FROM_DEVICE);
+ kfree_sensitive(qat_req->dst_align);
unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.rsa.dec.c);
- else
- if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
- dma_unmap_single(dev, qat_req->in.rsa.dec.c,
- ctx->key_sz, DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+ dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
+ DMA_TO_DEVICE);
+ kfree_sensitive(qat_req->src_align);
return ret;
}