author	Max Gurtovoy <maxg@mellanox.com>	2018-06-10 16:58:29 +0300
committer	Christoph Hellwig <hch@lst.de>	2018-06-11 16:17:58 +0200
commit	94423a8f89ed7b66746cade3351a185fb6a1f38d (patch)
tree	6188815d7b4d99774f31d2d4afaea539fdcfa82d
parent	2796b569591c6f0681303f148a55e50c2cc8304a (diff)
nvme-rdma: fix error flow during mapping request data
After DMA mapping the sgl, we map the sgl to an nvme sgl descriptor. In case of a failure during this last mapping step, we never DMA unmap the sgl.

Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
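The fix applies the kernel's usual goto-unwind error-handling pattern: resources are acquired in order, and each failure point jumps to the label that releases exactly what has been acquired so far, in reverse order. Below is a minimal, self-contained userspace sketch of that pattern; the helpers map_dma(), unmap_dma(), and build_descriptor() are hypothetical stand-ins for the two mapping steps in the patch, not the driver's API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins, not the driver's functions. */
static int map_dma(void **dma)
{
	*dma = malloc(16);		/* stands in for ib_dma_map_sg() */
	return *dma ? 0 : -1;
}

static void unmap_dma(void *dma)
{
	free(dma);			/* stands in for ib_dma_unmap_sg() */
}

static int build_descriptor(void *dma)
{
	(void)dma;
	return -1;			/* simulate the second step failing */
}

static int map_data(void)
{
	void *dma;
	int ret;

	ret = map_dma(&dma);		/* step 1: DMA map */
	if (ret)
		goto out;		/* nothing acquired yet, plain return */

	ret = build_descriptor(dma);	/* step 2: build the descriptor */
	if (ret)
		goto out_unmap;		/* undo step 1 before returning */

	return 0;

out_unmap:
	unmap_dma(dma);			/* release in reverse order of acquisition */
out:
	return ret;
}

int main(void)
{
	printf("map_data() = %d\n", map_data());
	return 0;
}

Funneling every exit through a single success check, as the patch does with its out: label, keeps each failure site exactly one goto away from the right amount of cleanup, which is why the bug (a missing ib_dma_unmap_sg on the last failure path) becomes hard to reintroduce.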
-rw-r--r--	drivers/nvme/host/rdma.c	31
1 file changed, 24 insertions(+), 7 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2aba03876d84..7cd4199db225 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1189,21 +1189,38 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 	count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
 		    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	if (unlikely(count <= 0)) {
-		sg_free_table_chained(&req->sg_table, true);
-		return -EIO;
+		ret = -EIO;
+		goto out_free_table;
 	}
 
 	if (count == 1) {
 		if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
 		    blk_rq_payload_bytes(rq) <=
-				nvme_rdma_inline_data_size(queue))
-			return nvme_rdma_map_sg_inline(queue, req, c);
+				nvme_rdma_inline_data_size(queue)) {
+			ret = nvme_rdma_map_sg_inline(queue, req, c);
+			goto out;
+		}
 
-		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
-			return nvme_rdma_map_sg_single(queue, req, c);
+		if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
+			ret = nvme_rdma_map_sg_single(queue, req, c);
+			goto out;
+		}
 	}
 
-	return nvme_rdma_map_sg_fr(queue, req, c, count);
+	ret = nvme_rdma_map_sg_fr(queue, req, c, count);
+out:
+	if (unlikely(ret))
+		goto out_unmap_sg;
+
+	return 0;
+
+out_unmap_sg:
+	ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
+			req->nents, rq_data_dir(rq) ==
+			WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+out_free_table:
+	sg_free_table_chained(&req->sg_table, true);
+	return ret;
 }
 
 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)