path: root/drivers/nvme/host/rdma.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2022-03-21 17:16:01 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-03-21 17:16:01 -0700
commit  69d1dea852b54eecd8ad2ec92a7fd371e9aec4bd (patch)
tree    4ab2be8e9d5d7eccd68e0fbf3aeea242a0e56613 /drivers/nvme/host/rdma.c
parent  616355cc818c6ddadc393fdfd4491f94458cb715 (diff)
parent  ae53aea611b7a532a52ba966281a8b7a8cfd008a (diff)
Merge tag 'for-5.18/drivers-2022-03-18' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:

 - NVMe updates via Christoph:
     - add vectored-io support for user-passthrough (Kanchan Joshi)
     - add verbose error logging (Alan Adamson)
     - support buffered I/O on block devices in nvmet (Chaitanya Kulkarni)
     - central discovery controller support (Martin Belanger)
     - fix and extend the globally unique identifier validation (Christoph)
     - move away from the deprecated IDA APIs (Sagi Grimberg)
     - misc code cleanup (Keith Busch, Max Gurtovoy, Qinghua Jin, Chaitanya Kulkarni)
     - add lockdep annotations for in-kernel sockets (Chris Leech)
     - use vmalloc for ANA log buffer (Hannes Reinecke)
     - kerneldoc fixes (Chaitanya Kulkarni)
     - cleanups (Guoqing Jiang, Chaitanya Kulkarni, Christoph)
     - warn about shared namespaces without multipathing (Christoph)

 - MD updates via Song with a set of cleanups (Christoph, Mariusz, Paul, Erik, Dirk)

 - loop cleanups and queue depth configuration (Chaitanya)

 - null_blk cleanups and fixes (Chaitanya)

 - Use descriptive init/exit names in virtio_blk (Randy)

 - Use bvec_kmap_local() in drivers (Christoph)

 - bcache fixes (Mingzhe)

 - xen blk-front persistent grant speedups (Juergen)

 - rnbd fix and cleanup (Gioh)

 - Misc fixes (Christophe, Colin)

* tag 'for-5.18/drivers-2022-03-18' of git://git.kernel.dk/linux-block: (76 commits)
  virtio_blk: eliminate anonymous module_init & module_exit
  nvme: warn about shared namespaces without CONFIG_NVME_MULTIPATH
  nvme: remove nvme_alloc_request and nvme_alloc_request_qid
  nvme: cleanup how disk->disk_name is assigned
  nvmet: move the call to nvmet_ns_changed out of nvmet_ns_revalidate
  nvmet: use snprintf() with PAGE_SIZE in configfs
  nvmet: don't fold lines
  nvmet-rdma: fix kernel-doc warning for nvmet_rdma_device_removal
  nvmet-fc: fix kernel-doc warning for nvmet_fc_unregister_targetport
  nvmet-fc: fix kernel-doc warning for nvmet_fc_register_targetport
  nvme-tcp: lockdep: annotate in-kernel sockets
  nvme-tcp: don't fold the line
  nvme-tcp: don't initialize ret variable
  nvme-multipath: call bio_io_error in nvme_ns_head_submit_bio
  nvme-multipath: use vmalloc for ANA log buffer
  xen/blkfront: speed up purge_persistent_grants()
  raid5: initialize the stripe_head embedded bios as needed
  raid5-cache: statically allocate the recovery ra bio
  raid5-cache: fully initialize flush_bio when needed
  raid5-ppl: fully initialize the bio in ppl_new_iounit
  ...
Diffstat (limited to 'drivers/nvme/host/rdma.c')
-rw-r--r--  drivers/nvme/host/rdma.c | 117
 1 file changed, 67 insertions(+), 50 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9c55e4be8a39..d9f19d901313 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -978,11 +978,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
goto out_free_io_queues;
}
- ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
- if (IS_ERR(ctrl->ctrl.connect_q)) {
- ret = PTR_ERR(ctrl->ctrl.connect_q);
+ ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
+ if (ret)
goto out_free_tag_set;
- }
}
ret = nvme_rdma_start_io_queues(ctrl);
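The hunk above replaces the open-coded blk_mq_init_queue()/IS_ERR()/PTR_ERR() sequence with a helper that returns a plain errno. The helper itself lives in drivers/nvme/host/nvme.h and is not part of this diff; a sketch of what it presumably looks like (an assumption inferred from the call site, not a verbatim excerpt):

	/* sketch: wrap blk_mq_init_queue() and fold its ERR_PTR into an errno */
	static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
	{
		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
		if (IS_ERR(ctrl->connect_q))
			return PTR_ERR(ctrl->connect_q);
		return 0;
	}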
@@ -1283,6 +1281,22 @@ static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
return ib_post_send(queue->qp, &wr, NULL);
}
+static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+
+ if (blk_integrity_rq(rq)) {
+ ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents, rq_dma_dir(rq));
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+ }
+
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+}
+
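The new nvme_rdma_dma_unmap_req() is the exact inverse of the nvme_rdma_dma_map_req() helper introduced further down: the metadata scatterlist is torn down first (and only when blk_integrity_rq() says the request carried integrity data), then the data scatterlist, with each ib_dma_unmap_sg() paired with a sg_free_table_chained(). A condensed side-by-side of that symmetry (paraphrased from this diff, not a verbatim excerpt):

	/*
	 * map side (nvme_rdma_dma_map_req)          unmap side (nvme_rdma_dma_unmap_req)
	 *   sg_alloc_table_chained(data)              ib_dma_unmap_sg(metadata)  [if integrity]
	 *   ib_dma_map_sg(data)                       sg_free_table_chained(metadata)
	 *   sg_alloc_table_chained(metadata) [opt]    ib_dma_unmap_sg(data)
	 *   ib_dma_map_sg(metadata)                   sg_free_table_chained(data)
	 */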
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
struct request *rq)
{
@@ -1294,13 +1308,6 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
if (!blk_rq_nr_phys_segments(rq))
return;
- if (blk_integrity_rq(rq)) {
- ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
- req->metadata_sgl->nents, rq_dma_dir(rq));
- sg_free_table_chained(&req->metadata_sgl->sg_table,
- NVME_INLINE_METADATA_SG_CNT);
- }
-
if (req->use_sig_mr)
pool = &queue->qp->sig_mrs;
@@ -1309,9 +1316,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
req->mr = NULL;
}
- ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
- rq_dma_dir(rq));
- sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+ nvme_rdma_dma_unmap_req(ibdev, rq);
}
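Because the two hunks above interleave removals with surviving context, the result is easier to read in one piece. The function after this patch reads approximately as follows (reconstructed from the hunks, not copied from the post-merge tree):

	static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
			struct request *rq)
	{
		struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
		struct nvme_rdma_device *dev = queue->device;
		struct ib_device *ibdev = dev->dev;
		struct list_head *pool = &queue->qp->rdma_mrs;

		if (!blk_rq_nr_phys_segments(rq))
			return;

		if (req->use_sig_mr)
			pool = &queue->qp->sig_mrs;

		if (req->mr) {
			ib_mr_pool_put(queue->qp, pool, req->mr);
			req->mr = NULL;
		}

		nvme_rdma_dma_unmap_req(ibdev, rq);
	}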
static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1522,22 +1527,11 @@ mr_put:
return -EINVAL;
}
-static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
- struct request *rq, struct nvme_command *c)
+static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
+ int *count, int *pi_count)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_rdma_device *dev = queue->device;
- struct ib_device *ibdev = dev->dev;
- int pi_count = 0;
- int count, ret;
-
- req->num_sge = 1;
- refcount_set(&req->ref, 2); /* send and recv completions */
-
- c->common.flags |= NVME_CMD_SGL_METABUF;
-
- if (!blk_rq_nr_phys_segments(rq))
- return nvme_rdma_set_sg_null(c);
+ int ret;
req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
@@ -1549,9 +1543,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
req->data_sgl.sg_table.sgl);
- count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
- req->data_sgl.nents, rq_dma_dir(rq));
- if (unlikely(count <= 0)) {
+ *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
+ req->data_sgl.nents, rq_dma_dir(rq));
+ if (unlikely(*count <= 0)) {
ret = -EIO;
goto out_free_table;
}
@@ -1570,16 +1564,50 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
rq->bio, req->metadata_sgl->sg_table.sgl);
- pi_count = ib_dma_map_sg(ibdev,
- req->metadata_sgl->sg_table.sgl,
- req->metadata_sgl->nents,
- rq_dma_dir(rq));
- if (unlikely(pi_count <= 0)) {
+ *pi_count = ib_dma_map_sg(ibdev,
+ req->metadata_sgl->sg_table.sgl,
+ req->metadata_sgl->nents,
+ rq_dma_dir(rq));
+ if (unlikely(*pi_count <= 0)) {
ret = -EIO;
goto out_free_pi_table;
}
}
+ return 0;
+
+out_free_pi_table:
+ sg_free_table_chained(&req->metadata_sgl->sg_table,
+ NVME_INLINE_METADATA_SG_CNT);
+out_unmap_sg:
+ ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
+ rq_dma_dir(rq));
+out_free_table:
+ sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+ return ret;
+}
+
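The extracted nvme_rdma_dma_map_req() returns its results through out-parameters (*count, *pi_count) so the caller can still hand them to the MR mappers, and it unwinds partial mappings through a label ladder in reverse acquisition order. A minimal standalone illustration of the same unwind idiom (hypothetical resources, plain userspace C; not kernel code):

	#include <errno.h>
	#include <stdlib.h>

	/* Hypothetical two-stage setup mirroring nvme_rdma_dma_map_req():
	 * stage 2 is optional, and a stage-2 failure must undo stage 1. */
	static int two_stage_map(int want_meta, void **data, void **meta)
	{
		int ret;

		*data = malloc(64);		/* stage 1: "data mapping" */
		if (!*data)
			return -ENOMEM;

		if (want_meta) {
			*meta = malloc(16);	/* stage 2: "metadata mapping" */
			if (!*meta) {
				ret = -ENOMEM;
				goto out_free_data;	/* unwind in reverse order */
			}
		}
		return 0;

	out_free_data:
		free(*data);
		return ret;
	}

	int main(void)
	{
		void *data = NULL, *meta = NULL;

		if (two_stage_map(1, &data, &meta) == 0) {
			free(meta);	/* teardown mirrors setup, reversed */
			free(data);
		}
		return 0;
	}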
+static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
+ struct request *rq, struct nvme_command *c)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_device *dev = queue->device;
+ struct ib_device *ibdev = dev->dev;
+ int pi_count = 0;
+ int count, ret;
+
+ req->num_sge = 1;
+ refcount_set(&req->ref, 2); /* send and recv completions */
+
+ c->common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (!blk_rq_nr_phys_segments(rq))
+ return nvme_rdma_set_sg_null(c);
+
+ ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
+ if (unlikely(ret))
+ return ret;
+
if (req->use_sig_mr) {
ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
goto out;
@@ -1603,23 +1631,12 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
ret = nvme_rdma_map_sg_fr(queue, req, c, count);
out:
if (unlikely(ret))
- goto out_unmap_pi_sg;
+ goto out_dma_unmap_req;
return 0;
-out_unmap_pi_sg:
- if (blk_integrity_rq(rq))
- ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
- req->metadata_sgl->nents, rq_dma_dir(rq));
-out_free_pi_table:
- if (blk_integrity_rq(rq))
- sg_free_table_chained(&req->metadata_sgl->sg_table,
- NVME_INLINE_METADATA_SG_CNT);
-out_unmap_sg:
- ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
- rq_dma_dir(rq));
-out_free_table:
- sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
+out_dma_unmap_req:
+ nvme_rdma_dma_unmap_req(ibdev, rq);
return ret;
}
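With the unwind centralized in nvme_rdma_dma_unmap_req(), every mapping failure in nvme_rdma_map_data() funnels through a single label instead of the previous four-label chain (out_unmap_pi_sg, out_free_pi_table, out_unmap_sg, out_free_table) that had to mirror the map path step by step; the error path can no longer drift out of sync when a mapping stage is added. Condensed shape of the function after this patch (derived from the hunks above; the inline/single-descriptor/MR branches are elided):

	ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
	if (unlikely(ret))
		return ret;

	/* pick a mapper: PI, inline, single SGL entry, or MR-based */
	...
	if (unlikely(ret))
		nvme_rdma_dma_unmap_req(ibdev, rq);	/* one undo path for all mappers */
	return ret;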