author     Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 11:06:54 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-02-21 11:06:54 -0800
commit     9820b4dca0f9c6b7ab8b4307286cdace171b724d (patch)
tree       9feef543deb60557f301ceeed8f7c56ed3efedad /drivers/nvme
parent     582cd91f69de8e44857cb610ebca661dac8656b7 (diff)
parent     f4b64ae6745177642cd9610cfd7df0041e7fca58 (diff)
Merge tag 'for-5.12/drivers-2021-02-17' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:

 - Remove the skd driver. It's been EOL for a long time (Damien)

 - NVMe pull requests
     - fix multipath handling of ->queue_rq errors (Chao Leng)
     - nvmet cleanups (Chaitanya Kulkarni)
     - add a quirk for buggy Amazon controller (Filippo Sironi)
     - avoid devm allocations in nvme-hwmon that don't interact well with
       fabrics (Hannes Reinecke)
     - sysfs cleanups (Jiapeng Chong)
     - fix nr_zones for multipath (Keith Busch)
     - nvme-tcp crash fix for no-data commands (Sagi Grimberg)
     - nvmet-tcp fixes (Sagi Grimberg)
     - add a missing __rcu annotation (Christoph)
     - failed reconnect fixes (Chao Leng)
     - various tracing improvements (Michal Krakowiak, Johannes Thumshirn)
     - switch the nvmet-fc assoc_list to use RCU protection (Leonid Ravich)
     - resync the status codes with the latest spec (Max Gurtovoy)
     - minor nvme-tcp improvements (Sagi Grimberg)
     - various cleanups (Rikard Falkeborn, Minwoo Im, Chaitanya Kulkarni,
       Israel Rukshin)

 - Floppy O_NDELAY fix (Denis)

 - MD pull request
     - raid5 chunk_sectors fix (Guoqing)

 - Use lore links (Kees)

 - Use DEFINE_SHOW_ATTRIBUTE for nbd (Liao)

 - loop lock scaling (Pavel)

 - mtip32xx PCI fixes (Bjorn)

 - bcache fixes (Kai, Dongdong)

 - Misc fixes (Tian, Yang, Guoqing, Joe, Andy)

* tag 'for-5.12/drivers-2021-02-17' of git://git.kernel.dk/linux-block: (64 commits)
  lightnvm: pblk: Replace guid_copy() with export_guid()/import_guid()
  lightnvm: fix unnecessary NULL check warnings
  nvme-tcp: fix crash triggered with a dataless request submission
  block: Replace lkml.org links with lore
  nbd: Convert to DEFINE_SHOW_ATTRIBUTE
  nvme: add 48-bit DMA address quirk for Amazon NVMe controllers
  nvme-hwmon: rework to avoid devm allocation
  nvmet: remove else at the end of the function
  nvmet: add nvmet_req_subsys() helper
  nvmet: use min of device_path and disk len
  nvmet: use invalid cmd opcode helper
  nvmet: use invalid cmd opcode helper
  nvmet: add helper to report invalid opcode
  nvmet: remove extra variable in id-ns handler
  nvmet: make nvmet_find_namespace() req based
  nvmet: return uniform error for invalid ns
  nvmet: set status to 0 in case for invalid nsid
  nvmet-fc: add a missing __rcu annotation to nvmet_fc_tgt_assoc.queues
  nvme-multipath: set nr_zones for zoned namespaces
  nvmet-tcp: fix potential race of tcp socket closing accept_work
  ...
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/core.c            63
-rw-r--r--  drivers/nvme/host/fabrics.c          6
-rw-r--r--  drivers/nvme/host/fc.c               2
-rw-r--r--  drivers/nvme/host/hwmon.c           31
-rw-r--r--  drivers/nvme/host/multipath.c        4
-rw-r--r--  drivers/nvme/host/nvme.h            17
-rw-r--r--  drivers/nvme/host/pci.c             17
-rw-r--r--  drivers/nvme/host/rdma.c            34
-rw-r--r--  drivers/nvme/host/tcp.c             55
-rw-r--r--  drivers/nvme/host/trace.c           53
-rw-r--r--  drivers/nvme/target/admin-cmd.c    114
-rw-r--r--  drivers/nvme/target/configfs.c       6
-rw-r--r--  drivers/nvme/target/core.c          37
-rw-r--r--  drivers/nvme/target/fc.c            83
-rw-r--r--  drivers/nvme/target/fcloop.c         2
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c   13
-rw-r--r--  drivers/nvme/target/io-cmd-file.c    5
-rw-r--r--  drivers/nvme/target/nvmet.h         20
-rw-r--r--  drivers/nvme/target/passthru.c       6
-rw-r--r--  drivers/nvme/target/tcp.c           59
-rw-r--r--  drivers/nvme/target/trace.h          9
21 files changed, 401 insertions, 235 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 89cacff897a8..e68a8c4ac5a6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -279,14 +279,13 @@ static blk_status_t nvme_error_status(u16 status)
static void nvme_retry_req(struct request *req)
{
- struct nvme_ns *ns = req->q->queuedata;
unsigned long delay = 0;
u16 crd;
/* The mask and shift result must be <= 3 */
crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
- if (ns && crd)
- delay = ns->ctrl->crdt[crd - 1] * 100;
+ if (crd)
+ delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100;
nvme_req(req)->retries++;
blk_mq_requeue_request(req, false);
@@ -356,6 +355,21 @@ void nvme_complete_rq(struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
+/*
+ * Called to unwind from ->queue_rq on a failed command submission so that the
+ * multipathing code gets called to potentially failover to another path.
+ * The caller needs to unwind all transport specific resource allocations and
+ * must propagate the return value.
+ */
+blk_status_t nvme_host_path_error(struct request *req)
+{
+ nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR;
+ blk_mq_set_request_complete(req);
+ nvme_complete_rq(req);
+ return BLK_STS_OK;
+}
+EXPORT_SYMBOL_GPL(nvme_host_path_error);
+
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
@@ -371,6 +385,26 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->tagset) {
+ blk_mq_tagset_busy_iter(ctrl->tagset,
+ nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->tagset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
+
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->admin_tagset) {
+ blk_mq_tagset_busy_iter(ctrl->admin_tagset,
+ nvme_cancel_request, ctrl);
+ blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);
+
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state)
{
@@ -842,11 +876,11 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
void nvme_cleanup_cmd(struct request *req)
{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
- struct nvme_ns *ns = req->rq_disk->private_data;
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
struct page *page = req->special_vec.bv_page;
- if (page == ns->ctrl->discard_page)
- clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+ if (page == ctrl->discard_page)
+ clear_bit_unlock(0, &ctrl->discard_page_busy);
else
kfree(page_address(page) + req->special_vec.bv_offset);
}
@@ -2831,7 +2865,7 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
struct nvme_subsystem *subsys =
container_of(dev, struct nvme_subsystem, dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn);
+ return sysfs_emit(buf, "%s\n", subsys->subnqn);
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
@@ -2861,7 +2895,7 @@ static struct attribute *nvme_subsys_attrs[] = {
NULL,
};
-static struct attribute_group nvme_subsys_attrs_group = {
+static const struct attribute_group nvme_subsys_attrs_group = {
.attrs = nvme_subsys_attrs,
};
@@ -3524,7 +3558,7 @@ static ssize_t nvme_sysfs_show_transport(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name);
+ return sysfs_emit(buf, "%s\n", ctrl->ops->name);
}
static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL);
@@ -3558,7 +3592,7 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn);
+ return sysfs_emit(buf, "%s\n", ctrl->subsys->subnqn);
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);
@@ -3568,7 +3602,7 @@ static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
+ return sysfs_emit(buf, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);
@@ -3578,7 +3612,7 @@ static ssize_t nvme_sysfs_show_hostid(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
+ return sysfs_emit(buf, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);
@@ -3696,7 +3730,7 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-static struct attribute_group nvme_dev_attrs_group = {
+static const struct attribute_group nvme_dev_attrs_group = {
.attrs = nvme_dev_attrs,
.is_visible = nvme_dev_attrs_are_visible,
};
@@ -4439,6 +4473,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ nvme_hwmon_exit(ctrl);
nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
@@ -4451,7 +4486,7 @@ static void nvme_free_cels(struct nvme_ctrl *ctrl)
struct nvme_effects_log *cel;
unsigned long i;
- xa_for_each (&ctrl->cels, i, cel) {
+ xa_for_each(&ctrl->cels, i, cel) {
xa_erase(&ctrl->cels, i);
kfree(cel);
}
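For context, the caller pattern the comment above prescribes for nvme_host_path_error(), and that the nvme-rdma hunk later in this diff adopts, looks roughly like the following sketch (the function name is made up for illustration):

/*
 * Illustrative sketch only: a hypothetical transport's ->queue_rq error
 * path. All transport-specific resources must already be unwound before
 * calling the helper; see nvmf_fail_nonready_command() and
 * nvme_rdma_queue_rq() in this series for the real callers.
 */
static blk_status_t demo_queue_rq_unwind(struct request *rq, int err)
{
	if (err == -EIO)
		/* surface NVME_SC_HOST_PATH_ERROR so multipath can fail over */
		return nvme_host_path_error(rq);
	if (err == -ENOMEM || err == -EAGAIN)
		return BLK_STS_RESOURCE;
	return BLK_STS_IOERR;
}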
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 72ac00173500..5dfd806fc2d2 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -552,11 +552,7 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
-
- nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
- blk_mq_start_request(rq);
- nvme_complete_rq(rq);
- return BLK_STS_OK;
+ return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 5f36cfa8136c..20dadd86e981 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3789,7 +3789,7 @@ static struct attribute *nvme_fc_attrs[] = {
NULL
};
-static struct attribute_group nvme_fc_attr_group = {
+static const struct attribute_group nvme_fc_attr_group = {
.attrs = nvme_fc_attrs,
};
diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
index 552dbc04567b..8f9e96986780 100644
--- a/drivers/nvme/host/hwmon.c
+++ b/drivers/nvme/host/hwmon.c
@@ -223,12 +223,12 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = {
int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
- struct device *dev = ctrl->dev;
+ struct device *dev = ctrl->device;
struct nvme_hwmon_data *data;
struct device *hwmon;
int err;
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return 0;
@@ -237,19 +237,30 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
err = nvme_hwmon_get_smart_log(data);
if (err) {
- dev_warn(ctrl->device,
- "Failed to read smart log (error %d)\n", err);
- devm_kfree(dev, data);
+ dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+ kfree(data);
return err;
}
- hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data,
- &nvme_hwmon_chip_info,
- NULL);
+ hwmon = hwmon_device_register_with_info(dev, "nvme",
+ data, &nvme_hwmon_chip_info,
+ NULL);
if (IS_ERR(hwmon)) {
dev_warn(dev, "Failed to instantiate hwmon device\n");
- devm_kfree(dev, data);
+ kfree(data);
}
-
+ ctrl->hwmon_device = hwmon;
return 0;
}
+
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+ if (ctrl->hwmon_device) {
+ struct nvme_hwmon_data *data =
+ dev_get_drvdata(ctrl->hwmon_device);
+
+ hwmon_device_unregister(ctrl->hwmon_device);
+ ctrl->hwmon_device = NULL;
+ kfree(data);
+ }
+}
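The hwmon rework replaces devm-managed allocations with an explicit init/exit pairing; a condensed view of the lifecycle after this change (a summary, not a literal excerpt):

/*
 * nvme_hwmon_init(ctrl):  kzalloc() the smart-log buffer and call
 *                         hwmon_device_register_with_info(); the handle is
 *                         stored in the new ctrl->hwmon_device field.
 * nvme_hwmon_exit(ctrl):  hwmon_device_unregister() and kfree() the buffer;
 *                         called from nvme_uninit_ctrl() (see the core.c
 *                         hunk above), so nothing is tied to devm anymore.
 */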
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1427c9555cef..a1d476e1ac02 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -677,6 +677,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
ns->head->disk->queue);
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
+ ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
+#endif
}
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 88a6b97247f5..07b34175c6ce 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -144,6 +144,12 @@ enum nvme_quirks {
* NVMe 1.3 compliance.
*/
NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15),
+
+ /*
+ * The controller does not properly handle DMA addresses over
+ * 48 bits.
+ */
+ NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
};
/*
@@ -246,6 +252,9 @@ struct nvme_ctrl {
struct rw_semaphore namespaces_rwsem;
struct device ctrl_device;
struct device *device; /* char device */
+#ifdef CONFIG_NVME_HWMON
+ struct device *hwmon_device;
+#endif
struct cdev cdev;
struct work_struct reset_work;
struct work_struct delete_work;
@@ -575,7 +584,10 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
}
void nvme_complete_rq(struct request *req);
+blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
+void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
+void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
@@ -809,11 +821,16 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
+void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
return 0;
}
+
+static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+{
+}
#endif
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 808ab5186928..7b6632c00ffd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2362,13 +2362,16 @@ static int nvme_pci_enable(struct nvme_dev *dev)
{
int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int dma_address_bits = 64;
if (pci_enable_device_mem(pdev))
return result;
pci_set_master(pdev);
- if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
+ if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
+ dma_address_bits = 48;
+ if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
goto disable;
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
@@ -3263,6 +3266,18 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
+ .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
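The Amazon quirk only narrows the DMA mask negotiated in nvme_pci_enable(); in isolation the logic amounts to this sketch (the helper name is invented for illustration):

/* Sketch: pick the DMA mask width from the controller quirks. */
static u64 demo_nvme_dma_mask(unsigned long quirks)
{
	/* DMA_BIT_MASK(48) == 0x0000ffffffffffffULL */
	if (quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
		return DMA_BIT_MASK(48);
	return DMA_BIT_MASK(64);
}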
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f5ef3edeb2fd..53ac4d7442ba 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -919,12 +919,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
error = nvme_init_identify(&ctrl->ctrl);
if (error)
- goto out_stop_queue;
+ goto out_quiesce_queue;
return 0;
+out_quiesce_queue:
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_sync_queue(ctrl->ctrl.admin_q);
out_stop_queue:
nvme_rdma_stop_queue(&ctrl->queues[0]);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
out_cleanup_queue:
if (new)
blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -1001,8 +1005,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
out_wait_freeze_timed_out:
nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
out_cleanup_connect_q:
+ nvme_cancel_tagset(&ctrl->ctrl);
if (new)
blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
@@ -1019,11 +1025,7 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
- if (ctrl->ctrl.admin_tagset) {
- blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
- }
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
if (remove)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_destroy_admin_queue(ctrl, remove);
@@ -1037,11 +1039,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
nvme_stop_queues(&ctrl->ctrl);
nvme_sync_io_queues(&ctrl->ctrl);
nvme_rdma_stop_io_queues(ctrl);
- if (ctrl->ctrl.tagset) {
- blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
- nvme_cancel_request, &ctrl->ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
- }
+ nvme_cancel_tagset(&ctrl->ctrl);
if (remove)
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, remove);
@@ -1144,10 +1142,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
return 0;
destroy_io:
- if (ctrl->ctrl.queue_count > 1)
+ if (ctrl->ctrl.queue_count > 1) {
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
+ nvme_cancel_tagset(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, new);
+ }
destroy_admin:
+ blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
+ nvme_cancel_admin_tagset(&ctrl->ctrl);
nvme_rdma_destroy_admin_queue(ctrl, new);
return ret;
}
@@ -2092,7 +2098,9 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
err_unmap:
nvme_rdma_unmap_data(queue, rq);
err:
- if (err == -ENOMEM || err == -EAGAIN)
+ if (err == -EIO)
+ ret = nvme_host_path_error(rq);
+ else if (err == -ENOMEM || err == -EAGAIN)
ret = BLK_STS_RESOURCE;
else
ret = BLK_STS_IOERR;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 881d28eb15e9..69f59d2c5799 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -206,11 +206,6 @@ static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
req->pdu_len - req->pdu_sent);
}
-static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
-{
- return req->iter.iov_offset;
-}
-
static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
{
return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
@@ -229,24 +224,29 @@ static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
struct request *rq = blk_mq_rq_from_pdu(req);
struct bio_vec *vec;
unsigned int size;
- int nsegs;
+ int nr_bvec;
size_t offset;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
vec = &rq->special_vec;
- nsegs = 1;
+ nr_bvec = 1;
size = blk_rq_payload_bytes(rq);
offset = 0;
} else {
struct bio *bio = req->curr_bio;
+ struct bvec_iter bi;
+ struct bio_vec bv;
vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
- nsegs = bio_segments(bio);
+ nr_bvec = 0;
+ bio_for_each_bvec(bv, bio, bi) {
+ nr_bvec++;
+ }
size = bio->bi_iter.bi_size;
offset = bio->bi_iter.bi_bvec_done;
}
- iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
+ iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
req->iter.iov_offset = offset;
}
@@ -983,7 +983,6 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
req->state = NVME_TCP_SEND_DATA;
if (queue->data_digest)
crypto_ahash_init(queue->snd_hash);
- nvme_tcp_init_iter(req, WRITE);
} else {
nvme_tcp_done_send_req(queue);
}
@@ -1016,8 +1015,6 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
req->state = NVME_TCP_SEND_DATA;
if (queue->data_digest)
crypto_ahash_init(queue->snd_hash);
- if (!req->data_sent)
- nvme_tcp_init_iter(req, WRITE);
return 1;
}
req->offset += ret;
@@ -1815,8 +1812,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
out_wait_freeze_timed_out:
nvme_stop_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
+ nvme_cancel_tagset(ctrl);
if (new)
blk_cleanup_queue(ctrl->connect_q);
out_free_tag_set:
@@ -1878,12 +1877,16 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
error = nvme_init_identify(ctrl);
if (error)
- goto out_stop_queue;
+ goto out_quiesce_queue;
return 0;
+out_quiesce_queue:
+ blk_mq_quiesce_queue(ctrl->admin_q);
+ blk_sync_queue(ctrl->admin_q);
out_stop_queue:
nvme_tcp_stop_queue(ctrl, 0);
+ nvme_cancel_admin_tagset(ctrl);
out_cleanup_queue:
if (new)
blk_cleanup_queue(ctrl->admin_q);
@@ -1904,11 +1907,7 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
blk_mq_quiesce_queue(ctrl->admin_q);
blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
- if (ctrl->admin_tagset) {
- blk_mq_tagset_busy_iter(ctrl->admin_tagset,
- nvme_cancel_request, ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
- }
+ nvme_cancel_admin_tagset(ctrl);
if (remove)
blk_mq_unquiesce_queue(ctrl->admin_q);
nvme_tcp_destroy_admin_queue(ctrl, remove);
@@ -1924,11 +1923,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
nvme_stop_queues(ctrl);
nvme_sync_io_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
- if (ctrl->tagset) {
- blk_mq_tagset_busy_iter(ctrl->tagset,
- nvme_cancel_request, ctrl);
- blk_mq_tagset_wait_completed_request(ctrl->tagset);
- }
+ nvme_cancel_tagset(ctrl);
if (remove)
nvme_start_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
@@ -2003,10 +1998,18 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
return 0;
destroy_io:
- if (ctrl->queue_count > 1)
+ if (ctrl->queue_count > 1) {
+ nvme_stop_queues(ctrl);
+ nvme_sync_io_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
+ nvme_cancel_tagset(ctrl);
nvme_tcp_destroy_io_queues(ctrl, new);
+ }
destroy_admin:
+ blk_mq_quiesce_queue(ctrl->admin_q);
+ blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
+ nvme_cancel_admin_tagset(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, new);
return ret;
}
@@ -2268,12 +2271,12 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
req->data_len = blk_rq_nr_phys_segments(rq) ?
blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
+ if (req->curr_bio && req->data_len)
+ nvme_tcp_init_iter(req, rq_data_dir(rq));
if (rq_data_dir(rq) == WRITE &&
req->data_len <= nvme_tcp_inline_data_size(queue))
req->pdu_len = req->data_len;
- else if (req->curr_bio)
- nvme_tcp_init_iter(req, READ);
pdu->hdr.type = nvme_tcp_cmd;
pdu->hdr.flags = 0;
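The dataless-request crash fix reduces to initializing the iov_iter once, at setup time, and only when there is data to transfer; the key guard added above is:

/*
 * A dataless command (e.g. Flush: rq->bio == NULL, data_len == 0) now never
 * reaches nvme_tcp_init_iter():
 *
 *	if (req->curr_bio && req->data_len)
 *		nvme_tcp_init_iter(req, rq_data_dir(rq));
 *
 * and the per-send re-initialization in nvme_tcp_try_send_cmd_pdu() and
 * nvme_tcp_try_send_data_pdu() is dropped accordingly.
 */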
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 5c3cb6928f3c..6543015b6121 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -102,6 +102,23 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
return ret;
}
+static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 lbaf = cdw10[0] & 0xF;
+ u8 mset = (cdw10[0] >> 4) & 0x1;
+ u8 pi = (cdw10[0] >> 5) & 0x7;
+ u8 pil = cdw10[1] & 0x1;
+ u8 ses = (cdw10[1] >> 1) & 0x7;
+
+ trace_seq_printf(p, "lbaf=%u, mset=%u, pi=%u, pil=%u, ses=%u",
+ lbaf, mset, pi, pil, ses);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -131,6 +148,35 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
return ret;
}
+static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u8 zsa = cdw10[12];
+ u8 all = cdw10[13];
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 numd = get_unaligned_le32(cdw10 + 8);
+ u8 zra = cdw10[12];
+ u8 zrasf = cdw10[13];
+ u8 pr = cdw10[14];
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
+ slba, numd, zra, zrasf, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -159,6 +205,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
return nvme_trace_admin_get_features(p, cdw10);
case nvme_admin_get_lba_status:
return nvme_trace_get_lba_status(p, cdw10);
+ case nvme_admin_format_nvm:
+ return nvme_trace_admin_format_nvm(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
@@ -171,9 +219,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
case nvme_cmd_read:
case nvme_cmd_write:
case nvme_cmd_write_zeroes:
+ case nvme_cmd_zone_append:
return nvme_trace_read_write(p, cdw10);
case nvme_cmd_dsm:
return nvme_trace_dsm(p, cdw10);
+ case nvme_cmd_zone_mgmt_send:
+ return nvme_trace_zone_mgmt_send(p, cdw10);
+ case nvme_cmd_zone_mgmt_recv:
+ return nvme_trace_zone_mgmt_recv(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
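As a quick check of the new Format NVM decoder, a worked example with illustrative values:

/*
 * Example: cdw10[0] = 0x34, cdw10[1] = 0x03
 *
 *	lbaf =  0x34 & 0xF       = 4
 *	mset = (0x34 >> 4) & 0x1 = 1
 *	pi   = (0x34 >> 5) & 0x7 = 1
 *	pil  =  0x03 & 0x1       = 1
 *	ses  = (0x03 >> 1) & 0x7 = 1
 *
 * so the trace line reads "lbaf=4, mset=1, pi=1, pil=1, ses=1".
 */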
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index dc1ea468b182..bc6a774f2124 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -74,34 +74,28 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
- struct nvmet_ns *ns;
u64 host_reads, host_writes, data_units_read, data_units_written;
+ u16 status;
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
- if (!ns) {
- pr_err("Could not find namespace id : %d\n",
- le32_to_cpu(req->cmd->get_log_page.nsid));
- req->error_loc = offsetof(struct nvme_rw_command, nsid);
- return NVME_SC_INVALID_NS;
- }
+ status = nvmet_req_find_ns(req);
+ if (status)
+ return status;
/* we don't have the right data for file backed ns */
- if (!ns->bdev)
- goto out;
+ if (!req->ns->bdev)
+ return NVME_SC_SUCCESS;
- host_reads = part_stat_read(ns->bdev, ios[READ]);
+ host_reads = part_stat_read(req->ns->bdev, ios[READ]);
data_units_read =
- DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000);
- host_writes = part_stat_read(ns->bdev, ios[WRITE]);
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
+ host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
data_units_written =
- DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000);
+ DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
put_unaligned_le64(host_reads, &slog->host_reads[0]);
put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
put_unaligned_le64(host_writes, &slog->host_writes[0]);
put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
-out:
- nvmet_put_namespace(ns);
return NVME_SC_SUCCESS;
}
@@ -468,10 +462,8 @@ out:
static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
- struct nvmet_ctrl *ctrl = req->sq->ctrl;
- struct nvmet_ns *ns;
struct nvme_id_ns *id;
- u16 status = 0;
+ u16 status;
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -486,20 +478,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
}
/* return an all zeroed buffer if we can't find an active namespace */
- ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
- if (!ns) {
- status = NVME_SC_INVALID_NS;
+ status = nvmet_req_find_ns(req);
+ if (status) {
+ status = 0;
goto done;
}
- nvmet_ns_revalidate(ns);
+ nvmet_ns_revalidate(req->ns);
/*
* nuse = ncap = nsze isn't always true, but we have no way to find
* that out from the underlying device.
*/
- id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
- switch (req->port->ana_state[ns->anagrpid]) {
+ id->ncap = id->nsze =
+ cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
+ switch (req->port->ana_state[req->ns->anagrpid]) {
case NVME_ANA_INACCESSIBLE:
case NVME_ANA_PERSISTENT_LOSS:
break;
@@ -508,8 +501,8 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
break;
}
- if (ns->bdev)
- nvmet_bdev_set_limits(ns->bdev, id);
+ if (req->ns->bdev)
+ nvmet_bdev_set_limits(req->ns->bdev, id);
/*
* We just provide a single LBA format that matches what the
@@ -523,25 +516,24 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* controllers, but also with any other user of the block device.
*/
id->nmic = (1 << 0);
- id->anagrpid = cpu_to_le32(ns->anagrpid);
+ id->anagrpid = cpu_to_le32(req->ns->anagrpid);
- memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));
+ memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
- id->lbaf[0].ds = ns->blksize_shift;
+ id->lbaf[0].ds = req->ns->blksize_shift;
- if (ctrl->pi_support && nvmet_ns_has_pi(ns)) {
+ if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
id->dpc = NVME_NS_DPC_PI_FIRST | NVME_NS_DPC_PI_LAST |
NVME_NS_DPC_PI_TYPE1 | NVME_NS_DPC_PI_TYPE2 |
NVME_NS_DPC_PI_TYPE3;
id->mc = NVME_MC_EXTENDED_LBA;
- id->dps = ns->pi_type;
+ id->dps = req->ns->pi_type;
id->flbas = NVME_NS_FLBAS_META_EXT;
- id->lbaf[0].ms = cpu_to_le16(ns->metadata_size);
+ id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
}
- if (ns->readonly)
+ if (req->ns->readonly)
id->nsattr |= (1 << 0);
- nvmet_put_namespace(ns);
done:
if (!status)
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -607,37 +599,32 @@ static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
- struct nvmet_ns *ns;
- u16 status = 0;
off_t off = 0;
+ u16 status;
- ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
- if (!ns) {
- req->error_loc = offsetof(struct nvme_identify, nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+ status = nvmet_req_find_ns(req);
+ if (status)
goto out;
- }
- if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
+ if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
NVME_NIDT_UUID_LEN,
- &ns->uuid, &off);
+ &req->ns->uuid, &off);
if (status)
- goto out_put_ns;
+ goto out;
}
- if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
+ if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
NVME_NIDT_NGUID_LEN,
- &ns->nguid, &off);
+ &req->ns->nguid, &off);
if (status)
- goto out_put_ns;
+ goto out;
}
if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
off) != NVME_IDENTIFY_DATA_SIZE - off)
status = NVME_SC_INTERNAL | NVME_SC_DNR;
-out_put_ns:
- nvmet_put_namespace(ns);
+
out:
nvmet_req_complete(req, status);
}
@@ -696,14 +683,12 @@ static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
- u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
+ u16 status;
- req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
- if (unlikely(!req->ns)) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
+ status = nvmet_req_find_ns(req);
+ if (status)
return status;
- }
mutex_lock(&subsys->lock);
switch (write_protect) {
@@ -757,7 +742,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
void nvmet_execute_set_features(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
u16 status = 0;
@@ -801,14 +786,13 @@ void nvmet_execute_set_features(struct nvmet_req *req)
static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 result;
- req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
- if (!req->ns) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
- }
+ result = nvmet_req_find_ns(req);
+ if (result)
+ return result;
+
mutex_lock(&subsys->lock);
if (req->ns->readonly == true)
result = NVME_NS_WRITE_PROTECT;
@@ -832,7 +816,7 @@ void nvmet_get_feat_async_event(struct nvmet_req *req)
void nvmet_execute_get_features(struct nvmet_req *req)
{
- struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
+ struct nvmet_subsys *subsys = nvmet_req_subsys(req);
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
@@ -939,7 +923,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_cmd(req);
- if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
+ if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
return nvmet_parse_discovery_cmd(req);
ret = nvmet_check_ctrl_status(req, cmd);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index c61ffd767062..635a7cb45d0b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -45,7 +45,7 @@ static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
{
if (p->enabled)
pr_err("Disable port '%u' before changing attribute in %s\n",
- le16_to_cpu(p->disc_addr.portid), caller);
+ le16_to_cpu(p->disc_addr.portid), caller);
return p->enabled;
}
@@ -266,10 +266,8 @@ static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
if (strtobool(page, &val))
return -EINVAL;
- if (port->enabled) {
- pr_err("Disable port before setting pi_enable value.\n");
+ if (nvmet_is_port_enabled(port, __func__))
return -EACCES;
- }
port->pi_enable = val;
return count;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8ce4d59cc9e7..67bbf0e3b507 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -82,6 +82,15 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
return status;
}
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
+{
+ pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
+ req->sq->qid);
+
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+}
+
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
const char *subsysnqn);
@@ -417,15 +426,18 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
cancel_delayed_work_sync(&ctrl->ka_work);
}
-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
+u16 nvmet_req_find_ns(struct nvmet_req *req)
{
- struct nvmet_ns *ns;
+ u32 nsid = le32_to_cpu(req->cmd->common.nsid);
- ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
- if (ns)
- percpu_ref_get(&ns->ref);
+ req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+ if (unlikely(!req->ns)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
+ return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ }
- return ns;
+ percpu_ref_get(&req->ns->ref);
+ return NVME_SC_SUCCESS;
}
static void nvmet_destroy_namespace(struct percpu_ref *ref)
@@ -862,11 +874,10 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (nvmet_req_passthru_ctrl(req))
return nvmet_parse_passthru_io_cmd(req);
- req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
- if (unlikely(!req->ns)) {
- req->error_loc = offsetof(struct nvme_common_command, nsid);
- return NVME_SC_INVALID_NS | NVME_SC_DNR;
- }
+ ret = nvmet_req_find_ns(req);
+ if (unlikely(ret))
+ return ret;
+
ret = nvmet_check_ana_state(req->port, req->ns);
if (unlikely(ret)) {
req->error_loc = offsetof(struct nvme_common_command, nsid);
@@ -880,8 +891,8 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (req->ns->file)
return nvmet_file_parse_io_cmd(req);
- else
- return nvmet_bdev_parse_io_cmd(req);
+
+ return nvmet_bdev_parse_io_cmd(req);
}
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
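The admin-cmd.c conversions above all follow the same caller pattern enabled by the new helper; roughly (a sketch):

/*
 * Caller pattern after this change (sketch):
 *
 *	status = nvmet_req_find_ns(req);	// sets req->ns and req->error_loc
 *	if (status)
 *		return status;			// NVME_SC_INVALID_NS | NVME_SC_DNR
 *	// ... use req->ns ...
 *
 * The helper takes the percpu reference on req->ns; that reference is
 * dropped when the request completes, which is why the explicit
 * nvmet_put_namespace() calls disappear from the admin-cmd.c hunks above.
 */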
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index cd4e73aa9807..d375745fc4ed 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -145,6 +145,7 @@ struct nvmet_fc_tgt_queue {
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
+ struct rcu_head rcu;
struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
@@ -164,9 +165,10 @@ struct nvmet_fc_tgt_assoc {
struct nvmet_fc_hostport *hostport;
struct nvmet_fc_ls_iod *rcv_disconn;
struct list_head a_list;
- struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
+ struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
struct kref ref;
struct work_struct del_work;
+ struct rcu_head rcu;
};
@@ -790,7 +792,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
u16 qid, u16 sqsize)
{
struct nvmet_fc_tgt_queue *queue;
- unsigned long flags;
int ret;
if (qid > NVMET_NR_QUEUES)
@@ -829,9 +830,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
goto out_fail_iodlist;
WARN_ON(assoc->queues[qid]);
- spin_lock_irqsave(&assoc->tgtport->lock, flags);
- assoc->queues[qid] = queue;
- spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+ rcu_assign_pointer(assoc->queues[qid], queue);
return queue;
@@ -851,11 +850,8 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
{
struct nvmet_fc_tgt_queue *queue =
container_of(ref, struct nvmet_fc_tgt_queue, ref);
- unsigned long flags;
- spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
- queue->assoc->queues[queue->qid] = NULL;
- spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+ rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
@@ -863,7 +859,7 @@ nvmet_fc_tgt_queue_free(struct kref *ref)
destroy_workqueue(queue->work_q);
- kfree(queue);
+ kfree_rcu(queue, rcu);
}
static void
@@ -965,24 +961,23 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_tgt_queue *queue;
u64 association_id = nvmet_fc_getassociationid(connection_id);
u16 qid = nvmet_fc_getqueueid(connection_id);
- unsigned long flags;
if (qid > NVMET_NR_QUEUES)
return NULL;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
- queue = assoc->queues[qid];
+ queue = rcu_dereference(assoc->queues[qid]);
if (queue &&
(!atomic_read(&queue->connected) ||
!nvmet_fc_tgt_q_get(queue)))
queue = NULL;
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return queue;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return NULL;
}
@@ -1137,7 +1132,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
}
if (!needrandom) {
assoc->association_id = ran;
- list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+ list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
}
@@ -1167,7 +1162,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
nvmet_fc_free_hostport(assoc->hostport);
spin_lock_irqsave(&tgtport->lock, flags);
- list_del(&assoc->a_list);
+ list_del_rcu(&assoc->a_list);
oldls = assoc->rcv_disconn;
spin_unlock_irqrestore(&tgtport->lock, flags);
/* if pending Rcv Disconnect Association LS, send rsp now */
@@ -1177,7 +1172,7 @@ nvmet_fc_target_assoc_free(struct kref *ref)
dev_info(tgtport->dev,
"{%d:%d} Association freed\n",
tgtport->fc_target_port.port_num, assoc->a_id);
- kfree(assoc);
+ kfree_rcu(assoc, rcu);
nvmet_fc_tgtport_put(tgtport);
}
@@ -1198,7 +1193,6 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
struct nvmet_fc_tgt_queue *queue;
- unsigned long flags;
int i, terminating;
terminating = atomic_xchg(&assoc->terminating, 1);
@@ -1207,19 +1201,23 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
if (terminating)
return;
- spin_lock_irqsave(&tgtport->lock, flags);
+
for (i = NVMET_NR_QUEUES; i >= 0; i--) {
- queue = assoc->queues[i];
- if (queue) {
- if (!nvmet_fc_tgt_q_get(queue))
- continue;
- spin_unlock_irqrestore(&tgtport->lock, flags);
- nvmet_fc_delete_target_queue(queue);
- nvmet_fc_tgt_q_put(queue);
- spin_lock_irqsave(&tgtport->lock, flags);
+ rcu_read_lock();
+ queue = rcu_dereference(assoc->queues[i]);
+ if (!queue) {
+ rcu_read_unlock();
+ continue;
+ }
+
+ if (!nvmet_fc_tgt_q_get(queue)) {
+ rcu_read_unlock();
+ continue;
}
+ rcu_read_unlock();
+ nvmet_fc_delete_target_queue(queue);
+ nvmet_fc_tgt_q_put(queue);
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
dev_info(tgtport->dev,
"{%d:%d} Association deleted\n",
@@ -1234,10 +1232,9 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
{
struct nvmet_fc_tgt_assoc *assoc;
struct nvmet_fc_tgt_assoc *ret = NULL;
- unsigned long flags;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (association_id == assoc->association_id) {
ret = assoc;
if (!nvmet_fc_tgt_a_get(assoc))
@@ -1245,7 +1242,7 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
break;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
return ret;
}
@@ -1473,19 +1470,17 @@ nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
- struct nvmet_fc_tgt_assoc *assoc, *next;
- unsigned long flags;
+ struct nvmet_fc_tgt_assoc *assoc;
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry_safe(assoc, next,
- &tgtport->assoc_list, a_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
if (!schedule_work(&assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
}
/**
@@ -1568,16 +1563,16 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
continue;
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
- spin_lock_irqsave(&tgtport->lock, flags);
- list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
- queue = assoc->queues[0];
+ rcu_read_lock();
+ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
+ queue = rcu_dereference(assoc->queues[0]);
if (queue && queue->nvme_sq.ctrl == ctrl) {
if (nvmet_fc_tgt_a_get(assoc))
found_ctrl = true;
break;
}
}
- spin_unlock_irqrestore(&tgtport->lock, flags);
+ rcu_read_unlock();
nvmet_fc_tgtport_put(tgtport);
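The assoc_list conversion is the standard RCU list recipe; condensed (a sketch, not a literal excerpt):

/*
 * Readers (the lookup and delete-ctrl paths above):
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
 *		if (!nvmet_fc_tgt_a_get(assoc))		// pin before leaving RCU
 *			continue;
 *		...
 *	}
 *	rcu_read_unlock();
 *
 * Writers (still serialized by tgtport->lock):
 *
 *	list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
 *	list_del_rcu(&assoc->a_list);
 *	kfree_rcu(assoc, rcu);				// freed after a grace period
 */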
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 68213f0a052b..54606f1872b4 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1545,7 +1545,7 @@ static struct attribute *fcloop_dev_attrs[] = {
NULL
};
-static struct attribute_group fclopp_dev_attrs_group = {
+static const struct attribute_group fclopp_dev_attrs_group = {
.attrs = fcloop_dev_attrs,
};
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index bf6e0ac9ad28..3d9a5d3ed9cd 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -256,8 +256,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
if (is_pci_p2pdma_page(sg_page(req->sg)))
op |= REQ_NOMERGE;
- sector = le64_to_cpu(req->cmd->rw.slba);
- sector <<= (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
bio = &req->b.inline_bio;
@@ -345,7 +344,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
int ret;
ret = __blkdev_issue_discard(ns->bdev,
- le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
+ nvmet_lba_to_sect(ns, range->slba),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
GFP_KERNEL, 0, bio);
if (ret && ret != -EOPNOTSUPP) {
@@ -414,8 +413,7 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
if (!nvmet_check_transfer_len(req, 0))
return;
- sector = le64_to_cpu(write_zeroes->slba) <<
- (req->ns->blksize_shift - 9);
+ sector = nvmet_lba_to_sect(req->ns, write_zeroes->slba);
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
(req->ns->blksize_shift - 9));
@@ -451,9 +449,6 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
req->execute = nvmet_bdev_execute_write_zeroes;
return 0;
default:
- pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
- req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 0abbefd9925e..715d4376c997 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -400,9 +400,6 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
req->execute = nvmet_file_execute_write_zeroes;
return 0;
default:
- pr_err("unhandled cmd for file ns %d on qid %d\n",
- cmd->common.opcode, req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 592763732065..cdfa537b1c0a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -443,7 +443,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
-struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
+u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
@@ -551,6 +551,11 @@ static inline u32 nvmet_dsm_len(struct nvmet_req *req)
sizeof(struct nvme_dsm_range);
}
+static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
+{
+ return req->sq->ctrl->subsys;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
@@ -585,10 +590,11 @@ static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
static inline struct nvme_ctrl *
nvmet_req_passthru_ctrl(struct nvmet_req *req)
{
- return nvmet_passthru_ctrl(req->sq->ctrl->subsys);
+ return nvmet_passthru_ctrl(nvmet_req_subsys(req));
}
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
+u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
@@ -603,4 +609,14 @@ static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}
+static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
+{
+ return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
+}
+
+static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
+{
+ return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
+}
+
#endif /* _NVMET_H */
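The two new conversion helpers are plain shifts; a worked example assuming a 4096-byte logical block size:

/*
 * With ns->blksize_shift == 12 (4096-byte blocks) and SECTOR_SHIFT == 9:
 *
 *	nvmet_lba_to_sect(ns, cpu_to_le64(1)) == 1 << (12 - 9) == 8
 *	nvmet_sect_to_lba(ns, 8)              == cpu_to_le64(8 >> (12 - 9)) == 1
 */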
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index cbc88acdd233..f50c7b2bf21c 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -239,9 +239,9 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
}
q = ns->queue;
- timeout = req->sq->ctrl->subsys->io_timeout;
+ timeout = nvmet_req_subsys(req)->io_timeout;
} else {
- timeout = req->sq->ctrl->subsys->admin_timeout;
+ timeout = nvmet_req_subsys(req)->admin_timeout;
}
rq = nvme_alloc_request(q, req->cmd, 0);
@@ -494,7 +494,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
return nvmet_setup_passthru_command(req);
default:
/* Reject commands not in the allowlist above */
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ return nvmet_report_invalid_opcode(req);
}
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index aacf06f0b431..8b0485ada315 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -379,7 +379,7 @@ err:
return NVME_SC_INTERNAL;
}
-static void nvmet_tcp_ddgst(struct ahash_request *hash,
+static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
struct nvmet_tcp_cmd *cmd)
{
ahash_request_set_crypt(hash, cmd->req.sg,
@@ -387,6 +387,23 @@ static void nvmet_tcp_ddgst(struct ahash_request *hash,
crypto_ahash_digest(hash);
}
+static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
+ struct nvmet_tcp_cmd *cmd)
+{
+ struct scatterlist sg;
+ struct kvec *iov;
+ int i;
+
+ crypto_ahash_init(hash);
+ for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
+ sg_init_one(&sg, iov->iov_base, iov->iov_len);
+ ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
+ crypto_ahash_update(hash);
+ }
+ ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
+ crypto_ahash_final(hash);
+}
+
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
@@ -411,7 +428,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
if (queue->data_digest) {
pdu->hdr.flags |= NVME_TCP_F_DDGST;
- nvmet_tcp_ddgst(queue->snd_hash, cmd);
+ nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
}
if (cmd->queue->hdr_digest) {
@@ -1060,7 +1077,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
struct nvmet_tcp_queue *queue = cmd->queue;
- nvmet_tcp_ddgst(queue->rcv_hash, cmd);
+ nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
queue->offset = 0;
queue->left = NVME_TCP_DIGEST_LENGTH;
queue->rcv_state = NVMET_TCP_RECV_DDGST;
@@ -1081,14 +1098,14 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
cmd->rbytes_done += ret;
}
+ if (queue->data_digest) {
+ nvmet_tcp_prep_recv_ddgst(cmd);
+ return 0;
+ }
nvmet_tcp_unmap_pdu_iovec(cmd);
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
cmd->rbytes_done == cmd->req.transfer_len) {
- if (queue->data_digest) {
- nvmet_tcp_prep_recv_ddgst(cmd);
- return 0;
- }
cmd->req.execute(&cmd->req);
}
@@ -1468,17 +1485,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
if (inet->rcv_tos > 0)
ip_sock_set_tos(sock->sk, inet->rcv_tos);
+ ret = 0;
write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_user_data = queue;
- queue->data_ready = sock->sk->sk_data_ready;
- sock->sk->sk_data_ready = nvmet_tcp_data_ready;
- queue->state_change = sock->sk->sk_state_change;
- sock->sk->sk_state_change = nvmet_tcp_state_change;
- queue->write_space = sock->sk->sk_write_space;
- sock->sk->sk_write_space = nvmet_tcp_write_space;
+ if (sock->sk->sk_state != TCP_ESTABLISHED) {
+ /*
+ * If the socket is already closing, don't even start
+ * consuming it
+ */
+ ret = -ENOTCONN;
+ } else {
+ sock->sk->sk_user_data = queue;
+ queue->data_ready = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+ queue->state_change = sock->sk->sk_state_change;
+ sock->sk->sk_state_change = nvmet_tcp_state_change;
+ queue->write_space = sock->sk->sk_write_space;
+ sock->sk->sk_write_space = nvmet_tcp_write_space;
+ queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
+ }
write_unlock_bh(&sock->sk->sk_callback_lock);
- return 0;
+ return ret;
}
static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
@@ -1526,8 +1553,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
if (ret)
goto out_destroy_sq;
- queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
-
return 0;
out_destroy_sq:
mutex_lock(&nvmet_tcp_queue_mutex);
diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
index c14e3249a14d..6109b3806b12 100644
--- a/drivers/nvme/target/trace.h
+++ b/drivers/nvme/target/trace.h
@@ -48,10 +48,13 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
static inline void __assign_req_name(char *name, struct nvmet_req *req)
{
- if (req->ns)
- strncpy(name, req->ns->device_path, DISK_NAME_LEN);
- else
+ if (!req->ns) {
memset(name, 0, DISK_NAME_LEN);
+ return;
+ }
+
+ strncpy(name, req->ns->device_path,
+ min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path)));
}
#endif