Diffstat (limited to 'drivers/nvme')
-rw-r--r--   drivers/nvme/host/core.c      |  2
-rw-r--r--   drivers/nvme/host/multipath.c | 14
-rw-r--r--   drivers/nvme/target/auth.c    |  2
-rw-r--r--   drivers/nvme/target/pci-epf.c | 88
4 files changed, 67 insertions, 39 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b502ac07483b..eb6ea8acb3cc 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4300,7 +4300,7 @@ static void nvme_scan_work(struct work_struct *work)
 	if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events))
 		nvme_queue_scan(ctrl);
 #ifdef CONFIG_NVME_MULTIPATH
-	else
+	else if (ctrl->ana_log_buf)
 		/* Re-read the ANA log page to not miss updates */
 		queue_work(nvme_wq, &ctrl->ana_work);
 #endif
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 05eccd96d34a..250f3da67cc9 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -1051,6 +1051,13 @@ void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head)
 
 	list_for_each_entry_rcu(ns, &head->list, siblings) {
 		/*
+		 * Ensure that ns path disk node is already added otherwise we
+		 * may get invalid kobj name for target
+		 */
+		if (!test_bit(GD_ADDED, &ns->disk->state))
+			continue;
+
+		/*
 		 * Avoid creating link if it already exists for the given path.
 		 * When path ana state transitions from optimized to non-
 		 * optimized or vice-versa, the nvme_mpath_set_live() is
@@ -1065,13 +1072,6 @@ void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head)
 		if (test_and_set_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags))
 			continue;
 
-		/*
-		 * Ensure that ns path disk node is already added otherwise we
-		 * may get invalid kobj name for target
-		 */
-		if (!test_bit(GD_ADDED, &ns->disk->state))
-			continue;
-
 		target = disk_to_dev(ns->disk);
 		/*
 		 * Create sysfs link from head gendisk kobject @kobj to the
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index 0b0645ac5df4..cef8d77f477b 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -240,7 +240,7 @@ void nvmet_auth_sq_free(struct nvmet_sq *sq)
 {
 	cancel_delayed_work(&sq->auth_expired_work);
 #ifdef CONFIG_NVME_TARGET_TCP_TLS
-	sq->tls_key = 0;
+	sq->tls_key = NULL;
 #endif
 	kfree(sq->dhchap_c1);
 	sq->dhchap_c1 = NULL;
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 51c27b32248d..7fab7f3d79b7 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1648,16 +1648,17 @@ static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
 {
 	struct nvmet_pci_epf_iod *iod;
 	int ret, n = 0;
+	u16 head = sq->head;
 
 	sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
-	while (sq->head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
+	while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
 		iod = nvmet_pci_epf_alloc_iod(sq);
 		if (!iod)
 			break;
 
 		/* Get the NVMe command submitted by the host. */
 		ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
-					     sq->pci_addr + sq->head * sq->qes,
+					     sq->pci_addr + head * sq->qes,
 					     sq->qes, DMA_FROM_DEVICE);
 		if (ret) {
 			/* Not much we can do... */
@@ -1666,12 +1667,13 @@ static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
 		}
 
 		dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n",
-			sq->qid, sq->head, sq->tail,
+			sq->qid, head, sq->tail,
 			nvmet_pci_epf_iod_name(iod));
 
-		sq->head++;
-		if (sq->head == sq->depth)
-			sq->head = 0;
+		head++;
+		if (head == sq->depth)
+			head = 0;
+		WRITE_ONCE(sq->head, head);
 		n++;
 
 		queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);
@@ -1761,8 +1763,17 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
 		if (!iod)
 			break;
 
-		/* Post the IOD completion entry. */
+		/*
+		 * Post the IOD completion entry. If the IOD request was
+		 * executed (req->execute() called), the CQE is already
+		 * initialized. However, the IOD may have been failed before
+		 * that, leaving the CQE not properly initialized. So always
+		 * initialize it here.
+		 */
 		cqe = &iod->cqe;
+		cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head));
+		cqe->sq_id = cpu_to_le16(iod->sq->qid);
+		cqe->command_id = iod->cmd.common.command_id;
 		cqe->status = cpu_to_le16((iod->status << 1) | cq->phase);
 
 		dev_dbg(ctrl->dev,
@@ -1800,6 +1811,21 @@ static void nvmet_pci_epf_cq_work(struct work_struct *work)
 				   NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
 }
 
+static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl)
+{
+	struct nvmet_ctrl *tctrl = ctrl->tctrl;
+
+	/* Initialize controller status. */
+	tctrl->csts = 0;
+	ctrl->csts = 0;
+	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
+
+	/* Initialize controller configuration and start polling. */
+	tctrl->cc = 0;
+	ctrl->cc = 0;
+	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
+}
+
 static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
 {
 	u64 pci_addr, asq, acq;
@@ -1865,18 +1891,20 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
 	return 0;
 
 err:
-	ctrl->csts = 0;
+	nvmet_pci_epf_clear_ctrl_config(ctrl);
 	return -EINVAL;
 }
 
-static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
+static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl,
+				       bool shutdown)
 {
 	int qid;
 
 	if (!ctrl->enabled)
 		return;
 
-	dev_info(ctrl->dev, "Disabling controller\n");
+	dev_info(ctrl->dev, "%s controller\n",
+		 shutdown ? "Shutting down" : "Disabling");
 
 	ctrl->enabled = false;
 	cancel_delayed_work_sync(&ctrl->poll_sqs);
@@ -1893,6 +1921,11 @@ static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
 	nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
 
 	ctrl->csts &= ~NVME_CSTS_RDY;
+	if (shutdown) {
+		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
+		ctrl->cc &= ~NVME_CC_ENABLE;
+		nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
+	}
 }
 
 static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
@@ -1919,12 +1952,10 @@ static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
 	}
 
 	if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc))
-		nvmet_pci_epf_disable_ctrl(ctrl);
+		nvmet_pci_epf_disable_ctrl(ctrl, false);
 
-	if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) {
-		nvmet_pci_epf_disable_ctrl(ctrl);
-		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
-	}
+	if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc))
+		nvmet_pci_epf_disable_ctrl(ctrl, true);
 
 	if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc))
 		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
@@ -1963,16 +1994,10 @@ static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl)
 	/* Clear Controller Memory Buffer Supported (CMBS). */
 	ctrl->cap &= ~(0x1ULL << 57);
 
-	/* Controller configuration. */
-	ctrl->cc = tctrl->cc & (~NVME_CC_ENABLE);
-
-	/* Controller status. */
-	ctrl->csts = ctrl->tctrl->csts;
-
 	nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap);
 	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver);
-	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
-	nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
+
+	nvmet_pci_epf_clear_ctrl_config(ctrl);
 }
 
 static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf,
@@ -2070,14 +2095,22 @@ out_mempool_exit:
 
 static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
 {
+
+	dev_info(ctrl->dev, "PCI link up\n");
+	ctrl->link_up = true;
+
 	schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
 }
 
 static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
 {
+	dev_info(ctrl->dev, "PCI link down\n");
+	ctrl->link_up = false;
+
 	cancel_delayed_work_sync(&ctrl->poll_cc);
 
-	nvmet_pci_epf_disable_ctrl(ctrl);
+	nvmet_pci_epf_disable_ctrl(ctrl, false);
+	nvmet_pci_epf_clear_ctrl_config(ctrl);
 }
 
 static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
@@ -2300,10 +2333,8 @@ static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
 	if (ret)
 		goto out_clear_bar;
 
-	if (!epc_features->linkup_notifier) {
-		ctrl->link_up = true;
+	if (!epc_features->linkup_notifier)
 		nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
-	}
 
 	return 0;
 
@@ -2319,7 +2350,6 @@ static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
 	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
 	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
 
-	ctrl->link_up = false;
 	nvmet_pci_epf_destroy_ctrl(ctrl);
 
 	nvmet_pci_epf_deinit_dma(nvme_epf);
@@ -2331,7 +2361,6 @@ static int nvmet_pci_epf_link_up(struct pci_epf *epf)
 	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
 	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
 
-	ctrl->link_up = true;
 	nvmet_pci_epf_start_ctrl(ctrl);
 
 	return 0;
@@ -2342,7 +2371,6 @@ static int nvmet_pci_epf_link_down(struct pci_epf *epf)
 	struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
 	struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
 
-	ctrl->link_up = false;
 	nvmet_pci_epf_stop_ctrl(ctrl);
 
 	return 0;