author     Christoph Hellwig <hch@lst.de>    2015-10-22 14:03:33 +0200
committer  Jens Axboe <axboe@fb.com>         2015-12-22 09:38:23 -0700
commit     7385014c073263b077442439299fad013edd4409
tree       862c56f67438962389649e8b706f4026f0c7eb0e
parent     749941f2365db8198b5d75c83a575ee6e55bf03b
nvme: only add a controller to dev_list after it's been fully initialized
Without this we can easily get bad dereferences on nvmeq->d_db when the nvme kthread tries to poll the CQs for controllers that are in a half-initialized state.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
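To make the ordering concrete, below is a minimal, self-contained userspace sketch of the same publish-after-init pattern the patch adopts. It is an analogy only, not driver code: the names (struct dev, poller, probe, dev_list) and the pthread setting are illustrative. The point is that a background thread walks a shared list and dereferences each node's doorbell pointer, so a node may be linked in only after that pointer is valid.

    /* Hypothetical analogy of the race the patch closes (not driver code). */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
            struct dev *next;
            int *db;                /* "doorbell": must be valid before publish */
    };

    static struct dev *dev_list;
    static pthread_mutex_t dev_list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Background "kthread": walks the list and touches every doorbell. */
    static void *poller(void *arg)
    {
            (void)arg;
            for (int i = 0; i < 100000; i++) {
                    pthread_mutex_lock(&dev_list_lock);
                    for (struct dev *d = dev_list; d; d = d->next)
                            (*d->db)++;     /* safe only if db was set before list add */
                    pthread_mutex_unlock(&dev_list_lock);
            }
            return NULL;
    }

    /* "Probe": finish initialization first, then make the device visible. */
    static void probe(struct dev *d)
    {
            d->db = calloc(1, sizeof(*d->db));      /* full init first ...   */

            pthread_mutex_lock(&dev_list_lock);     /* ... then publish      */
            d->next = dev_list;
            dev_list = d;
            pthread_mutex_unlock(&dev_list_lock);
    }

    int main(void)
    {
            pthread_t t;
            struct dev d = { 0 };

            pthread_create(&t, NULL, poller, NULL);
            probe(&d);
            pthread_join(t, NULL);
            printf("doorbell writes seen: %d\n", *d.db);
            free(d.db);
            return 0;
    }

If probe() added the node to the list before allocating d->db, the poller could dereference a NULL (or stale) pointer; that is the half-initialized-controller crash described above, and it is why the patch moves the dev_list registration to the end of nvme_probe_work().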
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/nvme/host/pci.c  |  51
1 file changed, 30 insertions(+), 21 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1f92b328522a..d82f08d671e6 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1994,6 +1994,30 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
kthread_stop(kworker_task);
}
+static int nvme_dev_list_add(struct nvme_dev *dev)
+{
+ bool start_thread = false;
+
+ spin_lock(&dev_list_lock);
+ if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+ start_thread = true;
+ nvme_thread = NULL;
+ }
+ list_add(&dev->node, &dev_list);
+ spin_unlock(&dev_list_lock);
+
+ if (start_thread) {
+ nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+ wake_up_all(&nvme_kthread_wait);
+ } else
+ wait_event_killable(nvme_kthread_wait, nvme_thread);
+
+ if (IS_ERR_OR_NULL(nvme_thread))
+ return nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+
+ return 0;
+}
+
/*
* Remove the node from the device list and check
* for whether or not we need to stop the nvme_thread.
@@ -2109,7 +2133,6 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
static void nvme_probe_work(struct work_struct *work)
{
struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
- bool start_thread = false;
int result;
result = nvme_dev_map(dev);
@@ -2120,25 +2143,6 @@ static void nvme_probe_work(struct work_struct *work)
if (result)
goto unmap;
- spin_lock(&dev_list_lock);
- if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
- start_thread = true;
- nvme_thread = NULL;
- }
- list_add(&dev->node, &dev_list);
- spin_unlock(&dev_list_lock);
-
- if (start_thread) {
- nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
- wake_up_all(&nvme_kthread_wait);
- } else
- wait_event_killable(nvme_kthread_wait, nvme_thread);
-
- if (IS_ERR_OR_NULL(nvme_thread)) {
- result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
- goto disable;
- }
-
nvme_init_queue(dev->queues[0], 0);
result = nvme_alloc_admin_tags(dev);
if (result)
@@ -2154,6 +2158,10 @@ static void nvme_probe_work(struct work_struct *work)
dev->ctrl.event_limit = 1;
+ result = nvme_dev_list_add(dev);
+ if (result)
+ goto remove;
+
/*
* Keep the controller around but remove all namespaces if we don't have
* any working I/O queue.
@@ -2168,6 +2176,8 @@ static void nvme_probe_work(struct work_struct *work)
return;
+ remove:
+ nvme_dev_list_remove(dev);
free_tags:
nvme_dev_remove_admin(dev);
blk_put_queue(dev->ctrl.admin_q);
@@ -2175,7 +2185,6 @@ static void nvme_probe_work(struct work_struct *work)
dev->queues[0]->tags = NULL;
disable:
nvme_disable_queue(dev, 0);
- nvme_dev_list_remove(dev);
unmap:
nvme_dev_unmap(dev);
out: