author     Keith Busch <kbusch@kernel.org>  2021-09-09 08:54:52 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2021-09-22 12:27:57 +0200
commit     8f8ad122ffe01efb7c14b34f9c33b8b16e02fec5 (patch)
tree       dde11a7e229e2ab4c70c2f8e96e009459ac477c2
parent     c586bc31d5b9ae7ed202fc0dcaffbaaed77bde06 (diff)
nvme-tcp: fix io_work priority inversion
commit 70f437fb4395ad4d1d16fab9a1ad9fbc9fc0579b upstream.

Dispatching requests inline with the .queue_rq() call may block while
holding the send_mutex. If the tcp io_work also happens to schedule, it
may see the req_list is non-empty, leaving "pending" true and remaining
in TASK_RUNNING. Since io_work is of higher scheduling priority, the
.queue_rq task may not get a chance to run, blocking forward progress
and leading to io timeouts.

Instead of checking for pending requests within io_work, let the
queueing restart io_work outside the send_mutex lock if there is more
work to be done.

Fixes: a0fdd1418007f ("nvme-tcp: rerun io_work if req_list is not empty")
Reported-by: Samuel Jones <sjones@kalrayinc.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
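As an aside, the fix can be read as a general locking pattern: the submitter decides whether the worker needs another pass only after it has dropped the send lock, and only if any work is actually left. Below is a minimal user-space sketch of that pattern, not the driver code itself; every name in it (work_queue, queue_request, kick_worker, queue_more) is hypothetical.

/*
 * Minimal user-space sketch of the pattern the fix adopts, assuming a
 * hypothetical work_queue type; this is not the nvme-tcp code itself.
 * The submitter tries to send inline under a mutex, and only after the
 * mutex is released does it decide whether the worker needs another pass.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct work_queue {
	pthread_mutex_t send_mutex;
	int queued;		/* requests still waiting to be sent */
	bool worker_kicked;	/* stand-in for queue_work_on() */
};

/* analogous to nvme_tcp_queue_more(): is there still work left? */
static bool queue_more(struct work_queue *q)
{
	return q->queued > 0;
}

/* stand-in for queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work) */
static void kick_worker(struct work_queue *q)
{
	q->worker_kicked = true;
}

static void queue_request(struct work_queue *q, bool last)
{
	q->queued++;

	if (pthread_mutex_trylock(&q->send_mutex) == 0) {
		q->queued = 0;	/* send everything inline, like nvme_tcp_send_all() */
		pthread_mutex_unlock(&q->send_mutex);
	}

	/*
	 * The worker is rescheduled only outside the lock and only if work
	 * remains, so it never stays runnable just because a lock holder of
	 * lower scheduling priority has not finished yet.
	 */
	if (last && queue_more(q))
		kick_worker(q);
}

int main(void)
{
	struct work_queue q = { .send_mutex = PTHREAD_MUTEX_INITIALIZER };

	queue_request(&q, true);
	printf("queued=%d worker_kicked=%d\n", q.queued, q.worker_kicked);
	return 0;
}

Built with "cc sketch.c -pthread", the single call in main() drains the queue inline, so queue_more() returns false and the worker is never kicked, mirroring how the patched nvme_tcp_queue_request() only wakes io_work when there is still something to send.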
-rw-r--r--  drivers/nvme/host/tcp.c  |  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c9a925999c6e..a6b3b0762763 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -273,6 +273,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
 	} while (ret > 0);
 }
 
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+	return !list_empty(&queue->send_list) ||
+		!llist_empty(&queue->req_list) || queue->more_requests;
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		bool sync, bool last)
 {
@@ -293,9 +299,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		nvme_tcp_send_all(queue);
 		queue->more_requests = false;
 		mutex_unlock(&queue->send_mutex);
-	} else if (last) {
-		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 	}
+
+	if (last && nvme_tcp_queue_more(queue))
+		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
 
 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
@@ -890,12 +897,6 @@ done:
 	read_unlock_bh(&sk->sk_callback_lock);
 }
 
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
-{
-	return !list_empty(&queue->send_list) ||
-		!llist_empty(&queue->req_list) || queue->more_requests;
-}
-
 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 {
 	queue->request = NULL;
@@ -1132,8 +1133,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
 				pending = true;
 			else if (unlikely(result < 0))
 				break;
-		} else
-			pending = !llist_empty(&queue->req_list);
+		}
 
 		result = nvme_tcp_try_recv(queue);
 		if (result > 0)