author     Peter-Jan Gootzen <pgootzen@nvidia.com>    2024-05-17 21:04:35 +0200
committer  Michael S. Tsirkin <mst@redhat.com>        2024-07-09 08:42:42 -0400
commit     106e4df1206b2c239ba13a9ec2fd1e9b754bd455 (patch)
tree       9200fbcdc68b3cbf08d53284a8241221c888e4c8 /fs
parent     2106e1f444d9d79f5c3784e5847bfdb6cc3ca69f (diff)
virtio-fs: improved request latencies when Virtio queue is full
Currently, when the Virtio queue is full, a work item is scheduled to run in 1 ms and retry adding the request to the queue. That is a long time on the scale at which a virtio-fs device can operate: on a DPU, baseline latency is around 30-40 us without going to a remote server (4k, QD=1). This patch changes the retry behavior to refill the Virtio queue immediately when a completion has been received. In our tests with a workload IO depth of 2x the Virtio queue size and a DPU-powered virtio-fs device (NVIDIA BlueField DPU), this reduces 99.9th-percentile latencies by 60x and slightly increases overall throughput.

Signed-off-by: Peter-Jan Gootzen <pgootzen@nvidia.com>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Reviewed-by: Yoray Zack <yorayz@nvidia.com>
Message-Id: <20240517190435.152096-3-pgootzen@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
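In essence the patch replaces a timer-based retry with a completion-driven kick. The sketch below is a simplified illustration, not the patch itself: the struct is cut down to just the fields involved, and only the field names and the two scheduling calls are taken from the driver.

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	/* Cut-down stand-in for the driver's struct virtio_fs_vq. */
	struct virtio_fs_vq {
		spinlock_t lock;
		struct list_head queued_reqs;     /* requests waiting for a free slot */
		struct work_struct dispatch_work; /* was: struct delayed_work */
	};

	/*
	 * Old behavior: on -ENOSPC, re-arm a 1 ms timer and hope the queue
	 * has drained by the time it fires:
	 *
	 *	schedule_delayed_work(&fsvq->dispatch_work, msecs_to_jiffies(1));
	 */

	/* New behavior: the done_work handlers kick the dispatcher as soon
	 * as a completion proves that descriptors have been freed. */
	static void kick_dispatch_on_completion(struct virtio_fs_vq *fsvq)
	{
		spin_lock(&fsvq->lock);
		if (!list_empty(&fsvq->queued_reqs))
			schedule_work(&fsvq->dispatch_work);
		spin_unlock(&fsvq->lock);
	}

schedule_work() does nothing if the work item is already pending, so kicking it from every completion is idempotent and cheap, and the 1 ms timer granularity disappears from the resubmission path entirely.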
Diffstat (limited to 'fs')
-rw-r--r--  fs/fuse/virtio_fs.c | 34 +++++++++++++++++++---------------
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 3cc2a1f81014..728f14e077bd 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -51,7 +51,7 @@ struct virtio_fs_vq {
struct work_struct done_work;
struct list_head queued_reqs;
struct list_head end_reqs; /* End these requests */
- struct delayed_work dispatch_work;
+ struct work_struct dispatch_work;
struct fuse_dev *fud;
bool connected;
long in_flight;
@@ -233,7 +233,7 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
}
flush_work(&fsvq->done_work);
- flush_delayed_work(&fsvq->dispatch_work);
+ flush_work(&fsvq->dispatch_work);
}
static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
@@ -408,6 +408,10 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work)
dec_in_flight_req(fsvq);
}
} while (!virtqueue_enable_cb(vq));
+
+ if (!list_empty(&fsvq->queued_reqs))
+ schedule_work(&fsvq->dispatch_work);
+
spin_unlock(&fsvq->lock);
}
@@ -415,7 +419,7 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
struct fuse_req *req;
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
- dispatch_work.work);
+ dispatch_work);
int ret;
pr_debug("virtio-fs: worker %s called.\n", __func__);
@@ -450,8 +454,6 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work)
if (ret == -ENOSPC) {
spin_lock(&fsvq->lock);
list_add_tail(&req->list, &fsvq->queued_reqs);
- schedule_delayed_work(&fsvq->dispatch_work,
- msecs_to_jiffies(1));
spin_unlock(&fsvq->lock);
return;
}
@@ -498,8 +500,6 @@ static int send_forget_request(struct virtio_fs_vq *fsvq,
pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
ret);
list_add_tail(&forget->list, &fsvq->queued_reqs);
- schedule_delayed_work(&fsvq->dispatch_work,
- msecs_to_jiffies(1));
if (!in_flight)
inc_in_flight_req(fsvq);
/* Queue is full */
@@ -531,7 +531,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
struct virtio_fs_forget *forget;
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
- dispatch_work.work);
+ dispatch_work);
pr_debug("virtio-fs: worker %s called.\n", __func__);
while (1) {
spin_lock(&fsvq->lock);
@@ -709,6 +709,12 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
virtio_fs_request_complete(req, fsvq);
}
}
+
+ /* Try to push previously queued requests, as the queue might no longer be full */
+ spin_lock(&fsvq->lock);
+ if (!list_empty(&fsvq->queued_reqs))
+ schedule_work(&fsvq->dispatch_work);
+ spin_unlock(&fsvq->lock);
}
static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
@@ -770,12 +776,12 @@ static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
if (vq_type == VQ_REQUEST) {
INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
- INIT_DELAYED_WORK(&fsvq->dispatch_work,
- virtio_fs_request_dispatch_work);
+ INIT_WORK(&fsvq->dispatch_work,
+ virtio_fs_request_dispatch_work);
} else {
INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
- INIT_DELAYED_WORK(&fsvq->dispatch_work,
- virtio_fs_hiprio_dispatch_work);
+ INIT_WORK(&fsvq->dispatch_work,
+ virtio_fs_hiprio_dispatch_work);
}
}
@@ -1375,8 +1381,6 @@ __releases(fiq->lock)
spin_lock(&fsvq->lock);
list_add_tail(&req->list, &fsvq->queued_reqs);
inc_in_flight_req(fsvq);
- schedule_delayed_work(&fsvq->dispatch_work,
- msecs_to_jiffies(1));
spin_unlock(&fsvq->lock);
return;
}
@@ -1386,7 +1390,7 @@ __releases(fiq->lock)
/* Can't end request in submission context. Use a worker */
spin_lock(&fsvq->lock);
list_add_tail(&req->list, &fsvq->end_reqs);
- schedule_delayed_work(&fsvq->dispatch_work, 0);
+ schedule_work(&fsvq->dispatch_work);
spin_unlock(&fsvq->lock);
return;
}
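To put the commit's numbers in proportion: with a ~30-40 us baseline request latency (4k, QD=1 on a BlueField DPU), a fixed 1 ms retry delay is roughly 25-33x the service time of an entire request, so any request that arrives at a full queue stalls for over an order of magnitude longer than the operation itself. Kicking the dispatcher from the completion path instead bounds that stall by the time until the next completion, which at these speeds is tens of microseconds; the reported 60x drop in 99.9th-percentile latency is consistent with removing the millisecond timer from the tail.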