author: Mike Christie <michael.christie@oracle.com> 2024-03-15 19:46:59 -0500
committer: Michael S. Tsirkin <mst@redhat.com> 2024-05-22 08:31:15 -0400
commit: b1b2ce58ed23c5d56e0ab299a5271ac01f95b75c
tree: 40a9175d8907d5cc071921e3d682ed8014c27240
parent: 7b1b5c7fff05bdf2dc21ee124907df1f8cc819ad
vhost-scsi: Handle vhost_vq_work_queue failures for events
Currently, we can try to queue an event's work before the vhost_task is
created. When this happens we just drop it in vhost_scsi_do_plug before
even calling vhost_vq_work_queue. During a device shutdown we do the
same thing after vhost_scsi_clear_endpoint has cleared the backends.

In the next patches we will be able to kill the vhost_task before we
have cleared the endpoint. In that case, vhost_vq_work_queue can fail
and we will leak the event's memory. Handle the failure by just freeing
the event. This is safe to do, because vhost_vq_work_queue will only
return failure for us when the vhost_task is killed, and so userspace
will not be able to handle events even if we sent them.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Message-Id: <20240316004707.45557-2-michael.christie@oracle.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
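[Editor's note] As illustration only, the following is a minimal userspace C sketch of the pattern this patch establishes: events go on a list first, the work item is queued second, and if queueing fails because the worker task is already gone, the same drain routine runs inline with drop=true so the events are freed rather than delivered or leaked. All names here (event, send_evt, complete_events, queue_work) are hypothetical stand-ins, not the kernel's llist or vhost APIs.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	struct event *next;
	int reason;
};

static struct event *event_list;	/* stand-in for vs->vs_event_list */
static bool worker_alive;		/* stand-in for the vhost_task being live */

static void deliver_event(struct event *evt)
{
	printf("delivered event, reason %d\n", evt->reason);
}

/* Mirrors vhost_scsi_complete_events(): drain the list, optionally dropping. */
static void complete_events(bool drop)
{
	struct event *evt = event_list;

	event_list = NULL;
	while (evt) {
		struct event *next = evt->next;

		if (!drop)
			deliver_event(evt);
		free(evt);		/* freed on both paths, so nothing leaks */
		evt = next;
	}
}

/* Stand-in for vhost_vq_work_queue(): fails once the worker is killed. */
static bool queue_work(void)
{
	return worker_alive;
}

/* Mirrors vhost_scsi_send_evt(): add to the list, then try to queue work. */
static void send_evt(int reason)
{
	struct event *evt = calloc(1, sizeof(*evt));

	if (!evt)
		return;
	evt->reason = reason;
	evt->next = event_list;
	event_list = evt;

	if (!queue_work())
		complete_events(true);	/* worker gone: free without delivering */
}

int main(void)
{
	worker_alive = true;
	send_evt(1);
	complete_events(false);		/* normal path: the worker drains the list */

	worker_alive = false;
	send_evt(2);			/* failure path: freed immediately, no leak */
	return 0;
}

The key design point the sketch mirrors is that enqueue and drain share one free path: the drop flag only skips delivery, never the free, which is what closes the leak.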
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/vhost/scsi.c | 17
1 file changed, 12 insertions, 5 deletions
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 282aac45c690..f34f9895b898 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -497,10 +497,8 @@ again:
 		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
 }
 
-static void vhost_scsi_evt_work(struct vhost_work *work)
+static void vhost_scsi_complete_events(struct vhost_scsi *vs, bool drop)
 {
-	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
-					     vs_event_work);
 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
 	struct vhost_scsi_evt *evt, *t;
 	struct llist_node *llnode;
@@ -508,12 +506,20 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
 	mutex_lock(&vq->mutex);
 	llnode = llist_del_all(&vs->vs_event_list);
 	llist_for_each_entry_safe(evt, t, llnode, list) {
-		vhost_scsi_do_evt_work(vs, evt);
+		if (!drop)
+			vhost_scsi_do_evt_work(vs, evt);
 		vhost_scsi_free_evt(vs, evt);
 	}
 	mutex_unlock(&vq->mutex);
 }
 
+static void vhost_scsi_evt_work(struct vhost_work *work)
+{
+	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+					     vs_event_work);
+	vhost_scsi_complete_events(vs, false);
+}
+
 static int vhost_scsi_copy_sgl_to_iov(struct vhost_scsi_cmd *cmd)
 {
 	struct iov_iter *iter = &cmd->saved_iter;
@@ -1509,7 +1515,8 @@ vhost_scsi_send_evt(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
 	}
 
 	llist_add(&evt->list, &vs->vs_event_list);
-	vhost_vq_work_queue(vq, &vs->vs_event_work);
+	if (!vhost_vq_work_queue(vq, &vs->vs_event_work))
+		vhost_scsi_complete_events(vs, true);
 }
 
 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)