author    Eli Cohen <elic@nvidia.com>    2022-03-21 16:13:03 +0200
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2022-07-12 16:34:59 +0200
commit    09674bfd8054b6d3667f7db8489551d3c8c1e3d9 (patch)
tree      c785e0ce2d06a9d455e407966ac779a651b37ea2
parent    008e29d172ca0a4f2f7147b64b06bbaa537500f8 (diff)
vdpa/mlx5: Avoid processing works if workqueue was destroyed
[ Upstream commit ad6dc1daaf29f97f23cc810d60ee01c0e83f4c6b ]

If mlx5_vdpa gets unloaded while a VM is running, the workqueue will be
destroyed. However, vhost might still hold a reference to the kick
function and might attempt to queue new work, which could lead to a
null pointer dereference.

To fix this, set mvdev->wq to NULL just before destroying the workqueue,
and verify in mlx5_vdpa_kick_vq that the workqueue is not NULL before
attempting to queue new work.

Fixes: 5262912ef3cf ("vdpa/mlx5: Add support for control VQ and MAC setting")
Signed-off-by: Eli Cohen <elic@nvidia.com>
Link: https://lore.kernel.org/r/20220321141303.9586-1-elic@nvidia.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
 drivers/vdpa/mlx5/net/mlx5_vnet.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 174895372e7f..467a349dc26c 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1641,7 +1641,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
 		return;
 
 	if (unlikely(is_ctrl_vq_idx(mvdev, idx))) {
-		if (!mvdev->cvq.ready)
+		if (!mvdev->wq || !mvdev->cvq.ready)
 			return;
 
 		queue_work(mvdev->wq, &ndev->cvq_ent.work);
@@ -2626,9 +2626,12 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *dev)
 	struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+	struct workqueue_struct *wq;
 
 	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
-	destroy_workqueue(mvdev->wq);
+	wq = mvdev->wq;
+	mvdev->wq = NULL;
+	destroy_workqueue(wq);
 	_vdpa_unregister_device(dev);
 	mgtdev->ndev = NULL;
 }
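
The ordering the patch relies on can be summarized in a small, self-contained sketch.
This is not part of the commit; the names my_dev, my_kick and my_teardown are
hypothetical placeholders, and only queue_work()/destroy_workqueue() are real kernel APIs.

/*
 * Minimal sketch of the teardown/kick ordering used by the patch.
 * Hypothetical illustration only; not taken from the commit.
 */
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *wq;
	struct work_struct work;
};

/* Datapath side: bail out once teardown has cleared the pointer. */
static void my_kick(struct my_dev *d)
{
	if (!d->wq)
		return;
	queue_work(d->wq, &d->work);
}

/*
 * Teardown side: publish NULL before destroying, so late kicks see
 * NULL rather than a freed workqueue.
 */
static void my_teardown(struct my_dev *d)
{
	struct workqueue_struct *wq = d->wq;

	d->wq = NULL;
	destroy_workqueue(wq);
}

Since destroy_workqueue() drains pending work before freeing the workqueue, any kick
that queued work before the pointer was cleared still completes during teardown.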