| author | Max Gurtovoy <maxg@mellanox.com> | 2020-07-31 15:38:22 +0800 |
| committer | Michael S. Tsirkin <mst@redhat.com> | 2020-08-05 11:08:41 -0400 |
| commit | 0ea9ee430e74b16c6b17e70757d1c26d8d140e1f (patch) | |
| tree | 6aa33641d8e365181f33587a1bc6419b8d855ebf /drivers/vdpa/vdpa_sim | |
| parent | 6234f80574d7569444d8718355fa2838e92b158b (diff) | |
vdpasim: protect concurrent access to iommu iotlb
The IOMMU IOTLB can be accessed by different cores performing IO through
multiple virtqueues. Add a spinlock to synchronize iotlb accesses.

The race is easily reproduced by running more than one pktgen thread to
inject traffic into the vdpa simulator.
Fixes: 2c53d0f64c06f ("vdpasim: vDPA device simulator")
Cc: stable@vger.kernel.org
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20200731073822.13326-1-jasowang@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
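
The fix is one locking pattern applied to every path that touches the simulator's vhost_iotlb. The sketch below condenses that pattern; `struct vdpasim_sketch` and `vdpasim_dma_map_sketch()` are illustrative names only, not code from the patch (the real changes are in drivers/vdpa/vdpa_sim/vdpa_sim.c, shown in the diff below), and initializing the lock at device creation is assumed rather than shown.

```c
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/vhost_iotlb.h>

/*
 * Condensed view of the simulator state touched by this patch:
 * one shared vhost_iotlb plus the new lock that serializes it.
 * (In the real driver this lives in struct vdpasim; iommu_lock is
 * assumed to be set up with spin_lock_init() at device creation.)
 */
struct vdpasim_sketch {
	struct vhost_iotlb *iommu;
	spinlock_t iommu_lock;	/* protects all vhost_iotlb_* calls */
};

/*
 * Hypothetical helper mirroring the pattern the patch applies to
 * vdpasim_dma_map(), vdpasim_dma_unmap(), the DMA ops and set_map():
 * every iotlb update happens under iommu_lock, so two cores mapping
 * or unmapping through different virtqueues cannot corrupt the iotlb
 * interval tree.
 */
static int vdpasim_dma_map_sketch(struct vdpasim_sketch *s, u64 iova,
				  u64 size, u64 pa, u32 perm)
{
	int ret;

	spin_lock(&s->iommu_lock);
	ret = vhost_iotlb_add_range(s->iommu, iova, iova + size - 1, pa, perm);
	spin_unlock(&s->iommu_lock);

	return ret;
}
```

Because the same iommu_lock is taken in vdpasim_set_map(), the DMA ops and the reset path, a reset or remap cannot race with an in-flight map or unmap issued from another virtqueue.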
Diffstat (limited to 'drivers/vdpa/vdpa_sim')
| -rw-r--r-- | drivers/vdpa/vdpa_sim/vdpa_sim.c | 31 |
1 file changed, 27 insertions(+), 4 deletions(-)
```diff
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index b7d5727fde4c..2a55b463e64c 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -71,6 +71,8 @@ struct vdpasim {
 	u32 status;
 	u32 generation;
 	u64 features;
+	/* spinlock to synchronize iommu table */
+	spinlock_t iommu_lock;
 };
 
 /* TODO: cross-endian support */
@@ -136,7 +138,9 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
 	for (i = 0; i < VDPASIM_VQ_NUM; i++)
 		vdpasim_vq_reset(&vdpasim->vqs[i]);
 
+	spin_lock(&vdpasim->iommu_lock);
 	vhost_iotlb_reset(vdpasim->iommu);
+	spin_unlock(&vdpasim->iommu_lock);
 
 	vdpasim->features = 0;
 	vdpasim->status = 0;
@@ -254,8 +258,10 @@ static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
 	/* For simplicity, use identical mapping to avoid e.g iova
 	 * allocator.
 	 */
+	spin_lock(&vdpasim->iommu_lock);
 	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
 				    pa, dir_to_perm(dir));
+	spin_unlock(&vdpasim->iommu_lock);
 	if (ret)
 		return DMA_MAPPING_ERROR;
 
@@ -269,8 +275,10 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	struct vdpasim *vdpasim = dev_to_sim(dev);
 	struct vhost_iotlb *iommu = vdpasim->iommu;
 
+	spin_lock(&vdpasim->iommu_lock);
 	vhost_iotlb_del_range(iommu, (u64)dma_addr,
 			      (u64)dma_addr + size - 1);
+	spin_unlock(&vdpasim->iommu_lock);
 }
 
 static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
@@ -282,9 +290,10 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
 	void *addr = kmalloc(size, flag);
 	int ret;
 
-	if (!addr)
+	spin_lock(&vdpasim->iommu_lock);
+	if (!addr) {
 		*dma_addr = DMA_MAPPING_ERROR;
-	else {
+	} else {
 		u64 pa = virt_to_phys(addr);
 
 		ret = vhost_iotlb_add_range(iommu, (u64)pa,
@@ -297,6 +306,7 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
 		} else
 			*dma_addr = (dma_addr_t)pa;
 	}
+	spin_unlock(&vdpasim->iommu_lock);
 
 	return addr;
 }
@@ -308,8 +318,11 @@ static void vdpasim_free_coherent(struct device *dev, size_t size,
 	struct vdpasim *vdpasim = dev_to_sim(dev);
 	struct vhost_iotlb *iommu = vdpasim->iommu;
 
+	spin_lock(&vdpasim->iommu_lock);
 	vhost_iotlb_del_range(iommu, (u64)dma_addr,
 			      (u64)dma_addr + size - 1);
+	spin_unlock(&vdpasim->iommu_lock);
+
 	kfree(phys_to_virt((uintptr_t)dma_addr));
 }
 
@@ -555,6 +568,7 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
 	u64 start = 0ULL, last = 0ULL - 1;
 	int ret;
 
+	spin_lock(&vdpasim->iommu_lock);
 	vhost_iotlb_reset(vdpasim->iommu);
 
 	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
@@ -564,10 +578,12 @@ static int vdpasim_set_map(struct vdpa_device *vdpa,
 		if (ret)
 			goto err;
 	}
+	spin_unlock(&vdpasim->iommu_lock);
 	return 0;
 
 err:
 	vhost_iotlb_reset(vdpasim->iommu);
+	spin_unlock(&vdpasim->iommu_lock);
 	return ret;
 }
 
@@ -575,16 +591,23 @@ static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
 			   u64 pa, u32 perm)
 {
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+	int ret;
 
-	return vhost_iotlb_add_range(vdpasim->iommu, iova,
-				     iova + size - 1, pa, perm);
+	spin_lock(&vdpasim->iommu_lock);
+	ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
+				    perm);
+	spin_unlock(&vdpasim->iommu_lock);
+
+	return ret;
 }
 
 static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
 {
 	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
+	spin_lock(&vdpasim->iommu_lock);
 	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
+	spin_unlock(&vdpasim->iommu_lock);
 
 	return 0;
 }
```