author	Alex Williamson <alex.williamson@redhat.com>	2015-02-06 14:19:12 -0700
committer	Alex Williamson <alex.williamson@redhat.com>	2015-02-06 14:19:12 -0700
commit	c5e6688752c25434d71920bc969f9fab60353c5e (patch)
tree	176ad1355f26bc2542e320471ab1691400c0974c /drivers/vfio
parent	babbf1760970f141eb4021288ce0fb7196bc1a23 (diff)
download	linux-stable-c5e6688752c25434d71920bc969f9fab60353c5e.tar.gz
linux-stable-c5e6688752c25434d71920bc969f9fab60353c5e.tar.bz2
linux-stable-c5e6688752c25434d71920bc969f9fab60353c5e.zip
vfio/type1: Add conditional rescheduling
IOMMU operations can be expensive and it's not very difficult for a user to give us a lot of work to do for a map or unmap operation. Killing a large VM with vfio assigned devices can result in soft lockups, and IOMMU tracing shows that we can easily spend 80% of our time with need-resched set. A sprinkling of cond_resched() calls after map and unmap calls has a very tiny effect on performance while resulting in traces with <1% of calls overflowing into need-resched.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
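For reference, a minimal sketch of the pattern the patch applies, not part of the commit: do_one_unit() is a hypothetical stand-in for an expensive per-iteration IOMMU operation such as iommu_unmap(), and the point is the voluntary scheduling point inside the long-running loop.

/*
 * Illustrative sketch only (assumes a hypothetical do_one_unit()
 * helper in place of the real iommu_map()/iommu_unmap() calls in
 * vfio_iommu_type1.c).
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/types.h>

static size_t do_one_unit(dma_addr_t iova)
{
	/* Placeholder for expensive per-range IOMMU work. */
	return PAGE_SIZE;
}

static void sketch_process_range(dma_addr_t iova, dma_addr_t end)
{
	while (iova < end) {
		iova += do_one_unit(iova);

		/*
		 * Voluntary scheduling point: yield the CPU if
		 * need-resched is set, so tearing down a very large
		 * mapping does not trigger soft-lockup warnings.
		 */
		cond_resched();
	}
}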
Diffstat (limited to 'drivers/vfio')
-rw-r--r--	drivers/vfio/vfio_iommu_type1.c | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 35c90089478e..57d8c37a002b 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -351,8 +351,10 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
domain = d = list_first_entry(&iommu->domain_list,
struct vfio_domain, next);
- list_for_each_entry_continue(d, &iommu->domain_list, next)
+ list_for_each_entry_continue(d, &iommu->domain_list, next) {
iommu_unmap(d->domain, dma->iova, dma->size);
+ cond_resched();
+ }
while (iova < end) {
size_t unmapped, len;
@@ -384,6 +386,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
unmapped >> PAGE_SHIFT,
dma->prot, false);
iova += unmapped;
+
+ cond_resched();
}
vfio_lock_acct(-unlocked);
@@ -528,6 +532,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
map_try_harder(d, iova, pfn, npage, prot))
goto unwind;
}
+
+ cond_resched();
}
return 0;