summaryrefslogtreecommitdiffstats
path: root/drivers/iommu
diff options
context:
space:
mode:
author	Niklas Schnelle <schnelle@linux.ibm.com>	2023-11-20 15:51:57 +0100
committer	Joerg Roedel <jroedel@suse.de>	2023-11-27 11:00:43 +0100
commit6f01a732608f294a853f0d1cc963772bac12f1e9 (patch)
tree1be78cd40390cbb223015178e6e997fd7cf3118b /drivers/iommu
parent00271ca5cbcd36d01f3c58a05378304ba9dd6a2a (diff)
downloadlinux-6f01a732608f294a853f0d1cc963772bac12f1e9.tar.gz
linux-6f01a732608f294a853f0d1cc963772bac12f1e9.tar.bz2
linux-6f01a732608f294a853f0d1cc963772bac12f1e9.zip
iommu/virtio: Add ops->flush_iotlb_all and enable deferred flush
Add ops->flush_iotlb_all operation to enable virtio-iommu for the
dma-iommu deferred flush scheme. This results in a significant increase
in performance in exchange for a window in which devices can still
access previously IOMMU mapped memory when running with
CONFIG_IOMMU_DEFAULT_DMA_LAZY. The previous strict behavior can be
achieved with iommu.strict=1 on the kernel command line or
CONFIG_IOMMU_DEFAULT_DMA_STRICT.

Link: https://lore.kernel.org/lkml/20230802123612.GA6142@myrica/
Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/r/20231120-viommu-sync-map-v3-2-50a57ecf78b5@linux.ibm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/virtio-iommu.c	16
1 file changed, 16 insertions(+), 0 deletions(-)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 3f18f69d70d8..ef6a87445105 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -926,6 +926,19 @@ static int viommu_iotlb_sync_map(struct iommu_domain *domain,
return viommu_sync_req(vdomain->viommu);
}
+static void viommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+ /*
+ * May be called before the viommu is initialized including
+ * while creating direct mapping
+ */
+ if (!vdomain->nr_endpoints)
+ return;
+ viommu_sync_req(vdomain->viommu);
+}
+
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
struct iommu_resv_region *entry, *new_entry, *msi = NULL;
@@ -1051,6 +1064,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
+ case IOMMU_CAP_DEFERRED_FLUSH:
+ return true;
default:
return false;
}
@@ -1071,6 +1086,7 @@ static struct iommu_ops viommu_ops = {
.map_pages = viommu_map_pages,
.unmap_pages = viommu_unmap_pages,
.iova_to_phys = viommu_iova_to_phys,
+ .flush_iotlb_all = viommu_flush_iotlb_all,
.iotlb_sync = viommu_iotlb_sync,
.iotlb_sync_map = viommu_iotlb_sync_map,
.free = viommu_domain_free,