author     Jason Gunthorpe <jgg@nvidia.com>  2023-02-21 11:11:03 -0400
committer  Jason Gunthorpe <jgg@nvidia.com>  2023-02-21 11:11:03 -0400
commit     939204e4df962982cbc84acc26b29b421dd530a8 (patch)
tree       753698947a28ff1f18857d198a671317352f16c8 /drivers/vfio
parent     b4ff830eca097df51af10a9be29e8cc817327919 (diff)
parent     c9c3395d5e3dcc6daee66c6908354d47bf98cb0c (diff)
Merge tag 'v6.2' into iommufd.git for-next
Resolve conflicts from the signature change in iommu_map:

 - drivers/infiniband/hw/usnic/usnic_uiom.c
   Switch iommu_map_atomic() to iommu_map(.., GFP_ATOMIC)

 - drivers/vfio/vfio_iommu_type1.c
   Following indenting change for GFP_KERNEL

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
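For reference, the first conflict is a mechanical API conversion: after the signature change, callers that used iommu_map_atomic() call iommu_map() with an explicit GFP_ATOMIC. The sketch below is a hypothetical caller to illustrate that pattern, not the actual usnic_uiom.c hunk; the function name and prot flags are illustrative.

#include <linux/gfp.h>
#include <linux/iommu.h>

/* Hypothetical caller, shown only to illustrate the conversion resolved in
 * this merge; not taken from usnic_uiom.c.
 */
static int example_map_in_atomic_context(struct iommu_domain *domain,
					 unsigned long iova, phys_addr_t paddr,
					 size_t size)
{
	/* Before: return iommu_map_atomic(domain, iova, paddr, size,
	 *				   IOMMU_READ | IOMMU_WRITE);
	 */
	return iommu_map(domain, iova, paddr, size,
			 IOMMU_READ | IOMMU_WRITE, GFP_ATOMIC);
}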
Diffstat (limited to 'drivers/vfio')
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 31
1 file changed, 20 insertions(+), 11 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 7bea5594bc45..a44ac3fe657c 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1856,24 +1856,33 @@ unwind:
  * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
  * hugetlbfs is in use.
  */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
 {
-	struct page *pages;
 	int ret, order = get_order(PAGE_SIZE * 2);
+	struct vfio_iova *region;
+	struct page *pages;
+	dma_addr_t start;
 
 	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!pages)
 		return;
 
-	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
-			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
-	if (!ret) {
-		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+	list_for_each_entry(region, regions, list) {
+		start = ALIGN(region->start, PAGE_SIZE * 2);
+		if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
+			continue;
 
-		if (unmapped == PAGE_SIZE)
-			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
-		else
-			domain->fgsp = true;
+		ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
+				IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
+		if (!ret) {
+			size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
+
+			if (unmapped == PAGE_SIZE)
+				iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
+			else
+				domain->fgsp = true;
+		}
+		break;
 	}
 
 	__free_pages(pages, order);
@@ -2317,7 +2326,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		}
 	}
 
-	vfio_test_domain_fgsp(domain);
+	vfio_test_domain_fgsp(domain, &iova_copy);
 
 	/* replay mappings on new domains */
 	ret = vfio_iommu_replay(iommu, domain);
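
Pieced together from the first hunk above, the resulting probe reads as follows; the block comments are editorial summaries added here, not quoted from the kernel sources.

/*
 * Editorial summary: map a contiguous 2-page range, then try to unmap just
 * one page of it.  If exactly PAGE_SIZE comes back, the IOMMU kept two
 * separate PTEs, so the leftover page is unmapped and fgsp stays false.  If
 * more was torn down, the IOMMU merged the range into a superpage on its
 * own, and fine-grained superpage support is recorded in domain->fgsp.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
{
	int ret, order = get_order(PAGE_SIZE * 2);
	struct vfio_iova *region;
	struct page *pages;
	dma_addr_t start;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return;

	/*
	 * Editorial: probe inside the first valid IOVA region that can hold
	 * an aligned 2-page mapping, rather than hardcoding IOVA 0, which is
	 * not guaranteed to be a mappable address on every platform.
	 */
	list_for_each_entry(region, regions, list) {
		start = ALIGN(region->start, PAGE_SIZE * 2);
		if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
			continue;

		ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
				IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
		if (!ret) {
			size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);

			if (unmapped == PAGE_SIZE)
				iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
			else
				domain->fgsp = true;
		}
		break;
	}

	__free_pages(pages, order);
}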