path: root/drivers/iommu
author     Robin Murphy <robin.murphy@arm.com>      2019-05-20 09:29:33 +0200
committer  Joerg Roedel <jroedel@suse.de>           2019-05-27 17:31:11 +0200
commit     4c360acee9298d048921bec3e21c4183d06cb43a (patch)
tree       c1133d965fa8c652ed26162eac6b9ca33d727b76 /drivers/iommu
parent     796a08cf16054ef93783ca8ab4aec2469c3307c1 (diff)
download   linux-4c360acee9298d048921bec3e21c4183d06cb43a.tar.gz
           linux-4c360acee9298d048921bec3e21c4183d06cb43a.tar.bz2
           linux-4c360acee9298d048921bec3e21c4183d06cb43a.zip
iommu/dma: Factor out remapped pages lookup
Since we duplicate the find_vm_area() logic a few times in places where we only care about the pages, factor out a helper to abstract it.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[hch: don't warn when not finding a region, as we'll rely on that later]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
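The [hch] note is the key behavioural change: because __iommu_dma_get_pages() returns NULL instead of warning, callers can use the lookup result to tell remapped (vmalloc-backed) buffers apart from other allocations. A minimal sketch of such a caller, assuming a hypothetical function name that is not part of this patch:

static int example_op_on_remapped(void *cpu_addr)
{
	/* Hypothetical caller: name and error handling are illustrative only. */
	struct page **pages = __iommu_dma_get_pages(cpu_addr);

	if (!pages)
		return -ENXIO;	/* not a remapped buffer; take a different path */

	/* ... operate on the pages[] array backing the remapped region ... */
	return 0;
}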
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/dma-iommu.c | 32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 498e319d6607..5e81165e6755 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -554,6 +554,15 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
return pages;
}
+static struct page **__iommu_dma_get_pages(void *cpu_addr)
+{
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
+ if (!area || !area->pages)
+ return NULL;
+ return area->pages;
+}
+
/**
* iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
* @dev: Device which owns this buffer
@@ -1042,11 +1051,11 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
} else if (is_vmalloc_addr(cpu_addr)){
- struct vm_struct *area = find_vm_area(cpu_addr);
+ struct page **pages = __iommu_dma_get_pages(cpu_addr);
- if (WARN_ON(!area || !area->pages))
+ if (!pages)
return;
- __iommu_dma_free(dev, area->pages, iosize, &handle);
+ __iommu_dma_free(dev, pages, iosize, &handle);
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
} else {
__iommu_dma_unmap(dev, handle, iosize);
@@ -1078,7 +1087,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
{
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
- struct vm_struct *area;
+ struct page **pages;
int ret;
vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
@@ -1103,11 +1112,10 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
return __iommu_dma_mmap_pfn(vma, pfn, size);
}
- area = find_vm_area(cpu_addr);
- if (WARN_ON(!area || !area->pages))
+ pages = __iommu_dma_get_pages(cpu_addr);
+ if (!pages)
return -ENXIO;
-
- return __iommu_dma_mmap(area->pages, size, vma);
+ return __iommu_dma_mmap(pages, size, vma);
}
static int __iommu_dma_get_sgtable_page(struct sg_table *sgt, struct page *page,
@@ -1125,7 +1133,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct vm_struct *area = find_vm_area(cpu_addr);
+ struct page **pages;
if (!is_vmalloc_addr(cpu_addr)) {
struct page *page = virt_to_page(cpu_addr);
@@ -1141,10 +1149,10 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
return __iommu_dma_get_sgtable_page(sgt, page, size);
}
- if (WARN_ON(!area || !area->pages))
+ pages = __iommu_dma_get_pages(cpu_addr);
+ if (!pages)
return -ENXIO;
-
- return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
+ return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
GFP_KERNEL);
}
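As a usage note for the get_sgtable hunk above: the table built by sg_alloc_table_from_pages() can be walked with the standard scatterlist iterators. A minimal sketch of a hypothetical consumer (not part of this patch):

static void example_walk_sgtable(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	/*
	 * sg_alloc_table_from_pages() coalesces physically contiguous pages,
	 * so each entry may cover more than one page.
	 */
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		pr_debug("segment %d: %u bytes at page %p\n",
			 i, sg->length, sg_page(sg));
}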