author | Robin Murphy <robin.murphy@arm.com> | 2019-05-20 09:29:32 +0200
---|---|---
committer | Joerg Roedel <jroedel@suse.de> | 2019-05-27 17:31:11 +0200
commit | 796a08cf16054ef93783ca8ab4aec2469c3307c1 (patch) |
tree | e4d3a1c2192cea2870681ea16646aea82760bfd4 /drivers/iommu/dma-iommu.c |
parent | b61d271e59d7fd679ad9922ce5f16c116c0f8e94 (diff) |
iommu/dma: Squash __iommu_dma_{map,unmap}_page helpers
The remaining internal callsites don't care about having prototypes
compatible with the relevant dma_map_ops callbacks, so the extra
level of indirection just wastes space and complicates things.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r-- | drivers/iommu/dma-iommu.c | 25
1 file changed, 7 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 6ece8f477fc8..498e319d6607 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -717,18 +717,6 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
 }
 
-static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, int prot)
-{
-	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
-}
-
-static void __iommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	__iommu_dma_unmap(dev, handle, size);
-}
-
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
@@ -974,7 +962,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 		if (!addr)
 			return NULL;
 
-		*handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize,
+				ioprot);
 		if (*handle == DMA_MAPPING_ERROR) {
 			if (coherent)
 				__free_pages(page, get_order(size));
@@ -991,7 +980,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 		if (!page)
 			return NULL;
 
-		*handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
 		if (*handle == DMA_MAPPING_ERROR) {
 			dma_release_from_contiguous(dev, page,
 					size >> PAGE_SHIFT);
@@ -1005,7 +994,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 			arch_dma_prep_coherent(page, iosize);
 			memset(addr, 0, size);
 		} else {
-			__iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+			__iommu_dma_unmap(dev, *handle, iosize);
 			dma_release_from_contiguous(dev, page,
 					size >> PAGE_SHIFT);
 		}
@@ -1044,12 +1033,12 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	 * Hence how dodgy the below logic looks...
 	 */
 	if (dma_in_atomic_pool(cpu_addr, size)) {
-		__iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
+		__iommu_dma_unmap(dev, handle, iosize);
 		dma_free_from_pool(cpu_addr, size);
 	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
 		struct page *page = vmalloc_to_page(cpu_addr);
 
-		__iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
+		__iommu_dma_unmap(dev, handle, iosize);
 		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else if (is_vmalloc_addr(cpu_addr)){
@@ -1060,7 +1049,7 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		__iommu_dma_free(dev, area->pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
-		__iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
+		__iommu_dma_unmap(dev, handle, iosize);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
 	}
 }
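For readers who want the pattern in isolation: the commit deletes forwarding wrappers whose only purpose was to look like dma_map_ops callbacks, and has the remaining internal callers invoke the workers directly. Below is a minimal, standalone C sketch of that before/after shape; all names (real_map, map_page_wrapper) are hypothetical stand-ins, not the kernel's actual code.

#include <stddef.h>

typedef unsigned long long dma_addr_t;

/* The worker, playing the role of __iommu_dma_map(). */
static dma_addr_t real_map(void *dev, unsigned long long phys, size_t size,
			   int prot)
{
	(void)dev; (void)size; (void)prot;
	/* In the kernel this would allocate an IOVA and map phys into it. */
	return (dma_addr_t)phys;
}

/*
 * Before the squash: a wrapper shaped like an ops-table callback
 * (page + offset signature), even though the internal callers always
 * pass offset 0 and never go through the ops table.
 */
static dma_addr_t map_page_wrapper(void *dev, unsigned long long page_phys,
				   unsigned long offset, size_t size, int prot)
{
	return real_map(dev, page_phys + offset, size, prot);
}

int main(void)
{
	void *dev = NULL;

	/* Old call path: one extra hop through the wrapper. */
	dma_addr_t before = map_page_wrapper(dev, 0x1000, 0, 64, 0);

	/* New call path, as in the diff: call the worker directly, with
	 * page_to_phys(page) folded in at the call site. */
	dma_addr_t after = real_map(dev, 0x1000, 64, 0);

	return before == after ? 0 : 1;
}

The design point of the commit message is visible here: once nothing needs the callback-compatible signature, the wrapper is pure indirection, so squashing it saves a little text and removes a place where ignored arguments (the dir and attrs that __iommu_dma_unmap_page discarded) could mislead a reader.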