From b4dca1512941aa8fec33c28939abc2bba4a2c78c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 5 Sep 2019 10:04:30 +0200
Subject: swiotlb-xen: simplify cache maintenance

Now that we know the dma-noncoherent.h helpers are always available
when we are on an architecture with support for non-coherent devices,
we can just call them directly and remove the calls to the dma-direct
routines, including the dma_direct_map_page calls whose return value
we ignored anyway.  Instead we now have Xen wrappers for the
arch_sync_dma_for_{device,cpu} helpers that call the special Xen
versions of those routines for foreign pages.

Note that the new helpers are passed the physical address in addition
to the dma address to avoid another translation for the local cache
maintenance.  The pfn_valid checks remain on the dma address as in
the old code, even if that looks a little funny.

Signed-off-by: Christoph Hellwig
Reviewed-by: Boris Ostrovsky
Reviewed-by: Stefano Stabellini
Acked-by: Konrad Rzeszutek Wilk
---
 arch/arm/xen/mm.c | 64 +++++++++++++++----------------------------------------
 1 file changed, 17 insertions(+), 47 deletions(-)

diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 9d73fa4a5991..2b2c208408bb 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -60,63 +60,33 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
 	} while (size);
 }
 
-static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
+/*
+ * Dom0 is mapped 1:1, and while the Linux page can span across multiple Xen
+ * pages, it is not possible for it to contain a mix of local and foreign Xen
+ * pages.  Calling pfn_valid on a foreign mfn will always return false, so if
+ * pfn_valid returns true the page is local and we can use the native
+ * dma-direct functions, otherwise we call the Xen specific version.
+ */
+void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
+		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
 {
-	if (dir != DMA_TO_DEVICE)
+	if (pfn_valid(PFN_DOWN(handle)))
+		arch_sync_dma_for_cpu(dev, paddr, size, dir);
+	else if (dir != DMA_TO_DEVICE)
 		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 }
 
-static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
+		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
 {
-	if (dir == DMA_FROM_DEVICE)
+	if (pfn_valid(PFN_DOWN(handle)))
+		arch_sync_dma_for_device(dev, paddr, size, dir);
+	else if (dir == DMA_FROM_DEVICE)
 		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 	else
 		dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN);
 }
 
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-		dma_addr_t dev_addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	if (dev_is_dma_coherent(hwdev))
-		return;
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
-}
-
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-
-{
-	if (dev_is_dma_coherent(hwdev))
-		return;
-	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-		return;
-
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (dev_is_dma_coherent(hwdev))
-		return;
-	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	if (dev_is_dma_coherent(hwdev))
-		return;
-	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
-}
-
 bool xen_arch_need_swiotlb(struct device *dev,
 			   phys_addr_t phys,
 			   dma_addr_t dev_addr)
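
For illustration only, not part of this patch: a minimal sketch of how a
map_page-style caller might invoke the new helpers once the
dev_is_dma_coherent()/DMA_ATTR_SKIP_CPU_SYNC filtering lives in the caller
rather than in the removed __xen_dma_* wrappers.  The function name
example_xen_map_page and the xen_phys_to_bus() translation step are
assumptions made for the sketch, not code from this series.

#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <xen/swiotlb-xen.h>

/*
 * Sketch only: a mapping path that performs the coherency and attrs
 * checks itself and then calls the new Xen sync helper.
 * xen_phys_to_bus() stands in for whatever phys-to-dma translation
 * the real caller uses.
 */
static dma_addr_t example_xen_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(paddr);	/* assumed helper */

	/*
	 * Both addresses are passed so that xen_dma_sync_for_device()
	 * can skip a second translation when pfn_valid() says the page
	 * is local and the native arch_sync_dma_for_device() is used.
	 */
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		xen_dma_sync_for_device(dev, dev_addr, paddr, size, dir);

	return dev_addr;
}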