author | Christoph Hellwig <hch@lst.de> | 2019-06-03 12:52:47 +0200
---|---|---
committer | Christoph Hellwig <hch@lst.de> | 2019-06-25 08:14:24 +0200
commit | 80e61fcd23946cb222f780a49ab2eeb7ef1d3749 (patch)
tree | b3b775066ec7062eccd1e0fd50d7be00475099c0 /arch/arc/mm
parent | 34ab03160eda51839be6dd5a939680963266707c (diff)
arc: remove the partial DMA_ATTR_NON_CONSISTENT support
The arc DMA code accepts DMA_ATTR_NON_CONSISTENT allocations, but does
not provide the matching cache_sync operation. Any user of the attribute
therefore has no way to actually transfer cache ownership between CPU and
device, which leads to coherency bugs (see the driver-side sketch after
the tags below).
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Evgeniy Paltsev <paltsev@synopsys.com>
Tested-by: Evgeniy Paltsev <paltsev@synopsys.com>
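
For context, here is a minimal sketch (not part of the patch) of the driver-side contract that DMA_ATTR_NON_CONSISTENT implied in kernels of this era: the allocation comes back cacheable, so the driver must call dma_cache_sync() to pass cache ownership back and forth. The device pointer `mydev` and the buffer size are illustrative placeholders; on arc the sync step had no backing implementation, which is exactly the hole this patch closes.

```c
#include <linux/dma-mapping.h>

static void *buf;
static dma_addr_t buf_dma;

/* Allocate a cacheable DMA buffer and hand it to the device. */
static int example_setup(struct device *mydev)	/* mydev: placeholder */
{
	buf = dma_alloc_attrs(mydev, PAGE_SIZE, &buf_dma, GFP_KERNEL,
			      DMA_ATTR_NON_CONSISTENT);
	if (!buf)
		return -ENOMEM;

	/* ... CPU fills buf through the cacheable mapping ... */

	/*
	 * Transfer ownership to the device by writing back dirty cache
	 * lines.  On arc this relied on a cache_sync operation that was
	 * never implemented, so the device could observe stale data.
	 */
	dma_cache_sync(mydev, buf, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}

static void example_teardown(struct device *mydev)
{
	dma_free_attrs(mydev, PAGE_SIZE, buf, buf_dma,
		       DMA_ATTR_NON_CONSISTENT);
}
```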
Diffstat (limited to 'arch/arc/mm')
-rw-r--r-- | arch/arc/mm/dma.c | 21
1 file changed, 6 insertions, 15 deletions
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 1525ac00fd02..9832928f896d 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -24,7 +24,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	struct page *page;
 	phys_addr_t paddr;
 	void *kvaddr;
-	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
 
 	/*
 	 * __GFP_HIGHMEM flag is cleared by upper layer functions
@@ -46,14 +45,10 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * A coherent buffer needs MMU mapping to enforce non-cachability.
 	 * kvaddr is kernel Virtual address (0x7000_0000 based).
 	 */
-	if (need_coh) {
-		kvaddr = ioremap_nocache(paddr, size);
-		if (kvaddr == NULL) {
-			__free_pages(page, order);
-			return NULL;
-		}
-	} else {
-		kvaddr = (void *)(u32)paddr;
+	kvaddr = ioremap_nocache(paddr, size);
+	if (kvaddr == NULL) {
+		__free_pages(page, order);
+		return NULL;
 	}
 
 	/*
@@ -66,9 +61,7 @@
 	 * Currently flush_cache_vmap nukes the L1 cache completely which
 	 * will be optimized as a separate commit
 	 */
-	if (need_coh)
-		dma_cache_wback_inv(paddr, size);
-
+	dma_cache_wback_inv(paddr, size);
 	return kvaddr;
 }
 
@@ -78,9 +71,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	phys_addr_t paddr = dma_handle;
 	struct page *page = virt_to_page(paddr);
 
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
-		iounmap((void __force __iomem *)vaddr);
-
+	iounmap((void __force __iomem *)vaddr);
 	__free_pages(page, get_order(size));
 }
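
The alternative to removing the partial support would have been to implement the missing hook. As a purely hypothetical sketch (not in this patch, and never merged for arc): in kernels of this vintage, dma_cache_sync() dispatched to an arch-provided arch_dma_cache_sync() behind CONFIG_DMA_NONCOHERENT_CACHE_SYNC, which arc could have backed with the same dma_cache_*() helpers visible in the diff above. The per-direction handling here is an assumption modeled on common noncoherent-arch implementations.

```c
#include <linux/dma-noncoherent.h>
#include <linux/io.h>

/*
 * Hypothetical arc implementation of the missing cache_sync hook.
 * dma_cache_wback/inv/wback_inv are arc's existing cache helpers,
 * as used elsewhere in arch/arc/mm/dma.c.
 */
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	phys_addr_t paddr = virt_to_phys(vaddr);

	switch (dir) {
	case DMA_TO_DEVICE:	/* CPU wrote; write back before device reads */
		dma_cache_wback(paddr, size);
		break;
	case DMA_FROM_DEVICE:	/* device will write; drop stale CPU lines */
		dma_cache_inv(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		break;
	}
}
```

The commit instead takes the simpler route the diff shows: every allocation goes through the uncached ioremap_nocache() mapping, so no ownership transfer is ever needed.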