author     Linus Torvalds <torvalds@linux-foundation.org>   2017-11-14 16:54:12 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-11-14 16:54:12 -0800
commit     e37e0ee0190034a059c9faea8adfb4982fb24ddd (patch)
tree       ba6d6da8ae099eb734f4d8d33bf648fe0dfa39e7
parent     23c258763ba992f6a95a4b8980ffa7c1890bc8d8 (diff)
parent     c9eb6172c328dde7e14812f94f8da87b691e41b5 (diff)
Merge tag 'dma-mapping-4.15' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- turn dma_cache_sync into a dma_map_ops method and remove the
  per-architecture implementations that are effectively dead because the
  architecture doesn't support noncoherent allocations (a sketch of the
  new generic helper follows this list)
- add a flag for buses whose devices need DMA configuration even when
  firmware does not explicitly describe it (Robin Murphy); a minimal
  bus_type sketch follows the diffstat below
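
For reference, the core of the first change is the new generic dma_cache_sync()
wrapper added to include/linux/dma-mapping.h, reproduced from the diff below
with comments added: it simply dispatches to the architecture's optional
.cache_sync dma_map_ops method, so cache-coherent architectures no longer have
to provide anything at all.

	static inline void
	dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		       enum dma_data_direction dir)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		/* DMA_NONE (or a garbage direction) remains a caller bug */
		BUG_ON(!valid_dma_direction(dir));

		/* only noncoherent architectures install a cache_sync method */
		if (ops->cache_sync)
			ops->cache_sync(dev, vaddr, size, dir);
	}
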
* tag 'dma-mapping-4.15' of git://git.infradead.org/users/hch/dma-mapping:
dma-mapping: turn dma_cache_sync into a dma_map_ops method
sh: make dma_cache_sync a no-op
xtensa: make dma_cache_sync a no-op
unicore32: make dma_cache_sync a no-op
powerpc: make dma_cache_sync a no-op
mn10300: make dma_cache_sync a no-op
microblaze: make dma_cache_sync a no-op
ia64: make dma_cache_sync a no-op
frv: make dma_cache_sync a no-op
x86: make dma_cache_sync a no-op
floppy: consolidate the dummy fd_cacheflush definition
drivers: flag buses which demand DMA configuration
42 files changed, 71 insertions, 250 deletions
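
The second change is largely mechanical: instead of of_dma_configure()
open-coding a list of bus types (PCI, AMBA, platform), struct bus_type grows a
force_dma flag and each bus that wants DMA configured regardless of the
firmware description sets it. A minimal sketch of the pattern, using a
hypothetical "example" bus for illustration (the real hunks for amba, platform,
host1x and PCI are in the diff below):

	#include <linux/device.h>

	/*
	 * A bus whose devices should be set up by dma_configure() even if
	 * DMA capability is not explicitly described by firmware.
	 */
	struct bus_type example_bus_type = {
		.name      = "example",
		.force_dma = true,	/* new field added by this series */
	};

	/*
	 * of_dma_configure() then collapses its per-bus special cases to:
	 *
	 *	if (!dev->bus->force_dma)
	 *		return ret == -ENODEV ? 0 : ret;
	 */
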
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index e542cb272b67..b78f61f20796 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -9,6 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return dma_ops; } -#define dma_cache_sync(dev, va, size, dir) ((void)0) - #endif /* _ALPHA_DMA_MAPPING_H */ diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h index bae97eb19d26..942924756cf2 100644 --- a/arch/alpha/include/asm/floppy.h +++ b/arch/alpha/include/asm/floppy.h @@ -24,7 +24,6 @@ #define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count) #define fd_enable_irq() enable_irq(FLOPPY_IRQ) #define fd_disable_irq() disable_irq(FLOPPY_IRQ) -#define fd_cacheflush(addr,size) /* nothing */ #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ 0, "floppy", NULL) #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) @@ -62,7 +61,6 @@ alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io) prev_dir = dir; fd_clear_dma_ff(); - fd_cacheflush(addr, size); fd_set_dma_mode(mode); set_dma_addr(FLOPPY_DMA, bus_addr); fd_set_dma_count(size); diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h index 1c9bf14807db..1553bdb30a0c 100644 --- a/arch/cris/include/asm/dma-mapping.h +++ b/arch/cris/include/asm/dma-mapping.h @@ -17,10 +17,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) } #endif -static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ -} - #endif diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h index 273defa02a02..fd80e840a1e6 100644 --- a/arch/frv/include/asm/dma-mapping.h +++ b/arch/frv/include/asm/dma-mapping.h @@ -15,11 +15,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &frv_dma_ops; } -static inline -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - flush_write_buffers(); -} - #endif /* _ASM_DMA_MAPPING_H */ diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 463dbc18f853..5208de242e79 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -37,9 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return dma_ops; } -extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction); - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 5da9421fb0ff..c1bab526a046 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -45,15 +45,4 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) return daddr; } -static inline void -dma_cache_sync (struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir) -{ - /* - * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to - * ensure that dma_cache_sync() enforces order, hence the mb(). 
- */ - mb(); -} - #endif /* _ASM_IA64_DMA_MAPPING_H */ diff --git a/arch/m32r/include/asm/dma-mapping.h b/arch/m32r/include/asm/dma-mapping.h index 4abfc07f4762..336ffe60814b 100644 --- a/arch/m32r/include/asm/dma-mapping.h +++ b/arch/m32r/include/asm/dma-mapping.h @@ -14,11 +14,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &dma_noop_ops; } -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 3e1a3ffba291..e3722ed04fbb 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h @@ -9,10 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &m68k_dma_ops; } -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir) -{ - /* we use coherent allocation, so not much to do here. */ -} - #endif /* _M68K_DMA_MAPPING_H */ diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h index 7465ce54a4a9..cfd6a0505b56 100644 --- a/arch/metag/include/asm/dma-mapping.h +++ b/arch/metag/include/asm/dma-mapping.h @@ -9,14 +9,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &metag_dma_ops; } -/* - * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to - * do any flushing here. - */ -static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ -} - #endif diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index e15cd2f76e23..6b9ea39405b8 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -16,22 +16,6 @@ #define _ASM_MICROBLAZE_DMA_MAPPING_H /* - * See Documentation/DMA-API-HOWTO.txt and - * Documentation/DMA-API.txt for documentation. 
- */ - -#include <linux/types.h> -#include <linux/cache.h> -#include <linux/mm.h> -#include <linux/scatterlist.h> -#include <linux/dma-debug.h> -#include <asm/io.h> -#include <asm/cacheflush.h> - -#define __dma_alloc_coherent(dev, gfp, size, handle) NULL -#define __dma_free_coherent(size, addr) ((void)0) - -/* * Available generic sets of operations */ extern const struct dma_map_ops dma_direct_ops; @@ -41,27 +25,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &dma_direct_ops; } -static inline void __dma_sync(unsigned long paddr, - size_t size, enum dma_data_direction direction) -{ - switch (direction) { - case DMA_TO_DEVICE: - case DMA_BIDIRECTIONAL: - flush_dcache_range(paddr, paddr + size); - break; - case DMA_FROM_DEVICE: - invalidate_dcache_range(paddr, paddr + size); - break; - default: - BUG(); - } -} - -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - __dma_sync(virt_to_phys(vaddr), size, (int)direction); -} - #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */ diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index e63f154be964..990bf9ea0ec6 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -13,6 +13,7 @@ #include <linux/dma-debug.h> #include <linux/export.h> #include <linux/bug.h> +#include <asm/cacheflush.h> #define NOT_COHERENT_CACHE @@ -52,6 +53,22 @@ static void dma_direct_free_coherent(struct device *dev, size_t size, #endif } +static inline void __dma_sync(unsigned long paddr, + size_t size, enum dma_data_direction direction) +{ + switch (direction) { + case DMA_TO_DEVICE: + case DMA_BIDIRECTIONAL: + flush_dcache_range(paddr, paddr + size); + break; + case DMA_FROM_DEVICE: + invalidate_dcache_range(paddr, paddr + size); + break; + default: + BUG(); + } +} + static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, unsigned long attrs) diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 42f8cbad6c23..0d9418d264f9 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -27,9 +27,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) static inline void dma_mark_clean(void *addr, size_t size) {} -extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction); - #define arch_setup_dma_ops arch_setup_dma_ops static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index c01bd20d0208..2e2514e00720 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -383,7 +383,7 @@ static int mips_dma_supported(struct device *dev, u64 mask) return plat_dma_supported(dev, mask); } -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, +static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { BUG_ON(direction == DMA_NONE); @@ -392,8 +392,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, __dma_sync_virtual(vaddr, size, direction); } -EXPORT_SYMBOL(dma_cache_sync); - static const struct dma_map_ops mips_default_dma_map_ops = { .alloc = mips_dma_alloc_coherent, .free = mips_dma_free_coherent, @@ -407,7 +405,8 @@ static const struct dma_map_ops 
mips_default_dma_map_ops = { .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu, .sync_sg_for_device = mips_dma_sync_sg_for_device, .mapping_error = mips_dma_mapping_error, - .dma_supported = mips_dma_supported + .dma_supported = mips_dma_supported, + .cache_sync = mips_dma_cache_sync, }; const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index 737ef574b3ea..439e474ed6d7 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h @@ -11,9 +11,6 @@ #ifndef _ASM_DMA_MAPPING_H #define _ASM_DMA_MAPPING_H -#include <asm/cache.h> -#include <asm/io.h> - extern const struct dma_map_ops mn10300_dma_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) @@ -21,11 +18,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &mn10300_dma_ops; } -static inline -void dma_cache_sync(void *vaddr, size_t size, - enum dma_data_direction direction) -{ - mn10300_dcache_flush_inv(); -} - #endif diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h index f8dc62222741..6ceb92251da0 100644 --- a/arch/nios2/include/asm/dma-mapping.h +++ b/arch/nios2/include/asm/dma-mapping.h @@ -17,13 +17,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &nios2_dma_ops; } -/* - * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to - * do any flushing here. - */ -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ -} - #endif /* _ASM_NIOS2_DMA_MAPPING_H */ diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 7af4a00b5ce2..01e1fc057c83 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -33,14 +33,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return hppa_dma_ops; } -static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - if (hppa_dma_ops->sync_single_for_cpu) - flush_kernel_dcache_range((unsigned long)vaddr, size); -} - static inline void * parisc_walk_tree(struct device *dev) { diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 412231d101f9..c0dfd892f70c 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -572,6 +572,12 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist * flush_kernel_vmap_range(sg_virt(sg), sg->length); } +static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction) +{ + flush_kernel_dcache_range((unsigned long)vaddr, size); +} + const struct dma_map_ops pcxl_dma_ops = { .dma_supported = pa11_dma_supported, .alloc = pa11_dma_alloc, @@ -584,6 +590,7 @@ const struct dma_map_ops pcxl_dma_ops = { .sync_single_for_device = pa11_dma_sync_single_for_device, .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu, .sync_sg_for_device = pa11_dma_sync_sg_for_device, + .cache_sync = pa11_dma_cache_sync, }; static void *pcx_dma_alloc(struct device *dev, size_t size, @@ -620,4 +627,5 @@ const struct dma_map_ops pcx_dma_ops = { .sync_single_for_device = pa11_dma_sync_single_for_device, .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu, .sync_sg_for_device = pa11_dma_sync_sg_for_device, + .cache_sync = pa11_dma_cache_sync, }; diff --git 
a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index ee1e38ff1b77..5a6cbe11db6f 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -142,12 +142,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) #define ARCH_HAS_DMA_MMAP_COHERENT -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(direction == DMA_NONE); - __dma_sync(vaddr, size, (int)direction); -} - #endif /* __KERNEL__ */ #endif /* _ASM_DMA_MAPPING_H */ diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h index 936a904ae78c..167c44b58848 100644 --- a/arch/powerpc/include/asm/floppy.h +++ b/arch/powerpc/include/asm/floppy.h @@ -25,7 +25,6 @@ #define fd_get_dma_residue() fd_ops->_get_dma_residue(FLOPPY_DMA) #define fd_enable_irq() enable_irq(FLOPPY_IRQ) #define fd_disable_irq() disable_irq(FLOPPY_IRQ) -#define fd_cacheflush(addr,size) /* nothing */ #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); #include <linux/pci.h> @@ -152,7 +151,6 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) prev_dir = dir; fd_clear_dma_ff(); - fd_cacheflush(addr, size); fd_set_dma_mode(mode); set_dma_addr(FLOPPY_DMA, bus_addr); fd_set_dma_count(size); diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 8fc8764fe5ee..eaf490f9c5bc 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -16,11 +16,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &dma_noop_ops; } -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 68c1536b3aab..41167931e5d9 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -10,10 +10,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return dma_ops; } -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir); - -/* arch/sh/mm/consistent.c */ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, unsigned long attrs); @@ -21,4 +17,7 @@ extern void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs); +void sh_sync_dma_for_device(void *vaddr, size_t size, + enum dma_data_direction dir); + #endif /* __ASM_SH_DMA_MAPPING_H */ diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index d24c707b2181..62b485107eae 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -9,6 +9,7 @@ */ #include <linux/dma-mapping.h> #include <linux/io.h> +#include <asm/cacheflush.h> static dma_addr_t nommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, @@ -20,7 +21,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, WARN_ON(size == 0); if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_cache_sync(dev, page_address(page) + offset, size, dir); + sh_sync_dma_for_device(page_address(page) + offset, size, dir); return addr; } @@ -38,7 +39,7 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg, BUG_ON(!sg_page(s)); if 
(!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_cache_sync(dev, sg_virt(s), s->length, dir); + sh_sync_dma_for_device(sg_virt(s), s->length, dir); s->dma_address = sg_phys(s); s->dma_length = s->length; @@ -48,20 +49,20 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg, } #ifdef CONFIG_DMA_NONCOHERENT -static void nommu_sync_single(struct device *dev, dma_addr_t addr, +static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - dma_cache_sync(dev, phys_to_virt(addr), size, dir); + sh_sync_dma_for_device(phys_to_virt(addr), size, dir); } -static void nommu_sync_sg(struct device *dev, struct scatterlist *sg, +static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { struct scatterlist *s; int i; for_each_sg(sg, s, nelems, i) - dma_cache_sync(dev, sg_virt(s), s->length, dir); + sh_sync_dma_for_device(sg_virt(s), s->length, dir); } #endif @@ -71,8 +72,8 @@ const struct dma_map_ops nommu_dma_ops = { .map_page = nommu_map_page, .map_sg = nommu_map_sg, #ifdef CONFIG_DMA_NONCOHERENT - .sync_single_for_device = nommu_sync_single, - .sync_sg_for_device = nommu_sync_sg, + .sync_single_for_device = nommu_sync_single_for_device, + .sync_sg_for_device = nommu_sync_sg_for_device, #endif .is_phys = 1, }; diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index d1275adfa0ef..6ea3aab508f2 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -49,7 +49,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, * Pages from the page allocator may have data present in * cache. So flush the cache before using uncached memory. */ - dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL); + sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL); ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size); if (!ret_nocache) { @@ -78,7 +78,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size, iounmap(vaddr); } -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, +void sh_sync_dma_for_device(void *vaddr, size_t size, enum dma_data_direction direction) { void *addr; @@ -100,7 +100,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, BUG(); } } -EXPORT_SYMBOL(dma_cache_sync); +EXPORT_SYMBOL(sh_sync_dma_for_device); static int __init memchunk_setup(char *str) { diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 2f3490dd37de..12ae33daf52f 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -6,14 +6,6 @@ #include <linux/mm.h> #include <linux/dma-debug.h> -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir) -{ - /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this - * routine can be a nop. - */ -} - extern const struct dma_map_ops *dma_ops; extern const struct dma_map_ops pci32_dma_ops; diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h index dd63aa301658..b519acf4383d 100644 --- a/arch/sparc/include/asm/floppy_32.h +++ b/arch/sparc/include/asm/floppy_32.h @@ -71,7 +71,6 @@ static struct sun_floppy_ops sun_fdops; #define fd_set_dma_count(count) sun_fd_set_dma_count(count) #define fd_enable_irq() /* nothing... */ #define fd_disable_irq() /* nothing... */ -#define fd_cacheflush(addr, size) /* nothing... */ #define fd_request_irq() sun_fd_request_irq() #define fd_free_irq() /* nothing... 
*/ #if 0 /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */ diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h index 22fbeab92a4c..2a050eab69a0 100644 --- a/arch/sparc/include/asm/floppy_64.h +++ b/arch/sparc/include/asm/floppy_64.h @@ -73,7 +73,6 @@ static struct sun_floppy_ops sun_fdops; #define fd_set_dma_addr(addr) sun_fdops.fd_set_dma_addr(addr) #define fd_set_dma_count(count) sun_fdops.fd_set_dma_count(count) #define get_dma_residue(x) sun_fdops.get_dma_residue() -#define fd_cacheflush(addr, size) /* nothing... */ #define fd_request_irq() sun_fdops.fd_request_irq() #define fd_free_irq() sun_fdops.fd_free_irq() #define fd_eject(drive) sun_fdops.fd_eject(drive) diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index 7061dc8af43a..97ad62878290 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -67,13 +67,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) #define HAVE_ARCH_DMA_SET_MASK 1 int dma_set_mask(struct device *dev, u64 mask); -/* - * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to - * do any flushing here. - */ -static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ -} - #endif /* _ASM_TILE_DMA_MAPPING_H */ diff --git a/arch/unicore32/include/asm/cacheflush.h b/arch/unicore32/include/asm/cacheflush.h index c0301e6c8b81..a5e08e2d5d6d 100644 --- a/arch/unicore32/include/asm/cacheflush.h +++ b/arch/unicore32/include/asm/cacheflush.h @@ -102,15 +102,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t); extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size); /* - * These are private to the dma-mapping API. Do not use directly. - * Their sole purpose is to ensure that data held in the cache - * is visible to DMA, or data written by DMA to system memory is - * visible to the CPU. - */ -extern void __cpuc_dma_clean_range(unsigned long, unsigned long); -extern void __cpuc_dma_flush_range(unsigned long, unsigned long); - -/* * Copy user data from/to a page which is mapped into a different * processes address space. Really, we want to allow our "user * space" model to handle this. 
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 518ba5848dd6..ac608c2f6af6 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -18,9 +18,6 @@ #include <linux/scatterlist.h> #include <linux/swiotlb.h> -#include <asm/memory.h> -#include <asm/cacheflush.h> - extern const struct dma_map_ops swiotlb_dma_map_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) @@ -48,24 +45,5 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) static inline void dma_mark_clean(void *addr, size_t size) {} -static inline void dma_cache_sync(struct device *dev, void *vaddr, - size_t size, enum dma_data_direction direction) -{ - unsigned long start = (unsigned long)vaddr; - unsigned long end = start + size; - - switch (direction) { - case DMA_NONE: - BUG(); - case DMA_FROM_DEVICE: - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - __cpuc_dma_flush_range(start, end); - break; - case DMA_TO_DEVICE: /* writeback only */ - __cpuc_dma_clean_range(start, end); - break; - } -} - #endif /* __KERNEL__ */ #endif diff --git a/arch/unicore32/mm/proc-syms.c b/arch/unicore32/mm/proc-syms.c index 21c00fc85c99..df215fd6d639 100644 --- a/arch/unicore32/mm/proc-syms.c +++ b/arch/unicore32/mm/proc-syms.c @@ -20,6 +20,3 @@ EXPORT_SYMBOL(cpu_dcache_clean_area); EXPORT_SYMBOL(cpu_set_pte); EXPORT_SYMBOL(__cpuc_coherent_kern_range); - -EXPORT_SYMBOL(__cpuc_dma_flush_range); -EXPORT_SYMBOL(__cpuc_dma_clean_range); diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 836ca1178a6a..43cbe843de8d 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -68,13 +68,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) } #endif /* CONFIG_X86_DMA_REMAP */ -static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir) -{ - flush_write_buffers(); -} - static inline unsigned long dma_alloc_coherent_mask(struct device *dev, gfp_t gfp) { diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 269738dc9d1d..153bf2370988 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -23,9 +23,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &xtensa_dma_map_ops; } -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction); - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { return (dma_addr_t)paddr; diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index cec86a1c2acc..623720a11143 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -26,29 +26,6 @@ #include <asm/cacheflush.h> #include <asm/io.h> -void dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir) -{ - switch (dir) { - case DMA_BIDIRECTIONAL: - __flush_invalidate_dcache_range((unsigned long)vaddr, size); - break; - - case DMA_FROM_DEVICE: - __invalidate_dcache_range((unsigned long)vaddr, size); - break; - - case DMA_TO_DEVICE: - __flush_dcache_range((unsigned long)vaddr, size); - break; - - case DMA_NONE: - BUG(); - break; - } -} -EXPORT_SYMBOL(dma_cache_sync); - static void do_cache_op(dma_addr_t dma_handle, size_t size, void (*fn)(unsigned long, unsigned long)) { diff --git a/drivers/amba/bus.c 
b/drivers/amba/bus.c index e0f74ddc22b7..594c228d2f02 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -195,6 +195,7 @@ struct bus_type amba_bustype = { .match = amba_match, .uevent = amba_uevent, .pm = &amba_pm, + .force_dma = true, }; static int __init amba_init(void) diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9045c5f3734e..c203fb90c1a0 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1143,6 +1143,7 @@ struct bus_type platform_bus_type = { .match = platform_match, .uevent = platform_uevent, .pm = &platform_dev_pm_ops, + .force_dma = true, }; EXPORT_SYMBOL_GPL(platform_bus_type); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 60c086a53609..a54183935aa1 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -275,6 +275,10 @@ static int set_next_request(void); #define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size)) #endif +#ifndef fd_cacheflush +#define fd_cacheflush(addr, size) /* nothing... */ +#endif + static inline void fallback_on_nodma_alloc(char **addr, size_t l) { #ifdef FLOPPY_CAN_FALLBACK_ON_NODMA diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index f9cde03030fd..ed03b3243195 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -320,6 +320,7 @@ struct bus_type host1x_bus_type = { .name = "host1x", .match = host1x_device_match, .pm = &host1x_device_pm_ops, + .force_dma = true, }; static void __host1x_device_del(struct host1x_device *device) diff --git a/drivers/of/device.c b/drivers/of/device.c index 64b710265d39..25bddf9c9fe1 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -9,9 +9,7 @@ #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/slab.h> -#include <linux/pci.h> #include <linux/platform_device.h> -#include <linux/amba/bus.h> #include <asm/errno.h> #include "of_private.h" @@ -101,11 +99,7 @@ int of_dma_configure(struct device *dev, struct device_node *np) * DMA configuration regardless of whether "dma-ranges" is * correctly specified or not. */ - if (!dev_is_pci(dev) && -#ifdef CONFIG_ARM_AMBA - dev->bus != &amba_bustype && -#endif - dev->bus != &platform_bus_type) + if (!dev->bus->force_dma) return ret == -ENODEV ? 
0 : ret; dma_addr = offset = 0; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 07b8a9b385ab..7f47bb72bf30 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1516,6 +1516,7 @@ struct bus_type pci_bus_type = { .drv_groups = pci_drv_groups, .pm = PCI_PM_OPS_PTR, .num_vf = pci_bus_num_vf, + .force_dma = true, }; EXPORT_SYMBOL(pci_bus_type); diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index bec81c2404f7..7525039d812c 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -300,7 +300,7 @@ static void maple_send(void) mutex_unlock(&maple_wlist_lock); if (maple_packets > 0) { for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) - dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, + sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE, PAGE_SIZE, DMA_BIDIRECTIONAL); } @@ -642,8 +642,7 @@ static void maple_dma_handler(struct work_struct *work) list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { mdev = mq->dev; recvbuf = mq->recvbuf->buf; - dma_cache_sync(&mdev->dev, recvbuf, 0x400, - DMA_FROM_DEVICE); + sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE); code = recvbuf[0]; kfree(mq->sendbuf); list_del_init(&mq->list); diff --git a/include/linux/device.h b/include/linux/device.h index fb9451599aca..9d32000725da 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -97,6 +97,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *); * @p: The private data of the driver core, only the driver core can * touch this. * @lock_key: Lock class key for use by the lock validator + * @force_dma: Assume devices on this bus should be set up by dma_configure() + * even if DMA capability is not explicitly described by firmware. * * A bus is a channel between the processor and one or more devices. For the * purposes of the device model, all devices are connected via a bus, even if @@ -135,6 +137,8 @@ struct bus_type { struct subsys_private *p; struct lock_class_key lock_key; + + bool force_dma; }; extern int __must_check bus_register(struct bus_type *bus); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 7653ea66874d..eee1499db396 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -127,6 +127,8 @@ struct dma_map_ops { void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir); + void (*cache_sync)(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction); int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); int (*dma_supported)(struct device *dev, u64 mask); #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK @@ -437,6 +439,17 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) +static inline void +dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->cache_sync) + ops->cache_sync(dev, vaddr, size, dir); +} + extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size); |
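
For architectures that really are noncoherent, the flush logic that used to sit
behind a global dma_cache_sync() export now lives behind the per-architecture
dma_map_ops. A condensed sketch of the pattern, following the parisc hunk above
(pa11_dma_cache_sync and flush_kernel_dcache_range are taken from that hunk;
the elided ops members are unchanged by this series):

	static void pa11_dma_cache_sync(struct device *dev, void *vaddr,
			size_t size, enum dma_data_direction direction)
	{
		/* flush the kernel dcache over the buffer so the device
		 * observes the CPU's writes */
		flush_kernel_dcache_range((unsigned long)vaddr, size);
	}

	const struct dma_map_ops pcxl_dma_ops = {
		/* .alloc, .map_page, .sync_* callbacks etc. elided */
		.cache_sync	= pa11_dma_cache_sync,
	};

Drivers that call dma_cache_sync() on noncoherent allocations keep working
unchanged; the one caller that relied on the old sh-specific behaviour, the
maple bus driver, was switched to the new sh_sync_dma_for_device() helper as
part of this series.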