174 files changed, 435 insertions, 1296 deletions
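The diff below constifies every architecture's struct dma_map_ops table, moves the per-device ops pointer from dev->archdata.dma_ops into struct device itself (dev->dma_ops), and turns each arch's get_dma_ops(struct device *) into a bus-level get_arch_dma_ops(struct bus_type *) fallback. The following is a minimal sketch of the consolidated generic lookup this change enables; the helper names follow the diff, but the function bodies are an assumption for illustration, not quoted from this patch:

/*
 * Sketch (assumption, not verbatim from this diff) of the consolidated
 * lookup: a per-device pointer stored in struct device wins, otherwise
 * fall back to the architecture/bus default returned by
 * get_arch_dma_ops().
 */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

/*
 * Convenience setter; in this diff callers assign dev->dma_ops directly
 * (e.g. the IOMMU and PCI setup paths), so a generic helper like this is
 * an assumption.
 */
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}

Making the ops tables const lets them be placed in read-only memory, and keeping a single const pointer in struct device removes the duplicated archdata.dma_ops fields that each architecture carried before.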
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index c63b6ac19ee5..5d53666935e6 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -1,9 +1,9 @@ #ifndef _ALPHA_DMA_MAPPING_H #define _ALPHA_DMA_MAPPING_H -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return dma_ops; } diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index bb152e21e5ae..ffbdb3fb672f 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -128,7 +128,7 @@ static int alpha_noop_supported(struct device *dev, u64 mask) return mask < 0x00ffffffUL ? 0 : 1; } -struct dma_map_ops alpha_noop_ops = { +const struct dma_map_ops alpha_noop_ops = { .alloc = alpha_noop_alloc_coherent, .free = dma_noop_free_coherent, .map_page = dma_noop_map_page, @@ -137,5 +137,5 @@ struct dma_map_ops alpha_noop_ops = { .dma_supported = alpha_noop_supported, }; -struct dma_map_ops *dma_ops = &alpha_noop_ops; +const struct dma_map_ops *dma_ops = &alpha_noop_ops; EXPORT_SYMBOL(dma_ops); diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 451fc9cdd323..7fd2329038a3 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -939,7 +939,7 @@ static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == 0; } -struct dma_map_ops alpha_pci_ops = { +const struct dma_map_ops alpha_pci_ops = { .alloc = alpha_pci_alloc_coherent, .free = alpha_pci_free_coherent, .map_page = alpha_pci_map_page, @@ -950,5 +950,5 @@ struct dma_map_ops alpha_pci_ops = { .dma_supported = alpha_pci_supported, }; -struct dma_map_ops *dma_ops = &alpha_pci_ops; +const struct dma_map_ops *dma_ops = &alpha_pci_ops; EXPORT_SYMBOL(dma_ops); diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h index 266f11c9bd59..94285031c4fb 100644 --- a/arch/arc/include/asm/dma-mapping.h +++ b/arch/arc/include/asm/dma-mapping.h @@ -18,9 +18,9 @@ #include <plat/dma.h> #endif -extern struct dma_map_ops arc_dma_ops; +extern const struct dma_map_ops arc_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &arc_dma_ops; } diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 08450a1a5b5f..2a07e6ecafbd 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -218,7 +218,7 @@ static int arc_dma_supported(struct device *dev, u64 dma_mask) return dma_mask == DMA_BIT_MASK(32); } -struct dma_map_ops arc_dma_ops = { +const struct dma_map_ops arc_dma_ops = { .alloc = arc_dma_alloc, .free = arc_dma_free, .mmap = arc_dma_mmap, diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 75055df1cda3..9b1b7be2ec0e 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -452,7 +452,7 @@ static int dmabounce_set_mask(struct device *dev, u64 dma_mask) return arm_dma_ops.set_dma_mask(dev, dma_mask); } -static struct dma_map_ops dmabounce_ops = { +static const struct dma_map_ops dmabounce_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, .mmap = arm_dma_mmap, diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h index 4111592f0130..220ba207be91 100644 --- a/arch/arm/include/asm/device.h +++ b/arch/arm/include/asm/device.h @@ -7,7 +7,6 
@@ #define ASMARM_DEVICE_H struct dev_archdata { - struct dma_map_ops *dma_ops; #ifdef CONFIG_DMABOUNCE struct dmabounce_device_info *dmabounce; #endif diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index bf02dbd9ccda..716656925975 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -13,28 +13,22 @@ #include <asm/xen/hypervisor.h> #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -extern struct dma_map_ops arm_dma_ops; -extern struct dma_map_ops arm_coherent_dma_ops; +extern const struct dma_map_ops arm_dma_ops; +extern const struct dma_map_ops arm_coherent_dma_ops; -static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) +static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; + if (dev && dev->dma_ops) + return dev->dma_ops; return &arm_dma_ops; } -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { if (xen_initial_domain()) return xen_dma_ops; else - return __generic_dma_ops(dev); -} - -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) -{ - BUG_ON(!dev); - dev->archdata.dma_ops = ops; + return __generic_dma_ops(NULL); } #define HAVE_ARCH_DMA_SUPPORTED 1 diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 6ffdf17e0d5c..e309a5e2c935 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -180,7 +180,7 @@ static void arm_dma_sync_single_for_device(struct device *dev, __dma_page_cpu_to_dev(page, offset, size, dir); } -struct dma_map_ops arm_dma_ops = { +const struct dma_map_ops arm_dma_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, .mmap = arm_dma_mmap, @@ -204,7 +204,7 @@ static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); -struct dma_map_ops arm_coherent_dma_ops = { +const struct dma_map_ops arm_coherent_dma_ops = { .alloc = arm_coherent_dma_alloc, .free = arm_coherent_dma_free, .mmap = arm_coherent_dma_mmap, @@ -1069,7 +1069,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i, j; @@ -1103,7 +1103,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i; @@ -1122,7 +1122,7 @@ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; int i; @@ -1141,7 +1141,7 @@ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); struct 
scatterlist *s; int i; @@ -2101,7 +2101,7 @@ static void arm_iommu_sync_single_for_device(struct device *dev, __dma_page_cpu_to_dev(page, offset, size, dir); } -struct dma_map_ops iommu_ops = { +const struct dma_map_ops iommu_ops = { .alloc = arm_iommu_alloc_attrs, .free = arm_iommu_free_attrs, .mmap = arm_iommu_mmap_attrs, @@ -2121,7 +2121,7 @@ struct dma_map_ops iommu_ops = { .unmap_resource = arm_iommu_unmap_resource, }; -struct dma_map_ops iommu_coherent_ops = { +const struct dma_map_ops iommu_coherent_ops = { .alloc = arm_coherent_iommu_alloc_attrs, .free = arm_coherent_iommu_free_attrs, .mmap = arm_coherent_iommu_mmap_attrs, @@ -2321,7 +2321,7 @@ void arm_iommu_detach_device(struct device *dev) } EXPORT_SYMBOL_GPL(arm_iommu_detach_device); -static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) +static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) { return coherent ? &iommu_coherent_ops : &iommu_ops; } @@ -2376,7 +2376,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { } #endif /* CONFIG_ARM_DMA_USE_IOMMU */ -static struct dma_map_ops *arm_get_dma_map_ops(bool coherent) +static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) { return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; } @@ -2384,7 +2384,7 @@ static struct dma_map_ops *arm_get_dma_map_ops(bool coherent) void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; dev->archdata.dma_coherent = coherent; if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index bd62d94f8ac5..ce18c91b50a1 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -182,10 +182,10 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) } EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); -struct dma_map_ops *xen_dma_ops; +const struct dma_map_ops *xen_dma_ops; EXPORT_SYMBOL(xen_dma_ops); -static struct dma_map_ops xen_swiotlb_dma_ops = { +static const struct dma_map_ops xen_swiotlb_dma_ops = { .alloc = xen_swiotlb_alloc_coherent, .free = xen_swiotlb_free_coherent, .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h index 243ef256b8c9..73d5bab015eb 100644 --- a/arch/arm64/include/asm/device.h +++ b/arch/arm64/include/asm/device.h @@ -17,7 +17,6 @@ #define __ASM_DEVICE_H struct dev_archdata { - struct dma_map_ops *dma_ops; #ifdef CONFIG_IOMMU_API void *iommu; /* private IOMMU data */ #endif diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index ccea82c2b089..505756cdc67a 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -25,12 +25,12 @@ #include <asm/xen/hypervisor.h> #define DMA_ERROR_CODE (~(dma_addr_t)0) -extern struct dma_map_ops dummy_dma_ops; +extern const struct dma_map_ops dummy_dma_ops; -static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) +static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; + if (dev && dev->dma_ops) + return dev->dma_ops; /* * We expect no ISA devices, and all other DMA masters are expected to @@ -39,12 +39,12 @@ static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) return &dummy_dma_ops; } -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static 
inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { if (xen_initial_domain()) return xen_dma_ops; else - return __generic_dma_ops(dev); + return __generic_dma_ops(NULL); } void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index aff1d0afeb1e..81cdb2e844ed 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -363,7 +363,7 @@ static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr) return 0; } -static struct dma_map_ops swiotlb_dma_ops = { +static const struct dma_map_ops swiotlb_dma_ops = { .alloc = __dma_alloc, .free = __dma_free, .mmap = __swiotlb_mmap, @@ -516,7 +516,7 @@ static int __dummy_dma_supported(struct device *hwdev, u64 mask) return 0; } -struct dma_map_ops dummy_dma_ops = { +const struct dma_map_ops dummy_dma_ops = { .alloc = __dummy_alloc, .free = __dummy_free, .mmap = __dummy_mmap, @@ -795,7 +795,7 @@ static void __iommu_unmap_sg_attrs(struct device *dev, iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs); } -static struct dma_map_ops iommu_dma_ops = { +static const struct dma_map_ops iommu_dma_ops = { .alloc = __iommu_alloc_attrs, .free = __iommu_free_attrs, .mmap = __iommu_mmap_attrs, @@ -848,7 +848,7 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops, if (iommu_dma_init_domain(domain, dma_base, size, dev)) goto out_err; - dev->archdata.dma_ops = &iommu_dma_ops; + dev->dma_ops = &iommu_dma_ops; } return true; @@ -958,7 +958,7 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_teardown_dma_ops(struct device *dev) { - dev->archdata.dma_ops = NULL; + dev->dma_ops = NULL; } #else @@ -972,8 +972,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { - if (!dev->archdata.dma_ops) - dev->archdata.dma_ops = &swiotlb_dma_ops; + if (!dev->dma_ops) + dev->dma_ops = &swiotlb_dma_ops; dev->archdata.dma_coherent = coherent; __iommu_setup_dma_ops(dev, dma_base, size, iommu); diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h index 1115f2a645d1..7388451f9905 100644 --- a/arch/avr32/include/asm/dma-mapping.h +++ b/arch/avr32/include/asm/dma-mapping.h @@ -4,9 +4,9 @@ extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction); -extern struct dma_map_ops avr32_dma_ops; +extern const struct dma_map_ops avr32_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &avr32_dma_ops; } diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c index 54534e5d0781..555222d4f414 100644 --- a/arch/avr32/mm/dma-coherent.c +++ b/arch/avr32/mm/dma-coherent.c @@ -191,7 +191,7 @@ static void avr32_dma_sync_sg_for_device(struct device *dev, dma_cache_sync(dev, sg_virt(sg), sg->length, direction); } -struct dma_map_ops avr32_dma_ops = { +const struct dma_map_ops avr32_dma_ops = { .alloc = avr32_dma_alloc, .free = avr32_dma_free, .map_page = avr32_dma_map_page, diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h index 3490570aaa82..04254ac36bed 100644 --- a/arch/blackfin/include/asm/dma-mapping.h +++ b/arch/blackfin/include/asm/dma-mapping.h @@ -36,9 +36,9 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction 
dir) __dma_sync(addr, size, dir); } -extern struct dma_map_ops bfin_dma_ops; +extern const struct dma_map_ops bfin_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &bfin_dma_ops; } diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index a27a74a18fb0..477bb29a7987 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c @@ -159,7 +159,7 @@ static inline void bfin_dma_sync_single_for_device(struct device *dev, _dma_sync(handle, size, dir); } -struct dma_map_ops bfin_dma_ops = { +const struct dma_map_ops bfin_dma_ops = { .alloc = bfin_dma_alloc, .free = bfin_dma_free, diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index 5717b1e52d96..aca9f755e4f8 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h @@ -17,9 +17,9 @@ */ #define DMA_ERROR_CODE ~0 -extern struct dma_map_ops c6x_dma_ops; +extern const struct dma_map_ops c6x_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &c6x_dma_ops; } diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c index 6752df32ef06..9fff8be75f58 100644 --- a/arch/c6x/kernel/dma.c +++ b/arch/c6x/kernel/dma.c @@ -123,7 +123,7 @@ static void c6x_dma_sync_sg_for_device(struct device *dev, } -struct dma_map_ops c6x_dma_ops = { +const struct dma_map_ops c6x_dma_ops = { .alloc = c6x_dma_alloc, .free = c6x_dma_free, .map_page = c6x_dma_map_page, diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c index 1f0636793f0c..7072341995ff 100644 --- a/arch/cris/arch-v32/drivers/pci/dma.c +++ b/arch/cris/arch-v32/drivers/pci/dma.c @@ -69,7 +69,7 @@ static inline int v32_dma_supported(struct device *dev, u64 mask) return 1; } -struct dma_map_ops v32_dma_ops = { +const struct dma_map_ops v32_dma_ops = { .alloc = v32_dma_alloc, .free = v32_dma_free, .map_page = v32_dma_map_page, diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h index 5a370178a0e9..256169de3743 100644 --- a/arch/cris/include/asm/dma-mapping.h +++ b/arch/cris/include/asm/dma-mapping.h @@ -2,14 +2,14 @@ #define _ASM_CRIS_DMA_MAPPING_H #ifdef CONFIG_PCI -extern struct dma_map_ops v32_dma_ops; +extern const struct dma_map_ops v32_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &v32_dma_ops; } #else -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { BUG(); return NULL; diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h index 9a82bfa4303b..354900917585 100644 --- a/arch/frv/include/asm/dma-mapping.h +++ b/arch/frv/include/asm/dma-mapping.h @@ -7,9 +7,9 @@ extern unsigned long __nongprelbss dma_coherent_mem_start; extern unsigned long __nongprelbss dma_coherent_mem_end; -extern struct dma_map_ops frv_dma_ops; +extern const struct dma_map_ops frv_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &frv_dma_ops; } diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c index 187688128c65..4a96de7f0af4 
100644 --- a/arch/frv/mb93090-mb00/pci-dma-nommu.c +++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c @@ -164,7 +164,7 @@ static int frv_dma_supported(struct device *dev, u64 mask) return 1; } -struct dma_map_ops frv_dma_ops = { +const struct dma_map_ops frv_dma_ops = { .alloc = frv_dma_alloc, .free = frv_dma_free, .map_page = frv_dma_map_page, diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c index dba7df918144..e7130abc0dae 100644 --- a/arch/frv/mb93090-mb00/pci-dma.c +++ b/arch/frv/mb93090-mb00/pci-dma.c @@ -106,7 +106,7 @@ static int frv_dma_supported(struct device *dev, u64 mask) return 1; } -struct dma_map_ops frv_dma_ops = { +const struct dma_map_ops frv_dma_ops = { .alloc = frv_dma_alloc, .free = frv_dma_free, .map_page = frv_dma_map_page, diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h index 7ac7fadffed0..847c7562e046 100644 --- a/arch/h8300/include/asm/dma-mapping.h +++ b/arch/h8300/include/asm/dma-mapping.h @@ -1,9 +1,9 @@ #ifndef _H8300_DMA_MAPPING_H #define _H8300_DMA_MAPPING_H -extern struct dma_map_ops h8300_dma_map_ops; +extern const struct dma_map_ops h8300_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &h8300_dma_map_ops; } diff --git a/arch/h8300/kernel/dma.c b/arch/h8300/kernel/dma.c index 3651da045806..225dd0a188dc 100644 --- a/arch/h8300/kernel/dma.c +++ b/arch/h8300/kernel/dma.c @@ -60,7 +60,7 @@ static int map_sg(struct device *dev, struct scatterlist *sgl, return nents; } -struct dma_map_ops h8300_dma_map_ops = { +const struct dma_map_ops h8300_dma_map_ops = { .alloc = dma_alloc, .free = dma_free, .map_page = map_page, diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index 7ef58df909fc..d3a87bd9b686 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -32,13 +32,10 @@ struct device; extern int bad_dma_address; #define DMA_ERROR_CODE bad_dma_address -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (unlikely(dev == NULL)) - return NULL; - return dma_ops; } diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index dbc4f1003da4..e74b65009587 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -25,7 +25,7 @@ #include <linux/module.h> #include <asm/page.h> -struct dma_map_ops *dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); int bad_dma_address; /* globals are automatically initialized to zero */ @@ -203,7 +203,7 @@ static void hexagon_sync_single_for_device(struct device *dev, dma_sync(dma_addr_to_virt(dma_handle), size, dir); } -struct dma_map_ops hexagon_dma_ops = { +const struct dma_map_ops hexagon_dma_ops = { .alloc = hexagon_dma_alloc_coherent, .free = hexagon_free_coherent, .map_sg = hexagon_map_sg, diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c index 1e4cae5ae053..0310078a95f8 100644 --- a/arch/ia64/hp/common/hwsw_iommu.c +++ b/arch/ia64/hp/common/hwsw_iommu.c @@ -18,7 +18,7 @@ #include <linux/export.h> #include <asm/machvec.h> -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; /* swiotlb declarations & definitions: */ extern int 
swiotlb_late_init_with_default_size (size_t size); @@ -34,7 +34,7 @@ static inline int use_swiotlb(struct device *dev) !sba_dma_ops.dma_supported(dev, *dev->dma_mask); } -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) { if (use_swiotlb(dev)) return &swiotlb_dma_ops; diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 630ee8073899..aec4a3354abe 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -2096,7 +2096,7 @@ static int __init acpi_sba_ioc_init_acpi(void) /* This has to run before acpi_scan_init(). */ arch_initcall(acpi_sba_ioc_init_acpi); -extern struct dma_map_ops swiotlb_dma_ops; +extern const struct dma_map_ops swiotlb_dma_ops; static int __init sba_init(void) @@ -2216,7 +2216,7 @@ sba_page_override(char *str) __setup("sbapagesize=",sba_page_override); -struct dma_map_ops sba_dma_ops = { +const struct dma_map_ops sba_dma_ops = { .alloc = sba_alloc_coherent, .free = sba_free_coherent, .map_page = sba_map_page, diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index d472805edfa9..73ec3c6f4cfe 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -14,7 +14,7 @@ #define DMA_ERROR_CODE 0 -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; extern struct ia64_machine_vector ia64_mv; extern void set_iommu_machvec(void); @@ -23,7 +23,10 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t, extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, enum dma_data_direction); -#define get_dma_ops(dev) platform_dma_get_ops(dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) +{ + return platform_dma_get_ops(NULL); +} static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index ed7f09089f12..af285c423e1e 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h @@ -44,7 +44,7 @@ typedef void ia64_mv_kernel_launch_event_t(void); /* DMA-mapping interface: */ typedef void ia64_mv_dma_init (void); typedef u64 ia64_mv_dma_get_required_mask (struct device *); -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); /* * WARNING: The legacy I/O space is _architected_. 
Platforms are @@ -248,7 +248,7 @@ extern void machvec_init_from_cmdline(const char *cmdline); # endif /* CONFIG_IA64_GENERIC */ extern void swiotlb_dma_init(void); -extern struct dma_map_ops *dma_get_ops(struct device *); +extern const struct dma_map_ops *dma_get_ops(struct device *); /* * Define default versions so we can extend machvec for new platforms without having diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c index 7f7916238208..e0dd97f4eb69 100644 --- a/arch/ia64/kernel/dma-mapping.c +++ b/arch/ia64/kernel/dma-mapping.c @@ -4,7 +4,7 @@ /* Set this to 1 if there is a HW IOMMU in the system */ int iommu_detected __read_mostly; -struct dma_map_ops *dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) @@ -17,7 +17,7 @@ static int __init dma_init(void) } fs_initcall(dma_init); -struct dma_map_ops *dma_get_ops(struct device *dev) +const struct dma_map_ops *dma_get_ops(struct device *dev) { return dma_ops; } diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c index 992c1098c522..9094a73f996f 100644 --- a/arch/ia64/kernel/pci-dma.c +++ b/arch/ia64/kernel/pci-dma.c @@ -90,11 +90,11 @@ void __init pci_iommu_alloc(void) { dma_ops = &intel_dma_ops; - dma_ops->sync_single_for_cpu = machvec_dma_sync_single; - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg; - dma_ops->sync_single_for_device = machvec_dma_sync_single; - dma_ops->sync_sg_for_device = machvec_dma_sync_sg; - dma_ops->dma_supported = iommu_dma_supported; + intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single; + intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg; + intel_dma_ops.sync_single_for_device = machvec_dma_sync_single; + intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg; + intel_dma_ops.dma_supported = iommu_dma_supported; /* * The order of these functions is important for diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 2933208c0285..a14989dacded 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -30,7 +30,7 @@ static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, swiotlb_free_coherent(dev, size, vaddr, dma_addr); } -struct dma_map_ops swiotlb_dma_ops = { +const struct dma_map_ops swiotlb_dma_ops = { .alloc = ia64_swiotlb_alloc_coherent, .free = ia64_swiotlb_free_coherent, .map_page = swiotlb_map_page, diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index d227a6988d6b..95474460b367 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -18,6 +18,7 @@ config M32R select MODULES_USE_ELF_RELA select HAVE_DEBUG_STACKOVERFLOW select CPU_NO_EFFICIENT_FFS + select DMA_NOOP_OPS config SBUS bool diff --git a/arch/m32r/include/asm/device.h b/arch/m32r/include/asm/device.h index 4a9f35e0973f..5203fc87f080 100644 --- a/arch/m32r/include/asm/device.h +++ b/arch/m32r/include/asm/device.h @@ -4,7 +4,6 @@ * This file is released under the GPLv2 */ struct dev_archdata { - struct dma_map_ops *dma_ops; }; struct pdev_archdata { diff --git a/arch/m32r/include/asm/dma-mapping.h b/arch/m32r/include/asm/dma-mapping.h index 2c43a77fe942..c01d9f52d228 100644 --- a/arch/m32r/include/asm/dma-mapping.h +++ b/arch/m32r/include/asm/dma-mapping.h @@ -10,10 +10,8 @@ #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; return &dma_noop_ops; } diff 
--git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h index 96c536194287..9210e470771b 100644 --- a/arch/m68k/include/asm/dma-mapping.h +++ b/arch/m68k/include/asm/dma-mapping.h @@ -1,9 +1,9 @@ #ifndef _M68K_DMA_MAPPING_H #define _M68K_DMA_MAPPING_H -extern struct dma_map_ops m68k_dma_ops; +extern const struct dma_map_ops m68k_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &m68k_dma_ops; } diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index 1e4f386ba31e..87ef73a93856 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c @@ -158,7 +158,7 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, return nents; } -struct dma_map_ops m68k_dma_ops = { +const struct dma_map_ops m68k_dma_ops = { .alloc = m68k_dma_alloc, .free = m68k_dma_free, .map_page = m68k_dma_map_page, diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h index 27af5d479ce6..fad3dc3cb210 100644 --- a/arch/metag/include/asm/dma-mapping.h +++ b/arch/metag/include/asm/dma-mapping.h @@ -1,9 +1,9 @@ #ifndef _ASM_METAG_DMA_MAPPING_H #define _ASM_METAG_DMA_MAPPING_H -extern struct dma_map_ops metag_dma_ops; +extern const struct dma_map_ops metag_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &metag_dma_ops; } diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index 91968d92652b..f0ab3a498328 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c @@ -575,7 +575,7 @@ static void metag_dma_sync_sg_for_device(struct device *dev, dma_sync_for_device(sg_virt(sg), sg->length, direction); } -struct dma_map_ops metag_dma_ops = { +const struct dma_map_ops metag_dma_ops = { .alloc = metag_dma_alloc, .free = metag_dma_free, .map_page = metag_dma_map_page, diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 1768d4bdc8d3..3fad5e722a66 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -36,9 +36,9 @@ /* * Available generic sets of operations */ -extern struct dma_map_ops dma_direct_ops; +extern const struct dma_map_ops dma_direct_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &dma_direct_ops; } diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 818daf230eb4..12e093a03e60 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -187,7 +187,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, #endif } -struct dma_map_ops dma_direct_ops = { +const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 1226965e1e4f..c64bd87f0b6e 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -200,7 +200,7 @@ static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr } struct octeon_dma_map_ops { - struct dma_map_ops dma_map_ops; + const struct dma_map_ops dma_map_ops; dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr); phys_addr_t 
(*dma_to_phys)(struct device *dev, dma_addr_t daddr); }; @@ -328,7 +328,7 @@ static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = { }, }; -struct dma_map_ops *octeon_pci_dma_map_ops; +const struct dma_map_ops *octeon_pci_dma_map_ops; void __init octeon_pci_dma_init(void) { diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h index 21c2082a0dfb..6aa796f1081a 100644 --- a/arch/mips/include/asm/device.h +++ b/arch/mips/include/asm/device.h @@ -6,12 +6,7 @@ #ifndef _ASM_MIPS_DEVICE_H #define _ASM_MIPS_DEVICE_H -struct dma_map_ops; - struct dev_archdata { - /* DMA operations on that device */ - struct dma_map_ops *dma_ops; - #ifdef CONFIG_DMA_PERDEV_COHERENT /* Non-zero if DMA is coherent with CPU caches */ bool dma_coherent; diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 7aa71b9b0258..aba71385f9d1 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -9,14 +9,11 @@ #include <dma-coherence.h> #endif -extern struct dma_map_ops *mips_dma_map_ops; +extern const struct dma_map_ops *mips_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; - else - return mips_dma_map_ops; + return mips_dma_map_ops; } static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h index 460042ee5d6f..9110988b92a1 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h +++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h @@ -65,7 +65,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); struct dma_map_ops; -extern struct dma_map_ops *octeon_pci_dma_map_ops; +extern const struct dma_map_ops *octeon_pci_dma_map_ops; extern char *octeon_swiotlb; #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index be52c2125d71..e0717d10e650 100644 --- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h @@ -88,7 +88,7 @@ extern struct plat_smp_ops nlm_smp_ops; extern char nlm_reset_entry[], nlm_reset_entry_end[]; /* SWIOTLB */ -extern struct dma_map_ops nlm_swiotlb_dma_ops; +extern const struct dma_map_ops nlm_swiotlb_dma_ops; extern unsigned int nlm_threads_per_core; extern cpumask_t nlm_cpumask; diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c index df7235e33499..178ca17a5667 100644 --- a/arch/mips/loongson64/common/dma-swiotlb.c +++ b/arch/mips/loongson64/common/dma-swiotlb.c @@ -114,7 +114,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) return daddr; } -static struct dma_map_ops loongson_dma_map_ops = { +static const struct dma_map_ops loongson_dma_map_ops = { .alloc = loongson_dma_alloc_coherent, .free = loongson_dma_free_coherent, .map_page = loongson_dma_map_page, diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index 1895a692efd4..fe8df14b6169 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -417,7 +417,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, EXPORT_SYMBOL(dma_cache_sync); -static struct dma_map_ops 
mips_default_dma_map_ops = { +static const struct dma_map_ops mips_default_dma_map_ops = { .alloc = mips_dma_alloc_coherent, .free = mips_dma_free_coherent, .mmap = mips_dma_mmap, @@ -433,7 +433,7 @@ static struct dma_map_ops mips_default_dma_map_ops = { .dma_supported = mips_dma_supported }; -struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; +const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; EXPORT_SYMBOL(mips_dma_map_ops); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c index 0630693bec2a..0ec9d9da6d51 100644 --- a/arch/mips/netlogic/common/nlm-dma.c +++ b/arch/mips/netlogic/common/nlm-dma.c @@ -67,7 +67,7 @@ static void nlm_dma_free_coherent(struct device *dev, size_t size, swiotlb_free_coherent(dev, size, vaddr, dma_handle); } -struct dma_map_ops nlm_swiotlb_dma_ops = { +const struct dma_map_ops nlm_swiotlb_dma_ops = { .alloc = nlm_dma_alloc_coherent, .free = nlm_dma_free_coherent, .map_page = swiotlb_map_page, diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 308d051fc45c..9ee01936862e 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -167,7 +167,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev) pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig); } - dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops; + dev->dev.dma_ops = octeon_pci_dma_map_ops; return 0; } diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index 1dcd44757f32..737ef574b3ea 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h @@ -14,9 +14,9 @@ #include <asm/cache.h> #include <asm/io.h> -extern struct dma_map_ops mn10300_dma_ops; +extern const struct dma_map_ops mn10300_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &mn10300_dma_ops; } diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c index 4f4b9029f0ea..86108d2496b3 100644 --- a/arch/mn10300/mm/dma-alloc.c +++ b/arch/mn10300/mm/dma-alloc.c @@ -121,7 +121,7 @@ static int mn10300_dma_supported(struct device *dev, u64 mask) return 1; } -struct dma_map_ops mn10300_dma_ops = { +const struct dma_map_ops mn10300_dma_ops = { .alloc = mn10300_dma_alloc, .free = mn10300_dma_free, .map_page = mn10300_dma_map_page, diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h index bec8ac8e6ad2..7b3c6f280293 100644 --- a/arch/nios2/include/asm/dma-mapping.h +++ b/arch/nios2/include/asm/dma-mapping.h @@ -10,9 +10,9 @@ #ifndef _ASM_NIOS2_DMA_MAPPING_H #define _ASM_NIOS2_DMA_MAPPING_H -extern struct dma_map_ops nios2_dma_ops; +extern const struct dma_map_ops nios2_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &nios2_dma_ops; } diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c index f6a5dcf9d682..7040c1adbb5e 100644 --- a/arch/nios2/mm/dma-mapping.c +++ b/arch/nios2/mm/dma-mapping.c @@ -192,7 +192,7 @@ static void nios2_dma_sync_sg_for_device(struct device *dev, } -struct dma_map_ops nios2_dma_ops = { +const struct dma_map_ops nios2_dma_ops = { .alloc = nios2_dma_alloc, .free = nios2_dma_free, .map_page = nios2_dma_map_page, diff --git a/arch/openrisc/include/asm/dma-mapping.h 
b/arch/openrisc/include/asm/dma-mapping.h index 1f260bccb368..0c0075f17145 100644 --- a/arch/openrisc/include/asm/dma-mapping.h +++ b/arch/openrisc/include/asm/dma-mapping.h @@ -28,9 +28,9 @@ #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -extern struct dma_map_ops or1k_dma_map_ops; +extern const struct dma_map_ops or1k_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &or1k_dma_map_ops; } diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 906998bac957..b10369b7e31b 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -232,7 +232,7 @@ or1k_sync_single_for_device(struct device *dev, mtspr(SPR_DCBFR, cl); } -struct dma_map_ops or1k_dma_map_ops = { +const struct dma_map_ops or1k_dma_map_ops = { .alloc = or1k_dma_alloc, .free = or1k_dma_free, .map_page = or1k_map_page, diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 16e024602737..5404c6a726b2 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -21,13 +21,13 @@ */ #ifdef CONFIG_PA11 -extern struct dma_map_ops pcxl_dma_ops; -extern struct dma_map_ops pcx_dma_ops; +extern const struct dma_map_ops pcxl_dma_ops; +extern const struct dma_map_ops pcx_dma_ops; #endif -extern struct dma_map_ops *hppa_dma_ops; +extern const struct dma_map_ops *hppa_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return hppa_dma_ops; } diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 700e2d2da096..fa78419100c8 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -40,7 +40,7 @@ #include <asm/parisc-device.h> /* See comments in include/asm-parisc/pci.h */ -struct dma_map_ops *hppa_dma_ops __read_mostly; +const struct dma_map_ops *hppa_dma_ops __read_mostly; EXPORT_SYMBOL(hppa_dma_ops); static struct device root = { diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 697c53543a4d..5f0067a62738 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -572,7 +572,7 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist * flush_kernel_vmap_range(sg_virt(sg), sg->length); } -struct dma_map_ops pcxl_dma_ops = { +const struct dma_map_ops pcxl_dma_ops = { .dma_supported = pa11_dma_supported, .alloc = pa11_dma_alloc, .free = pa11_dma_free, @@ -608,7 +608,7 @@ static void pcx_dma_free(struct device *dev, size_t size, void *vaddr, return; } -struct dma_map_ops pcx_dma_ops = { +const struct dma_map_ops pcx_dma_ops = { .dma_supported = pa11_dma_supported, .alloc = pcx_dma_alloc, .free = pcx_dma_free, diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index 406c2b1ff82d..0245bfcaac32 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -6,7 +6,6 @@ #ifndef _ASM_POWERPC_DEVICE_H #define _ASM_POWERPC_DEVICE_H -struct dma_map_ops; struct device_node; #ifdef CONFIG_PPC64 struct pci_dn; @@ -20,9 +19,6 @@ struct iommu_table; * drivers/macintosh/macio_asic.c */ struct dev_archdata { - /* DMA operations on that device */ - struct dma_map_ops *dma_ops; - /* * These two used to be a union. 
However, with the hybrid ops we need * both so here we store both a DMA offset for direct mappings and diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 84e3f8dd5e4f..181a095468e4 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -76,24 +76,16 @@ static inline unsigned long device_to_mask(struct device *dev) #ifdef CONFIG_PPC64 extern struct dma_map_ops dma_iommu_ops; #endif -extern struct dma_map_ops dma_direct_ops; +extern const struct dma_map_ops dma_direct_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { /* We don't handle the NULL dev case for ISA for now. We could * do it via an out of line call but it is not needed for now. The * only ISA DMA device we support is the floppy and we have a hack * in the floppy driver directly to get a device for us. */ - if (unlikely(dev == NULL)) - return NULL; - - return dev->archdata.dma_ops; -} - -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) -{ - dev->archdata.dma_ops = ops; + return NULL; } /* diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index e9bd6cf0212f..93eded8d3843 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -53,8 +53,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) } #ifdef CONFIG_PCI -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); -extern struct dma_map_ops *get_pci_dma_ops(void); +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops); +extern const struct dma_map_ops *get_pci_dma_ops(void); #else /* CONFIG_PCI */ #define set_pci_dma_ops(d) #define get_pci_dma_ops() NULL diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h index a19f831a4cc9..17ee719e799f 100644 --- a/arch/powerpc/include/asm/ps3.h +++ b/arch/powerpc/include/asm/ps3.h @@ -435,7 +435,7 @@ static inline void *ps3_system_bus_get_drvdata( return dev_get_drvdata(&dev->core); } -/* These two need global scope for get_dma_ops(). */ +/* These two need global scope for get_arch_dma_ops(). */ extern struct bus_type ps3_system_bus_type; diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h index de99d6e29430..01d45a5fd00b 100644 --- a/arch/powerpc/include/asm/swiotlb.h +++ b/arch/powerpc/include/asm/swiotlb.h @@ -13,7 +13,7 @@ #include <linux/swiotlb.h> -extern struct dma_map_ops swiotlb_dma_ops; +extern const struct dma_map_ops swiotlb_dma_ops; static inline void dma_mark_clean(void *addr, size_t size) {} diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index c6689f658b50..d0ea7860e02b 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -46,7 +46,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev) * map_page, and unmap_page on highmem, use normal dma_ops * for everything else. 
*/ -struct dma_map_ops swiotlb_dma_ops = { +const struct dma_map_ops swiotlb_dma_ops = { .alloc = __dma_direct_alloc_coherent, .free = __dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 6877e3fa95bb..41c749586bd2 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -33,7 +33,7 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev) struct dev_archdata __maybe_unused *sd = &dev->archdata; #ifdef CONFIG_SWIOTLB - if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops) + if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops) pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT); #endif @@ -274,7 +274,7 @@ static inline void dma_direct_sync_single(struct device *dev, } #endif -struct dma_map_ops dma_direct_ops = { +const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, @@ -316,7 +316,7 @@ EXPORT_SYMBOL(dma_set_coherent_mask); int __dma_set_mask(struct device *dev, u64 dma_mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) return dma_ops->set_dma_mask(dev, dma_mask); @@ -344,7 +344,7 @@ EXPORT_SYMBOL(dma_set_mask); u64 __dma_get_required_mask(struct device *dev) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if (unlikely(dma_ops == NULL)) return 0; diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index a3f5334f5d8c..8e6fde8d28f3 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -60,14 +60,14 @@ resource_size_t isa_mem_base; EXPORT_SYMBOL(isa_mem_base); -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; -void set_pci_dma_ops(struct dma_map_ops *dma_ops) +void set_pci_dma_ops(const struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; } -struct dma_map_ops *get_pci_dma_ops(void) +const struct dma_map_ops *get_pci_dma_ops(void) { return pci_dma_ops; } diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 7ff51f96a00e..71b995bbcae0 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -651,7 +651,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask) static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); -static struct dma_map_ops dma_iommu_fixed_ops = { +static const struct dma_map_ops dma_iommu_fixed_ops = { .alloc = dma_fixed_alloc_coherent, .free = dma_fixed_free_coherent, .map_sg = dma_fixed_map_sg, @@ -692,7 +692,7 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, return 0; /* We use the PCI DMA ops */ - dev->archdata.dma_ops = get_pci_dma_ops(); + dev->dma_ops = get_pci_dma_ops(); cell_dma_dev_setup(dev); @@ -1172,7 +1172,7 @@ __setup("iommu_fixed=", setup_iommu_fixed); static u64 cell_dma_get_required_mask(struct device *dev) { - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; if (!dev->dma_mask) return 0; diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index e74adc4e7fd8..7fec04de27fc 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct 
pci_dev *dev) */ if (dev->vendor == 0x1959 && dev->device == 0xa007 && !firmware_has_feature(FW_FEATURE_LPAR)) { - dev->dev.archdata.dma_ops = &dma_direct_ops; + dev->dev.dma_ops = &dma_direct_ops; /* * Set the coherent DMA mask to prevent the iommu * being used unnecessarily diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index 3182400cf48f..c4a3e93dc324 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -363,7 +363,7 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action, return 0; /* We use the direct ops for localbus */ - dev->archdata.dma_ops = &dma_direct_ops; + dev->dma_ops = &dma_direct_ops; return 0; } diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 73b155fd4481..1c383f38031d 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -115,7 +115,7 @@ static u64 dma_npu_get_required_mask(struct device *dev) return 0; } -static struct dma_map_ops dma_npu_ops = { +static const struct dma_map_ops dma_npu_ops = { .map_page = dma_npu_map_page, .map_sg = dma_npu_map_sg, .alloc = dma_npu_alloc, diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 8af1c15aef85..2d2e5f80a3d3 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -701,7 +701,7 @@ static u64 ps3_dma_get_required_mask(struct device *_dev) return DMA_BIT_MASK(32); } -static struct dma_map_ops ps3_sb_dma_ops = { +static const struct dma_map_ops ps3_sb_dma_ops = { .alloc = ps3_alloc_coherent, .free = ps3_free_coherent, .map_sg = ps3_sb_map_sg, @@ -712,7 +712,7 @@ static struct dma_map_ops ps3_sb_dma_ops = { .unmap_page = ps3_unmap_page, }; -static struct dma_map_ops ps3_ioc0_dma_ops = { +static const struct dma_map_ops ps3_ioc0_dma_ops = { .alloc = ps3_alloc_coherent, .free = ps3_free_coherent, .map_sg = ps3_ioc0_map_sg, @@ -756,11 +756,11 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev) switch (dev->dev_type) { case PS3_DEVICE_TYPE_IOC0: - dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops; + dev->core.dma_ops = &ps3_ioc0_dma_ops; dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count); break; case PS3_DEVICE_TYPE_SB: - dev->core.archdata.dma_ops = &ps3_sb_dma_ops; + dev->core.dma_ops = &ps3_sb_dma_ops; dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count); break; diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index 614c28537141..99a6bf7f3bcf 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -136,7 +136,7 @@ static u64 ibmebus_dma_get_required_mask(struct device *dev) return DMA_BIT_MASK(64); } -static struct dma_map_ops ibmebus_dma_ops = { +static const struct dma_map_ops ibmebus_dma_ops = { .alloc = ibmebus_alloc_coherent, .free = ibmebus_free_coherent, .map_sg = ibmebus_map_sg, @@ -169,7 +169,7 @@ static int ibmebus_create_device(struct device_node *dn) return -ENOMEM; dev->dev.bus = &ibmebus_bus_type; - dev->dev.archdata.dma_ops = &ibmebus_dma_ops; + dev->dev.dma_ops = &ibmebus_dma_ops; ret = of_device_add(dev); if (ret) diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 2c8fb3ec989e..720493932486 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -615,7 +615,7 @@ static u64 vio_dma_get_required_mask(struct device 
*dev) return dma_iommu_ops.get_required_mask(dev); } -static struct dma_map_ops vio_dma_mapping_ops = { +static const struct dma_map_ops vio_dma_mapping_ops = { .alloc = vio_dma_iommu_alloc_coherent, .free = vio_dma_iommu_free_coherent, .mmap = dma_direct_mmap_coherent, diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2ef031bee7ab..d5c1073a2584 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -137,6 +137,7 @@ config S390 select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_API_DEBUG select HAVE_DMA_CONTIGUOUS + select DMA_NOOP_OPS select HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/arch/s390/include/asm/device.h b/arch/s390/include/asm/device.h index 4a9f35e0973f..5203fc87f080 100644 --- a/arch/s390/include/asm/device.h +++ b/arch/s390/include/asm/device.h @@ -4,7 +4,6 @@ * This file is released under the GPLv2 */ struct dev_archdata { - struct dma_map_ops *dma_ops; }; struct pdev_archdata { diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index ffaba07f50ab..3108b8dbe266 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -10,12 +10,10 @@ #define DMA_ERROR_CODE (~(dma_addr_t) 0x0) -extern struct dma_map_ops s390_pci_dma_ops; +extern const struct dma_map_ops s390_pci_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; return &dma_noop_ops; } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 4c0fa9b3b2a0..364b9d824be3 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -641,7 +641,7 @@ int pcibios_add_device(struct pci_dev *pdev) int i; pdev->dev.groups = zpci_attr_groups; - pdev->dev.archdata.dma_ops = &s390_pci_dma_ops; + pdev->dev.dma_ops = &s390_pci_dma_ops; zpci_map_resources(pdev); for (i = 0; i < PCI_BAR_COUNT; i++) { diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 1d7a9c71944a..9081a57fa340 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -650,7 +650,7 @@ static int __init dma_debug_do_init(void) } fs_initcall(dma_debug_do_init); -struct dma_map_ops s390_pci_dma_ops = { +const struct dma_map_ops s390_pci_dma_ops = { .alloc = s390_dma_alloc, .free = s390_dma_free, .map_sg = s390_dma_map_sg, diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 0052ad40e86d..d99008af5f73 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -1,10 +1,10 @@ #ifndef __ASM_SH_DMA_MAPPING_H #define __ASM_SH_DMA_MAPPING_H -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; extern void no_iommu_init(void); -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return dma_ops; } diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 47fee3b6e29c..d24c707b2181 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -65,7 +65,7 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg, } #endif -struct dma_map_ops nommu_dma_ops = { +const struct dma_map_ops nommu_dma_ops = { .alloc = dma_generic_alloc_coherent, .free = dma_generic_free_coherent, .map_page = nommu_map_page, diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 92b6976fde59..d1275adfa0ef 100644 --- 
a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -22,7 +22,7 @@ #define PREALLOC_DMA_DEBUG_ENTRIES 4096 -struct dma_map_ops *dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); static int __init dma_init(void) diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index 1180ae254154..69cc627779f2 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -18,20 +18,20 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, */ } -extern struct dma_map_ops *dma_ops; -extern struct dma_map_ops *leon_dma_ops; -extern struct dma_map_ops pci32_dma_ops; +extern const struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *leon_dma_ops; +extern const struct dma_map_ops pci32_dma_ops; extern struct bus_type pci_bus_type; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #ifdef CONFIG_SPARC_LEON if (sparc_cpu_model == sparc_leon) return leon_dma_ops; #endif #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) - if (dev->bus == &pci_bus_type) + if (bus == &pci_bus_type) return &pci32_dma_ops; #endif return dma_ops; diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 9df997995f6b..c63ba99ca551 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -741,7 +741,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, spin_unlock_irqrestore(&iommu->lock, flags); } -static struct dma_map_ops sun4u_dma_ops = { +static const struct dma_map_ops sun4u_dma_ops = { .alloc = dma_4u_alloc_coherent, .free = dma_4u_free_coherent, .map_page = dma_4u_map_page, @@ -752,7 +752,7 @@ static struct dma_map_ops sun4u_dma_ops = { .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, }; -struct dma_map_ops *dma_ops = &sun4u_dma_ops; +const struct dma_map_ops *dma_ops = &sun4u_dma_ops; EXPORT_SYMBOL(dma_ops); int dma_supported(struct device *dev, u64 device_mask) diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 6ffaec44931a..cf20033a1458 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -401,7 +401,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, BUG(); } -static struct dma_map_ops sbus_dma_ops = { +static const struct dma_map_ops sbus_dma_ops = { .alloc = sbus_alloc_coherent, .free = sbus_free_coherent, .map_page = sbus_map_page, @@ -637,7 +637,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist * } } -struct dma_map_ops pci32_dma_ops = { +const struct dma_map_ops pci32_dma_ops = { .alloc = pci32_alloc_coherent, .free = pci32_free_coherent, .map_page = pci32_map_page, @@ -652,10 +652,10 @@ struct dma_map_ops pci32_dma_ops = { EXPORT_SYMBOL(pci32_dma_ops); /* leon re-uses pci32_dma_ops */ -struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; +const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; EXPORT_SYMBOL(leon_dma_ops); -struct dma_map_ops *dma_ops = &sbus_dma_ops; +const struct dma_map_ops *dma_ops = &sbus_dma_ops; EXPORT_SYMBOL(dma_ops); diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index f4daccd12bf5..68bec7c97cb8 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -669,7 +669,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, local_irq_restore(flags); } -static struct dma_map_ops sun4v_dma_ops = { +static const struct dma_map_ops sun4v_dma_ops = { .alloc = 
dma_4v_alloc_coherent, .free = dma_4v_free_coherent, .map_page = dma_4v_map_page, diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h index 6ab8bf146d4c..1cf45422a0df 100644 --- a/arch/tile/include/asm/device.h +++ b/arch/tile/include/asm/device.h @@ -17,9 +17,6 @@ #define _ASM_TILE_DEVICE_H struct dev_archdata { - /* DMA operations on that device */ - struct dma_map_ops *dma_ops; - /* Offset of the DMA address from the PA. */ dma_addr_t dma_offset; diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h index 01ceb4a895b0..bbc71a29b2c6 100644 --- a/arch/tile/include/asm/dma-mapping.h +++ b/arch/tile/include/asm/dma-mapping.h @@ -24,17 +24,14 @@ #define ARCH_HAS_DMA_GET_REQUIRED_MASK #endif -extern struct dma_map_ops *tile_dma_map_ops; -extern struct dma_map_ops *gx_pci_dma_map_ops; -extern struct dma_map_ops *gx_legacy_pci_dma_map_ops; -extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops; +extern const struct dma_map_ops *tile_dma_map_ops; +extern const struct dma_map_ops *gx_pci_dma_map_ops; +extern const struct dma_map_ops *gx_legacy_pci_dma_map_ops; +extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; - else - return tile_dma_map_ops; + return tile_dma_map_ops; } static inline dma_addr_t get_dma_offset(struct device *dev) @@ -59,11 +56,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) static inline void dma_mark_clean(void *addr, size_t size) {} -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) -{ - dev->archdata.dma_ops = ops; -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index 24e0f8c21f2f..569bb6dd154a 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c @@ -329,7 +329,7 @@ tile_dma_supported(struct device *dev, u64 mask) return 1; } -static struct dma_map_ops tile_default_dma_map_ops = { +static const struct dma_map_ops tile_default_dma_map_ops = { .alloc = tile_dma_alloc_coherent, .free = tile_dma_free_coherent, .map_page = tile_dma_map_page, @@ -344,7 +344,7 @@ static struct dma_map_ops tile_default_dma_map_ops = { .dma_supported = tile_dma_supported }; -struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops; +const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops; EXPORT_SYMBOL(tile_dma_map_ops); /* Generic PCI DMA mapping functions */ @@ -516,7 +516,7 @@ tile_pci_dma_supported(struct device *dev, u64 mask) return 1; } -static struct dma_map_ops tile_pci_default_dma_map_ops = { +static const struct dma_map_ops tile_pci_default_dma_map_ops = { .alloc = tile_pci_dma_alloc_coherent, .free = tile_pci_dma_free_coherent, .map_page = tile_pci_dma_map_page, @@ -531,7 +531,7 @@ static struct dma_map_ops tile_pci_default_dma_map_ops = { .dma_supported = tile_pci_dma_supported }; -struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops; +const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops; EXPORT_SYMBOL(gx_pci_dma_map_ops); /* PCI DMA mapping functions for legacy PCI devices */ @@ -552,7 +552,7 @@ static void tile_swiotlb_free_coherent(struct device *dev, size_t size, swiotlb_free_coherent(dev, size, vaddr, dma_addr); } -static struct 
dma_map_ops pci_swiotlb_dma_ops = { +static const struct dma_map_ops pci_swiotlb_dma_ops = { .alloc = tile_swiotlb_alloc_coherent, .free = tile_swiotlb_free_coherent, .map_page = swiotlb_map_page, @@ -567,7 +567,7 @@ static struct dma_map_ops pci_swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, }; -static struct dma_map_ops pci_hybrid_dma_ops = { +static const struct dma_map_ops pci_hybrid_dma_ops = { .alloc = tile_swiotlb_alloc_coherent, .free = tile_swiotlb_free_coherent, .map_page = tile_pci_dma_map_page, @@ -582,18 +582,18 @@ static struct dma_map_ops pci_hybrid_dma_ops = { .dma_supported = tile_pci_dma_supported }; -struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; -struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops; +const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; +const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops; #else -struct dma_map_ops *gx_legacy_pci_dma_map_ops; -struct dma_map_ops *gx_hybrid_pci_dma_map_ops; +const struct dma_map_ops *gx_legacy_pci_dma_map_ops; +const struct dma_map_ops *gx_hybrid_pci_dma_map_ops; #endif EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops); EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops); int dma_set_mask(struct device *dev, u64 mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); /* * For PCI devices with 64-bit DMA addressing capability, promote @@ -623,7 +623,7 @@ EXPORT_SYMBOL(dma_set_mask); #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK int dma_set_coherent_mask(struct device *dev, u64 mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); /* * For PCI devices with 64-bit DMA addressing capability, promote diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h index 4749854afd03..518ba5848dd6 100644 --- a/arch/unicore32/include/asm/dma-mapping.h +++ b/arch/unicore32/include/asm/dma-mapping.h @@ -21,9 +21,9 @@ #include <asm/memory.h> #include <asm/cacheflush.h> -extern struct dma_map_ops swiotlb_dma_map_ops; +extern const struct dma_map_ops swiotlb_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { return &swiotlb_dma_map_ops; } diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c index 3e9f6489ba38..525413d6690e 100644 --- a/arch/unicore32/mm/dma-swiotlb.c +++ b/arch/unicore32/mm/dma-swiotlb.c @@ -31,7 +31,7 @@ static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, swiotlb_free_coherent(dev, size, vaddr, dma_addr); } -struct dma_map_ops swiotlb_dma_map_ops = { +const struct dma_map_ops swiotlb_dma_map_ops = { .alloc = unicore_swiotlb_alloc_coherent, .free = unicore_swiotlb_free_coherent, .map_sg = swiotlb_map_sg_attrs, diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 684ed6c3aa67..1b3ef26e77df 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h @@ -2,9 +2,6 @@ #define _ASM_X86_DEVICE_H struct dev_archdata { -#ifdef CONFIG_X86_DEV_DMA_OPS - struct dma_map_ops *dma_ops; -#endif #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) void *iommu; /* hook for IOMMU specific extension */ #endif @@ -13,7 +10,7 @@ struct dev_archdata { #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS) struct dma_domain { struct list_head node; - struct dma_map_ops *dma_ops; + const struct 
dma_map_ops *dma_ops; int domain_nr; }; void add_dma_domain(struct dma_domain *domain); diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 44461626830e..08a0838b83fb 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -25,18 +25,11 @@ extern int iommu_merge; extern struct device x86_dma_fallback_dev; extern int panic_on_overflow; -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { -#ifndef CONFIG_X86_DEV_DMA_OPS return dma_ops; -#else - if (unlikely(!dev) || !dev->archdata.dma_ops) - return dma_ops; - else - return dev->archdata.dma_ops; -#endif } bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index 345c99cef152..793869879464 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -1,7 +1,7 @@ #ifndef _ASM_X86_IOMMU_H #define _ASM_X86_IOMMU_H -extern struct dma_map_ops nommu_dma_ops; +extern const struct dma_map_ops nommu_dma_ops; extern int force_iommu, no_iommu; extern int iommu_detected; extern int iommu_pass_through; diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 63ff468a7986..82dfe32faaf4 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -695,7 +695,7 @@ static __init int init_amd_gatt(struct agp_kern_info *info) return -1; } -static struct dma_map_ops gart_dma_ops = { +static const struct dma_map_ops gart_dma_ops = { .map_sg = gart_map_sg, .unmap_sg = gart_unmap_sg, .map_page = gart_map_page, diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index d47517941bbc..0c150c06fa5a 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -478,7 +478,7 @@ static void calgary_free_coherent(struct device *dev, size_t size, free_pages((unsigned long)vaddr, get_order(size)); } -static struct dma_map_ops calgary_dma_ops = { +static const struct dma_map_ops calgary_dma_ops = { .alloc = calgary_alloc_coherent, .free = calgary_free_coherent, .map_sg = calgary_map_sg, @@ -1177,7 +1177,7 @@ static int __init calgary_init(void) tbl = find_iommu_table(&dev->dev); if (translation_enabled(tbl)) - dev->dev.archdata.dma_ops = &calgary_dma_ops; + dev->dev.dma_ops = &calgary_dma_ops; } return ret; @@ -1201,7 +1201,7 @@ error: calgary_disable_translation(dev); calgary_free_bus(dev); pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */ - dev->dev.archdata.dma_ops = NULL; + dev->dev.dma_ops = NULL; } while (1); return ret; diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index d5c223c9cf11..3a216ec869cd 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -17,7 +17,7 @@ static int forbid_dac __read_mostly; -struct dma_map_ops *dma_ops = &nommu_dma_ops; +const struct dma_map_ops *dma_ops = &nommu_dma_ops; EXPORT_SYMBOL(dma_ops); static int iommu_sac_force __read_mostly; @@ -215,7 +215,7 @@ early_param("iommu", iommu_setup); int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); #ifdef CONFIG_PCI if (mask > 0xffffffff && forbid_dac > 0) { diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 00e71ce396a8..a88952ef371c 100644 --- a/arch/x86/kernel/pci-nommu.c +++ 
b/arch/x86/kernel/pci-nommu.c @@ -88,7 +88,7 @@ static void nommu_sync_sg_for_device(struct device *dev, flush_write_buffers(); } -struct dma_map_ops nommu_dma_ops = { +const struct dma_map_ops nommu_dma_ops = { .alloc = dma_generic_alloc_coherent, .free = dma_generic_free_coherent, .map_sg = nommu_map_sg, diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 410efb2c7b80..1e23577e17cf 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -45,7 +45,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size, dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); } -static struct dma_map_ops swiotlb_dma_ops = { +static const struct dma_map_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, .alloc = x86_swiotlb_alloc_coherent, .free = x86_swiotlb_free_coherent, diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index a4fdfa7dcc1b..0cb52ae0a8f0 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -667,7 +667,7 @@ static void set_dma_domain_ops(struct pci_dev *pdev) spin_lock(&dma_domain_list_lock); list_for_each_entry(domain, &dma_domain_list, node) { if (pci_domain_nr(pdev->bus) == domain->domain_nr) { - pdev->dev.archdata.dma_ops = domain->dma_ops; + pdev->dev.dma_ops = domain->dma_ops; break; } } diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 052c1cb76305..ec008e800b45 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -179,7 +179,7 @@ static void *sta2x11_swiotlb_alloc_coherent(struct device *dev, } /* We have our own dma_ops: the same as swiotlb but from alloc (above) */ -static struct dma_map_ops sta2x11_dma_ops = { +static const struct dma_map_ops sta2x11_dma_ops = { .alloc = sta2x11_swiotlb_alloc_coherent, .free = x86_swiotlb_free_coherent, .map_page = swiotlb_map_page, @@ -203,7 +203,7 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev) return; pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1); pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1); - pdev->dev.archdata.dma_ops = &sta2x11_dma_ops; + pdev->dev.dma_ops = &sta2x11_dma_ops; /* We must enable all devices as master, for audio DMA to work */ pci_set_master(pdev); @@ -223,7 +223,7 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { struct sta2x11_mapping *map; - if (dev->archdata.dma_ops != &sta2x11_dma_ops) { + if (dev->dma_ops != &sta2x11_dma_ops) { if (!dev->dma_mask) return false; return addr + size - 1 <= *dev->dma_mask; @@ -247,7 +247,7 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) */ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { - if (dev->archdata.dma_ops != &sta2x11_dma_ops) + if (dev->dma_ops != &sta2x11_dma_ops) return paddr; return p2a(paddr, to_pci_dev(dev)); } @@ -259,7 +259,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) */ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) { - if (dev->archdata.dma_ops != &sta2x11_dma_ops) + if (dev->dma_ops != &sta2x11_dma_ops) return daddr; return a2p(daddr, to_pci_dev(dev)); } diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c index a0b36a9d5df1..42b08f8fc2ca 100644 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ b/arch/x86/xen/pci-swiotlb-xen.c @@ -18,7 +18,7 @@ int xen_swiotlb __read_mostly; -static struct dma_map_ops xen_swiotlb_dma_ops = { +static const struct dma_map_ops xen_swiotlb_dma_ops = { .alloc = xen_swiotlb_alloc_coherent, .free = xen_swiotlb_free_coherent, .sync_single_for_cpu = 
xen_swiotlb_sync_single_for_cpu, diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h index fe1f5c878493..1deeb8ebbb1b 100644 --- a/arch/xtensa/include/asm/device.h +++ b/arch/xtensa/include/asm/device.h @@ -6,11 +6,7 @@ #ifndef _ASM_XTENSA_DEVICE_H #define _ASM_XTENSA_DEVICE_H -struct dma_map_ops; - struct dev_archdata { - /* DMA operations on that device */ - struct dma_map_ops *dma_ops; }; struct pdev_archdata { diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 3fc1170a6488..c6140fa8c0be 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -18,14 +18,11 @@ #define DMA_ERROR_CODE (~(dma_addr_t)0x0) -extern struct dma_map_ops xtensa_dma_map_ops; +extern const struct dma_map_ops xtensa_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - if (dev && dev->archdata.dma_ops) - return dev->archdata.dma_ops; - else - return &xtensa_dma_map_ops; + return &xtensa_dma_map_ops; } void dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 34c1f9fa6acc..cec86a1c2acc 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -250,7 +250,7 @@ int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return 0; } -struct dma_map_ops xtensa_dma_map_ops = { +const struct dma_map_ops xtensa_dma_map_ops = { .alloc = xtensa_dma_alloc, .free = xtensa_dma_free, .map_page = xtensa_map_page, diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index f2e48655a906..a63e8400ea3b 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -333,6 +333,15 @@ int ib_register_device(struct ib_device *device, int ret; struct ib_client *client; struct ib_udata uhw = {.outlen = 0, .inlen = 0}; + struct device *parent = device->dev.parent; + + WARN_ON_ONCE(!parent); + if (!device->dev.dma_ops) + device->dev.dma_ops = parent->dma_ops; + if (!device->dev.dma_mask) + device->dev.dma_mask = parent->dma_mask; + if (!device->dev.coherent_dma_mask) + device->dev.coherent_dma_mask = parent->coherent_dma_mask; mutex_lock(&device_mutex); diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index c1fb545e8d78..daadf3130c9f 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -1258,7 +1258,7 @@ int ib_device_register_sysfs(struct ib_device *device, int ret; int i; - device->dev.parent = device->dma_device; + WARN_ON_ONCE(!device->dev.parent); ret = dev_set_name(class_dev, "%s", device->name); if (ret) return ret; diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index e0a995b85a2d..cc0d51fb06e3 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1290,7 +1290,7 @@ static void ib_ucm_add_one(struct ib_device *device) goto err; ucm_dev->dev.class = &cm_class; - ucm_dev->dev.parent = device->dma_device; + ucm_dev->dev.parent = device->dev.parent; ucm_dev->dev.devt = ucm_dev->cdev.dev; ucm_dev->dev.release = ib_ucm_release_dev; dev_set_name(&ucm_dev->dev, "ucm%d", ucm_dev->devnum); diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 249b403b43a4..aca7ff7abedc 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1188,7 +1188,7 @@ static int 
ib_umad_init_port(struct ib_device *device, int port_num, if (cdev_add(&port->cdev, base, 1)) goto err_cdev; - port->dev = device_create(umad_class, device->dma_device, + port->dev = device_create(umad_class, device->dev.parent, port->cdev.dev, port, "umad%d", port->dev_num); if (IS_ERR(port->dev)) @@ -1207,7 +1207,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, if (cdev_add(&port->sm_cdev, base, 1)) goto err_sm_cdev; - port->sm_dev = device_create(umad_class, device->dma_device, + port->sm_dev = device_create(umad_class, device->dev.parent, port->sm_cdev.dev, port, "issm%d", port->dev_num); if (IS_ERR(port->sm_dev)) diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index b3f95d453fba..e3fb4b1af1ad 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -1174,7 +1174,7 @@ static void ib_uverbs_add_one(struct ib_device *device) if (cdev_add(&uverbs_dev->cdev, base, 1)) goto err_cdev; - uverbs_dev->dev = device_create(uverbs_class, device->dma_device, + uverbs_dev->dev = device_create(uverbs_class, device->dev.parent, uverbs_dev->cdev.dev, uverbs_dev, "uverbs%d", uverbs_dev->devnum); if (IS_ERR(uverbs_dev->dev)) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index bd452a92b386..5d355401179b 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -436,7 +436,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid); ibdev->num_comp_vectors = 1; - ibdev->dma_device = &rdev->en_dev->pdev->dev; + ibdev->dev.parent = &rdev->en_dev->pdev->dev; ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY; /* User space */ diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 48649f93258a..318ec5267bdf 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1393,7 +1393,7 @@ int iwch_register_device(struct iwch_dev *dev) memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC)); dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports; dev->ibdev.num_comp_vectors = 1; - dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); + dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev; dev->ibdev.query_device = iwch_query_device; dev->ibdev.query_port = iwch_query_port; dev->ibdev.query_pkey = iwch_query_pkey; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index bdf7de571d83..df64417ab6f2 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -572,7 +572,7 @@ int c4iw_register_device(struct c4iw_dev *dev) memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC)); dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports; dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq; - dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev); + dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev; dev->ibdev.query_device = c4iw_query_device; dev->ibdev.query_port = c4iw_query_port; dev->ibdev.query_pkey = c4iw_query_pkey; diff --git a/drivers/infiniband/hw/hfi1/dma.c b/drivers/infiniband/hw/hfi1/dma.c deleted file mode 100644 index 7e8dab892848..000000000000 --- a/drivers/infiniband/hw/hfi1/dma.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright(c) 2015, 2016 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. 
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ -#include <linux/types.h> -#include <linux/scatterlist.h> - -#include "verbs.h" - -#define BAD_DMA_ADDRESS ((u64)0) - -/* - * The following functions implement driver specific replacements - * for the ib_dma_*() functions. - * - * These functions return kernel virtual addresses instead of - * device bus addresses since the driver uses the CPU to copy - * data instead of using hardware DMA. 
- */ - -static int hfi1_mapping_error(struct ib_device *dev, u64 dma_addr) -{ - return dma_addr == BAD_DMA_ADDRESS; -} - -static u64 hfi1_dma_map_single(struct ib_device *dev, void *cpu_addr, - size_t size, enum dma_data_direction direction) -{ - if (WARN_ON(!valid_dma_direction(direction))) - return BAD_DMA_ADDRESS; - - return (u64)cpu_addr; -} - -static void hfi1_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static u64 hfi1_dma_map_page(struct ib_device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - u64 addr; - - if (WARN_ON(!valid_dma_direction(direction))) - return BAD_DMA_ADDRESS; - - if (offset + size > PAGE_SIZE) - return BAD_DMA_ADDRESS; - - addr = (u64)page_address(page); - if (addr) - addr += offset; - - return addr; -} - -static void hfi1_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static int hfi1_map_sg(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction) -{ - struct scatterlist *sg; - u64 addr; - int i; - int ret = nents; - - if (WARN_ON(!valid_dma_direction(direction))) - return BAD_DMA_ADDRESS; - - for_each_sg(sgl, sg, nents, i) { - addr = (u64)page_address(sg_page(sg)); - if (!addr) { - ret = 0; - break; - } - sg->dma_address = addr + sg->offset; -#ifdef CONFIG_NEED_SG_DMA_LENGTH - sg->dma_length = sg->length; -#endif - } - return ret; -} - -static void hfi1_unmap_sg(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static void hfi1_sync_single_for_cpu(struct ib_device *dev, u64 addr, - size_t size, enum dma_data_direction dir) -{ -} - -static void hfi1_sync_single_for_device(struct ib_device *dev, u64 addr, - size_t size, - enum dma_data_direction dir) -{ -} - -static void *hfi1_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, gfp_t flag) -{ - struct page *p; - void *addr = NULL; - - p = alloc_pages(flag, get_order(size)); - if (p) - addr = page_address(p); - if (dma_handle) - *dma_handle = (u64)addr; - return addr; -} - -static void hfi1_dma_free_coherent(struct ib_device *dev, size_t size, - void *cpu_addr, u64 dma_handle) -{ - free_pages((unsigned long)cpu_addr, get_order(size)); -} - -struct ib_dma_mapping_ops hfi1_dma_mapping_ops = { - .mapping_error = hfi1_mapping_error, - .map_single = hfi1_dma_map_single, - .unmap_single = hfi1_dma_unmap_single, - .map_page = hfi1_dma_map_page, - .unmap_page = hfi1_dma_unmap_page, - .map_sg = hfi1_map_sg, - .unmap_sg = hfi1_unmap_sg, - .sync_single_for_cpu = hfi1_sync_single_for_cpu, - .sync_single_for_device = hfi1_sync_single_for_device, - .alloc_coherent = hfi1_dma_alloc_coherent, - .free_coherent = hfi1_dma_free_coherent -}; diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 6e595afca24c..09cda3c35e82 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -4406,7 +4406,7 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, switch (in_mad->base_version) { case OPA_MGMT_BASE_VERSION: if (unlikely(in_mad_size != sizeof(struct opa_mad))) { - dev_err(ibdev->dma_device, "invalid in_mad_size\n"); + dev_err(ibdev->dev.parent, "invalid in_mad_size\n"); return IB_MAD_RESULT_FAILURE; } return 
hfi1_process_opa_mad(ibdev, mad_flags, port, diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 33f00f0719c5..222315fadab1 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1703,7 +1703,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz); ibdev->owner = THIS_MODULE; ibdev->phys_port_cnt = dd->num_pports; - ibdev->dma_device = &dd->pcidev->dev; + ibdev->dev.parent = &dd->pcidev->dev; ibdev->modify_device = modify_device; ibdev->alloc_hw_stats = alloc_hw_stats; ibdev->get_hw_stats = get_hw_stats; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 6843409fba29..c3b41f95e70a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -439,7 +439,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ib_dev->owner = THIS_MODULE; ib_dev->node_type = RDMA_NODE_IB_CA; - ib_dev->dma_device = dev; + ib_dev->dev.parent = dev; ib_dev->phys_port_cnt = hr_dev->caps.num_ports; ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index f036f32f15d3..3f44f2f91f03 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -101,7 +101,7 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp, event.event = IB_EVENT_QP_ACCESS_ERR; break; default: - dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n", + dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n", type, hr_qp->qpn); return; } diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 5f695bf232a8..9b2849979756 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -2758,7 +2758,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev (1ull << IB_USER_VERBS_CMD_POST_SEND); iwibdev->ibdev.phys_port_cnt = 1; iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count; - iwibdev->ibdev.dma_device = &pcidev->dev; iwibdev->ibdev.dev.parent = &pcidev->dev; iwibdev->ibdev.query_port = i40iw_query_port; iwibdev->ibdev.modify_port = i40iw_modify_port; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 211cbbe9ccd1..88608906ce25 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2628,7 +2628,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports; ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; - ibdev->ib_dev.dma_device = &dev->persist->pdev->dev; + ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev; ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev; ibdev->ib_dev.add_gid = mlx4_ib_add_gid; ibdev->ib_dev.del_gid = mlx4_ib_del_gid; diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 7f3d976d81ed..64fed44b43a6 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -55,7 +55,7 @@ #define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__ #define mlx4_ib_warn(ibdev, format, arg...) 
\ - dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg) + dev_warn((ibdev)->dev.parent, MLX4_IB_DRV_NAME ": " format, ## arg) enum { MLX4_IB_SQ_MIN_WQE_SHIFT = 6, diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 5d73989d9771..433bcdbdd680 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -292,10 +292,10 @@ mlx4_alloc_priv_pages(struct ib_device *device, if (!mr->pages) return -ENOMEM; - mr->page_map = dma_map_single(device->dma_device, mr->pages, + mr->page_map = dma_map_single(device->dev.parent, mr->pages, mr->page_map_size, DMA_TO_DEVICE); - if (dma_mapping_error(device->dma_device, mr->page_map)) { + if (dma_mapping_error(device->dev.parent, mr->page_map)) { ret = -ENOMEM; goto err; } @@ -313,7 +313,7 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr) if (mr->pages) { struct ib_device *device = mr->ibmr.device; - dma_unmap_single(device->dma_device, mr->page_map, + dma_unmap_single(device->dev.parent, mr->page_map, mr->page_map_size, DMA_TO_DEVICE); free_page((unsigned long)mr->pages); mr->pages = NULL; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 6a8498c052a5..5b3355268725 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3363,7 +3363,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = dev->mdev->priv.eq_table.num_comp_vectors; - dev->ib_dev.dma_device = &mdev->pdev->dev; + dev->ib_dev.dev.parent = &mdev->pdev->dev; dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; dev->ib_dev.uverbs_cmd_mask = diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 3c1f483d003f..b8f9382a8b7d 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -966,7 +966,7 @@ int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, int page_shift, int flags) { struct mlx5_ib_dev *dev = mr->dev; - struct device *ddev = dev->ib_dev.dma_device; + struct device *ddev = dev->ib_dev.dev.parent; struct mlx5_ib_ucontext *uctx = NULL; int size; void *xlt; @@ -1411,9 +1411,9 @@ mlx5_alloc_priv_descs(struct ib_device *device, mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); - mr->desc_map = dma_map_single(device->dma_device, mr->descs, + mr->desc_map = dma_map_single(device->dev.parent, mr->descs, size, DMA_TO_DEVICE); - if (dma_mapping_error(device->dma_device, mr->desc_map)) { + if (dma_mapping_error(device->dev.parent, mr->desc_map)) { ret = -ENOMEM; goto err; } @@ -1432,7 +1432,7 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr) struct ib_device *device = mr->ibmr.device; int size = mr->max_descs * mr->desc_size; - dma_unmap_single(device->dma_device, mr->desc_map, + dma_unmap_single(device->dev.parent, mr->desc_map, size, DMA_TO_DEVICE); kfree(mr->descs_alloc); mr->descs = NULL; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index ce163184e742..22d0e6ee5af6 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1224,7 +1224,7 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; dev->ib_dev.num_comp_vectors = 1; - dev->ib_dev.dma_device = &dev->pdev->dev; + dev->ib_dev.dev.parent = &dev->pdev->dev; dev->ib_dev.query_device = mthca_query_device; dev->ib_dev.query_port = mthca_query_port; 
dev->ib_dev.modify_device = mthca_modify_device; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index d3eae2f3e9f5..ccf0a4cffe9c 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3731,7 +3731,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) nesibdev->ibdev.phys_port_cnt = 1; nesibdev->ibdev.num_comp_vectors = 1; - nesibdev->ibdev.dma_device = &nesdev->pcidev->dev; nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev; nesibdev->ibdev.query_device = nes_query_device; nesibdev->ibdev.query_port = nes_query_port; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 3e43bdc81e7a..57c9a2ad0260 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -199,7 +199,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext; dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext; dev->ibdev.mmap = ocrdma_mmap; - dev->ibdev.dma_device = &dev->nic_info.pdev->dev; + dev->ibdev.dev.parent = &dev->nic_info.pdev->dev; dev->ibdev.process_mad = ocrdma_process_mad; dev->ibdev.get_port_immutable = ocrdma_port_immutable; diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 3ac8aa5ef37d..b9b47e5cc8b3 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -170,7 +170,7 @@ static int qedr_register_device(struct qedr_dev *dev) dev->ibdev.get_port_immutable = qedr_port_immutable; dev->ibdev.get_netdev = qedr_get_netdev; - dev->ibdev.dma_device = &dev->pdev->dev; + dev->ibdev.dev.parent = &dev->pdev->dev; dev->ibdev.get_link_layer = qedr_link_layer; dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str; diff --git a/drivers/infiniband/hw/qib/qib_dma.c b/drivers/infiniband/hw/qib/qib_dma.c deleted file mode 100644 index 59fe092b4b0f..000000000000 --- a/drivers/infiniband/hw/qib/qib_dma.c +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include <linux/types.h> -#include <linux/scatterlist.h> - -#include "qib_verbs.h" - -#define BAD_DMA_ADDRESS ((u64) 0) - -/* - * The following functions implement driver specific replacements - * for the ib_dma_*() functions. - * - * These functions return kernel virtual addresses instead of - * device bus addresses since the driver uses the CPU to copy - * data instead of using hardware DMA. - */ - -static int qib_mapping_error(struct ib_device *dev, u64 dma_addr) -{ - return dma_addr == BAD_DMA_ADDRESS; -} - -static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr, - size_t size, enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); - return (u64) cpu_addr; -} - -static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); -} - -static u64 qib_dma_map_page(struct ib_device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - u64 addr; - - BUG_ON(!valid_dma_direction(direction)); - - if (offset + size > PAGE_SIZE) { - addr = BAD_DMA_ADDRESS; - goto done; - } - - addr = (u64) page_address(page); - if (addr) - addr += offset; - /* TODO: handle highmem pages */ - -done: - return addr; -} - -static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); -} - -static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction) -{ - struct scatterlist *sg; - u64 addr; - int i; - int ret = nents; - - BUG_ON(!valid_dma_direction(direction)); - - for_each_sg(sgl, sg, nents, i) { - addr = (u64) page_address(sg_page(sg)); - /* TODO: handle highmem pages */ - if (!addr) { - ret = 0; - break; - } - sg->dma_address = addr + sg->offset; -#ifdef CONFIG_NEED_SG_DMA_LENGTH - sg->dma_length = sg->length; -#endif - } - return ret; -} - -static void qib_unmap_sg(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); -} - -static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr, - size_t size, enum dma_data_direction dir) -{ -} - -static void qib_sync_single_for_device(struct ib_device *dev, u64 addr, - size_t size, - enum dma_data_direction dir) -{ -} - -static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, gfp_t flag) -{ - struct page *p; - void *addr = NULL; - - p = alloc_pages(flag, get_order(size)); - if (p) - addr = page_address(p); - if (dma_handle) - *dma_handle = (u64) addr; - return addr; -} - -static void qib_dma_free_coherent(struct ib_device *dev, size_t size, - void *cpu_addr, u64 dma_handle) -{ - free_pages((unsigned long) cpu_addr, get_order(size)); -} - -struct ib_dma_mapping_ops qib_dma_mapping_ops = { - .mapping_error = qib_mapping_error, - .map_single = qib_dma_map_single, - .unmap_single = qib_dma_unmap_single, - .map_page = qib_dma_map_page, - .unmap_page = qib_dma_unmap_page, - .map_sg = qib_map_sg, - .unmap_sg = qib_unmap_sg, - .sync_single_for_cpu = qib_sync_single_for_cpu, - .sync_single_for_device = qib_sync_single_for_device, - .alloc_coherent = qib_dma_alloc_coherent, - .free_coherent = qib_dma_free_coherent -}; diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 2c3c93572c17..8fdf79f8d4e4 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ 
b/drivers/infiniband/hw/qib/qib_keys.c @@ -158,10 +158,7 @@ int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, unsigned n, m; size_t off; - /* - * We use RKEY == zero for kernel virtual addresses - * (see qib_get_dma_mr and qib_dma.c). - */ + /* We use RKEY == zero for kernel virtual addresses */ rcu_read_lock(); if (rkey == 0) { struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd); diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 6b56f1c01a07..83f8b5f24381 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1550,7 +1550,7 @@ int qib_register_ib_device(struct qib_devdata *dd) ibdev->owner = THIS_MODULE; ibdev->node_guid = ppd->guid; ibdev->phys_port_cnt = dd->num_pports; - ibdev->dma_device = &dd->pcidev->dev; + ibdev->dev.parent = &dd->pcidev->dev; ibdev->modify_device = qib_modify_device; ibdev->process_mad = qib_process_mad; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 4f5a45db08e1..c0c1e8b027b1 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -382,7 +382,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP; us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT; us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS; - us_ibdev->ib_dev.dma_device = &dev->dev; + us_ibdev->ib_dev.dev.parent = &dev->dev; us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION; strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index e03d2f6c1f90..100bea5c42ff 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -173,7 +173,7 @@ static int pvrdma_register_device(struct pvrdma_dev *dev) dev->flags = 0; dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.num_comp_vectors = 1; - dev->ib_dev.dma_device = &dev->pdev->dev; + dev->ib_dev.dev.parent = &dev->pdev->dev; dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION; dev->ib_dev.uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig index 1da8d01a6855..fdd001ce13d8 100644 --- a/drivers/infiniband/sw/rdmavt/Kconfig +++ b/drivers/infiniband/sw/rdmavt/Kconfig @@ -1,5 +1,6 @@ config INFINIBAND_RDMAVT tristate "RDMA verbs transport library" depends on 64BIT + select DMA_VIRT_OPS ---help--- This is a common software verbs provider for RDMA networks. diff --git a/drivers/infiniband/sw/rdmavt/Makefile b/drivers/infiniband/sw/rdmavt/Makefile index c33a4f84413c..78b276a90401 100644 --- a/drivers/infiniband/sw/rdmavt/Makefile +++ b/drivers/infiniband/sw/rdmavt/Makefile @@ -7,7 +7,7 @@ # obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt.o -rdmavt-y := vt.o ah.o cq.o dma.o mad.o mcast.o mmap.o mr.o pd.o qp.o \ +rdmavt-y := vt.o ah.o cq.o mad.o mcast.o mmap.o mr.o pd.o qp.o \ rc.o srq.o trace.o CFLAGS_trace.o = -I$(src) diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c deleted file mode 100644 index f2cefb0d9180..000000000000 --- a/drivers/infiniband/sw/rdmavt/dma.c +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright(c) 2016 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. 
- * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ -#include <linux/types.h> -#include <linux/scatterlist.h> -#include <rdma/ib_verbs.h> - -#include "dma.h" - -#define BAD_DMA_ADDRESS ((u64)0) - -/* - * The following functions implement driver specific replacements - * for the ib_dma_*() functions. - * - * These functions return kernel virtual addresses instead of - * device bus addresses since the driver uses the CPU to copy - * data instead of using hardware DMA. 
- */ - -static int rvt_mapping_error(struct ib_device *dev, u64 dma_addr) -{ - return dma_addr == BAD_DMA_ADDRESS; -} - -static u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr, - size_t size, enum dma_data_direction direction) -{ - if (WARN_ON(!valid_dma_direction(direction))) - return BAD_DMA_ADDRESS; - - return (u64)cpu_addr; -} - -static void rvt_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - u64 addr; - - if (WARN_ON(!valid_dma_direction(direction))) - return BAD_DMA_ADDRESS; - - addr = (u64)page_address(page); - if (addr) - addr += offset; - - return addr; -} - -static void rvt_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction) -{ - struct scatterlist *sg; - u64 addr; - int i; - int ret = nents; - - if (WARN_ON(!valid_dma_direction(direction))) - return 0; - - for_each_sg(sgl, sg, nents, i) { - addr = (u64)page_address(sg_page(sg)); - if (!addr) { - ret = 0; - break; - } - sg->dma_address = addr + sg->offset; -#ifdef CONFIG_NEED_SG_DMA_LENGTH - sg->dma_length = sg->length; -#endif - } - return ret; -} - -static void rvt_unmap_sg(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - /* This is a stub, nothing to be done here */ -} - -static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - return rvt_map_sg(dev, sgl, nents, direction); -} - -static void rvt_unmap_sg_attrs(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs) -{ - return rvt_unmap_sg(dev, sg, nents, direction); -} - -static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr, - size_t size, enum dma_data_direction dir) -{ -} - -static void rvt_sync_single_for_device(struct ib_device *dev, u64 addr, - size_t size, - enum dma_data_direction dir) -{ -} - -static void *rvt_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, gfp_t flag) -{ - struct page *p; - void *addr = NULL; - - p = alloc_pages(flag, get_order(size)); - if (p) - addr = page_address(p); - if (dma_handle) - *dma_handle = (u64)addr; - return addr; -} - -static void rvt_dma_free_coherent(struct ib_device *dev, size_t size, - void *cpu_addr, u64 dma_handle) -{ - free_pages((unsigned long)cpu_addr, get_order(size)); -} - -struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = { - .mapping_error = rvt_mapping_error, - .map_single = rvt_dma_map_single, - .unmap_single = rvt_dma_unmap_single, - .map_page = rvt_dma_map_page, - .unmap_page = rvt_dma_unmap_page, - .map_sg = rvt_map_sg, - .unmap_sg = rvt_unmap_sg, - .map_sg_attrs = rvt_map_sg_attrs, - .unmap_sg_attrs = rvt_unmap_sg_attrs, - .sync_single_for_cpu = rvt_sync_single_for_cpu, - .sync_single_for_device = rvt_sync_single_for_device, - .alloc_coherent = rvt_dma_alloc_coherent, - .free_coherent = rvt_dma_free_coherent -}; diff --git a/drivers/infiniband/sw/rdmavt/dma.h b/drivers/infiniband/sw/rdmavt/dma.h deleted file mode 100644 index 979f07e09195..000000000000 --- 
a/drivers/infiniband/sw/rdmavt/dma.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef DEF_RDMAVTDMA_H -#define DEF_RDMAVTDMA_H - -/* - * Copyright(c) 2016 Intel Corporation. - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * - Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -extern struct ib_dma_mapping_ops rvt_default_dma_mapping_ops; - -#endif /* DEF_RDMAVTDMA_H */ diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index c80a69b1ffcb..ae30b6838d79 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c @@ -320,8 +320,8 @@ static void __rvt_free_mr(struct rvt_mr *mr) * @acc: access flags * * Return: the memory region on success, otherwise returns an errno. - * Note that all DMA addresses should be created via the - * struct ib_dma_mapping_ops functions (see dma.c). + * Note that all DMA addresses should be created via the functions in + * struct dma_virt_ops. */ struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc) { @@ -799,7 +799,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd, /* * We use LKEY == zero for kernel virtual addresses - * (see rvt_get_dma_mr and dma.c). + * (see rvt_get_dma_mr() and dma_virt_ops). */ rcu_read_lock(); if (sge->lkey == 0) { @@ -897,7 +897,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, /* * We use RKEY == zero for kernel virtual addresses - * (see rvt_get_dma_mr and dma.c). + * (see rvt_get_dma_mr() and dma_virt_ops). 
*/ rcu_read_lock(); if (rkey == 0) { diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index 1165639a914b..0d7c6bb551d9 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -47,6 +47,7 @@ #include <linux/module.h> #include <linux/kernel.h> +#include <linux/dma-mapping.h> #include "vt.h" #include "trace.h" @@ -778,8 +779,7 @@ int rvt_register_device(struct rvt_dev_info *rdi) } /* DMA Operations */ - rdi->ibdev.dma_ops = - rdi->ibdev.dma_ops ? : &rvt_default_dma_mapping_ops; + rdi->ibdev.dev.dma_ops = rdi->ibdev.dev.dma_ops ? : &dma_virt_ops; /* Protection Domain */ spin_lock_init(&rdi->n_pds_lock); diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h index 6b01eaa4461b..f363505312be 100644 --- a/drivers/infiniband/sw/rdmavt/vt.h +++ b/drivers/infiniband/sw/rdmavt/vt.h @@ -50,7 +50,6 @@ #include <rdma/rdma_vt.h> #include <linux/pci.h> -#include "dma.h" #include "pd.h" #include "qp.h" #include "ah.h" diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig index 1e4e628fe7b0..7d1ac27ed251 100644 --- a/drivers/infiniband/sw/rxe/Kconfig +++ b/drivers/infiniband/sw/rxe/Kconfig @@ -2,6 +2,7 @@ config RDMA_RXE tristate "Software RDMA over Ethernet (RoCE) driver" depends on INET && PCI && INFINIBAND depends on NET_UDP_TUNNEL + select DMA_VIRT_OPS ---help--- This driver implements the InfiniBand RDMA transport over the Linux network stack. It enables a system with a diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile index 3b3fb9d1c470..ec35ff022a42 100644 --- a/drivers/infiniband/sw/rxe/Makefile +++ b/drivers/infiniband/sw/rxe/Makefile @@ -14,7 +14,6 @@ rdma_rxe-y := \ rxe_qp.o \ rxe_cq.o \ rxe_mr.o \ - rxe_dma.o \ rxe_opcode.o \ rxe_mmap.o \ rxe_icrc.o \ diff --git a/drivers/infiniband/sw/rxe/rxe_dma.c b/drivers/infiniband/sw/rxe/rxe_dma.c deleted file mode 100644 index a0f8af5851ae..000000000000 --- a/drivers/infiniband/sw/rxe/rxe_dma.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "rxe.h" -#include "rxe_loc.h" - -#define DMA_BAD_ADDER ((u64)0) - -static int rxe_mapping_error(struct ib_device *dev, u64 dma_addr) -{ - return dma_addr == DMA_BAD_ADDER; -} - -static u64 rxe_dma_map_single(struct ib_device *dev, - void *cpu_addr, size_t size, - enum dma_data_direction direction) -{ - WARN_ON(!valid_dma_direction(direction)); - return (uintptr_t)cpu_addr; -} - -static void rxe_dma_unmap_single(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction) -{ - WARN_ON(!valid_dma_direction(direction)); -} - -static u64 rxe_dma_map_page(struct ib_device *dev, - struct page *page, - unsigned long offset, - size_t size, enum dma_data_direction direction) -{ - u64 addr; - - WARN_ON(!valid_dma_direction(direction)); - - if (offset + size > PAGE_SIZE) { - addr = DMA_BAD_ADDER; - goto done; - } - - addr = (uintptr_t)page_address(page); - if (addr) - addr += offset; - -done: - return addr; -} - -static void rxe_dma_unmap_page(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction) -{ - WARN_ON(!valid_dma_direction(direction)); -} - -static int rxe_map_sg(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction) -{ - struct scatterlist *sg; - u64 addr; - int i; - int ret = nents; - - WARN_ON(!valid_dma_direction(direction)); - - for_each_sg(sgl, sg, nents, i) { - addr = (uintptr_t)page_address(sg_page(sg)); - if (!addr) { - ret = 0; - break; - } - sg->dma_address = addr + sg->offset; -#ifdef CONFIG_NEED_SG_DMA_LENGTH - sg->dma_length = sg->length; -#endif - } - - return ret; -} - -static void rxe_unmap_sg(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - WARN_ON(!valid_dma_direction(direction)); -} - -static int rxe_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - return rxe_map_sg(dev, sgl, nents, direction); -} - -static void rxe_unmap_sg_attrs(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs) -{ - rxe_unmap_sg(dev, sg, nents, direction); -} - -static void rxe_sync_single_for_cpu(struct ib_device *dev, - u64 addr, - size_t size, enum dma_data_direction dir) -{ -} - -static void rxe_sync_single_for_device(struct ib_device *dev, - u64 addr, - size_t size, enum dma_data_direction dir) -{ -} - -static void *rxe_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, gfp_t flag) -{ - struct page *p; - void *addr = NULL; - - p = alloc_pages(flag, get_order(size)); - if (p) - addr = page_address(p); - - if (dma_handle) - *dma_handle = (uintptr_t)addr; - - return addr; -} - -static void rxe_dma_free_coherent(struct ib_device *dev, size_t size, - void *cpu_addr, u64 dma_handle) -{ - free_pages((unsigned long)cpu_addr, get_order(size)); -} - -struct ib_dma_mapping_ops rxe_dma_mapping_ops = { - .mapping_error = rxe_mapping_error, - .map_single = rxe_dma_map_single, - .unmap_single = rxe_dma_unmap_single, - .map_page = rxe_dma_map_page, - .unmap_page = rxe_dma_unmap_page, - .map_sg = rxe_map_sg, - .unmap_sg = rxe_unmap_sg, - .map_sg_attrs = rxe_map_sg_attrs, - .unmap_sg_attrs = rxe_unmap_sg_attrs, - .sync_single_for_cpu = rxe_sync_single_for_cpu, - .sync_single_for_device = rxe_sync_single_for_device, - .alloc_coherent = rxe_dma_alloc_coherent, - .free_coherent = rxe_dma_free_coherent -}; diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h 
b/drivers/infiniband/sw/rxe/rxe_loc.h index 272337e5e948..183a9d379b41 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -237,8 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct ib_udata *udata); -extern struct ib_dma_mapping_ops rxe_dma_mapping_ops; - void rxe_release(struct kref *kref); void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index d2e2eff7a515..5113e502f6f9 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -31,6 +31,7 @@ * SOFTWARE. */ +#include <linux/dma-mapping.h> #include "rxe.h" #include "rxe_loc.h" #include "rxe_queue.h" @@ -169,7 +170,7 @@ static int rxe_query_pkey(struct ib_device *device, struct rxe_port *port; if (unlikely(port_num != 1)) { - dev_warn(device->dma_device, "invalid port_num = %d\n", + dev_warn(device->dev.parent, "invalid port_num = %d\n", port_num); goto err1; } @@ -177,7 +178,7 @@ static int rxe_query_pkey(struct ib_device *device, port = &rxe->port; if (unlikely(index >= port->attr.pkey_tbl_len)) { - dev_warn(device->dma_device, "invalid index = %d\n", + dev_warn(device->dev.parent, "invalid index = %d\n", index); goto err1; } @@ -1234,10 +1235,10 @@ int rxe_register_device(struct rxe_dev *rxe) dev->node_type = RDMA_NODE_IB_CA; dev->phys_port_cnt = 1; dev->num_comp_vectors = RXE_NUM_COMP_VECTORS; - dev->dma_device = rxe_dma_device(rxe); + dev->dev.parent = rxe_dma_device(rxe); dev->local_dma_lkey = 0; dev->node_guid = rxe_node_guid(rxe); - dev->dma_ops = &rxe_dma_mapping_ops; + dev->dev.dma_ops = &dma_virt_ops; dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION; dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 7b6d40ff1acf..bac455a1942d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -65,7 +65,7 @@ static void ipoib_get_drvinfo(struct net_device *netdev, ib_get_device_fw_str(priv->ca, drvinfo->fw_version, sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device), + strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent), sizeof(drvinfo->bus_info)); strlcpy(drvinfo->version, ipoib_driver_version, diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 259c59f67394..d1d3fb7a6127 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -2020,7 +2020,7 @@ static struct net_device *ipoib_add_port(const char *format, if (!priv) goto alloc_mem_failed; - SET_NETDEV_DEV(priv->dev, hca->dma_device); + SET_NETDEV_DEV(priv->dev, hca->dev.parent); priv->dev->dev_id = port - 1; result = ib_query_port(hca, port, &attr); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 30a6985909e0..5a887efb4bdf 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -652,7 +652,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, } if (iscsi_host_add(shost, - ib_conn->device->ib_device->dma_device)) { + ib_conn->device->ib_device->dev.parent)) { mutex_unlock(&iser_conn->state_mutex); goto free_host; } diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 
3c7fa972a38c..cee46266f434 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -2933,7 +2933,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target) sprintf(target->target_name, "SRP.T10:%016llX", be64_to_cpu(target->id_ext)); - if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) + if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent)) return -ENODEV; memcpy(ids.port_id, &target->id_ext, 8); @@ -3546,7 +3546,7 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port) host->port = port; host->dev.class = &srp_class; - host->dev.parent = device->dev->dma_device; + host->dev.parent = device->dev->dev.parent; dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port); if (device_register(&host->dev)) diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index bc5a2d86ae7e..7e314c2f2071 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2479,8 +2479,7 @@ static void srpt_add_one(struct ib_device *device) struct ib_srq_init_attr srq_attr; int i; - pr_debug("device = %p, device->dma_ops = %p\n", device, - device->dma_ops); + pr_debug("device = %p\n", device); sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); if (!sdev) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 09bd3b290bb8..98940d1392cb 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -117,7 +117,7 @@ const struct iommu_ops amd_iommu_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); int amd_iommu_max_glx_val = -1; -static struct dma_map_ops amd_iommu_dma_ops; +static const struct dma_map_ops amd_iommu_dma_ops; /* * This struct contains device specific data for the IOMMU @@ -519,7 +519,7 @@ static void iommu_uninit_device(struct device *dev) iommu_group_remove_device(dev); /* Remove dma-ops */ - dev->archdata.dma_ops = NULL; + dev->dma_ops = NULL; /* * We keep dev_data around for unplugged devices and reuse it when the @@ -2168,7 +2168,7 @@ static int amd_iommu_add_device(struct device *dev) dev_name(dev)); iommu_ignore_device(dev); - dev->archdata.dma_ops = &nommu_dma_ops; + dev->dma_ops = &nommu_dma_ops; goto out; } init_iommu_group(dev); @@ -2185,7 +2185,7 @@ static int amd_iommu_add_device(struct device *dev) if (domain->type == IOMMU_DOMAIN_IDENTITY) dev_data->passthrough = true; else - dev->archdata.dma_ops = &amd_iommu_dma_ops; + dev->dma_ops = &amd_iommu_dma_ops; out: iommu_completion_wait(iommu); @@ -2732,7 +2732,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask) return check_device(dev); } -static struct dma_map_ops amd_iommu_dma_ops = { +static const struct dma_map_ops amd_iommu_dma_ops = { .alloc = alloc_coherent, .free = free_coherent, .map_page = map_page, diff --git a/drivers/misc/mic/bus/mic_bus.c b/drivers/misc/mic/bus/mic_bus.c index be37890abb93..77b16ca66846 100644 --- a/drivers/misc/mic/bus/mic_bus.c +++ b/drivers/misc/mic/bus/mic_bus.c @@ -143,7 +143,7 @@ static void mbus_release_dev(struct device *d) } struct mbus_device * -mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, +mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct mbus_hw_ops *hw_ops, int index, void __iomem *mmio_va) { @@ -158,7 +158,7 @@ mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, mbdev->dev.parent = pdev; mbdev->id.device = id; mbdev->id.vendor = MBUS_DEV_ANY_ID; - 
mbdev->dev.archdata.dma_ops = dma_ops; + mbdev->dev.dma_ops = dma_ops; mbdev->dev.dma_mask = &mbdev->dev.coherent_dma_mask; dma_set_mask(&mbdev->dev, DMA_BIT_MASK(64)); mbdev->dev.release = mbus_release_dev; diff --git a/drivers/misc/mic/bus/scif_bus.c b/drivers/misc/mic/bus/scif_bus.c index ff6e01c25810..a444db5f61fe 100644 --- a/drivers/misc/mic/bus/scif_bus.c +++ b/drivers/misc/mic/bus/scif_bus.c @@ -138,7 +138,7 @@ static void scif_release_dev(struct device *d) } struct scif_hw_dev * -scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, +scif_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, struct mic_mw *mmio, struct mic_mw *aper, void *dp, void __iomem *rdp, struct dma_chan **chan, int num_chan, @@ -154,7 +154,7 @@ scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, sdev->dev.parent = pdev; sdev->id.device = id; sdev->id.vendor = SCIF_DEV_ANY_ID; - sdev->dev.archdata.dma_ops = dma_ops; + sdev->dev.dma_ops = dma_ops; sdev->dev.release = scif_release_dev; sdev->hw_ops = hw_ops; sdev->dnode = dnode; diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h index 94f29ac608b6..ff59568219ad 100644 --- a/drivers/misc/mic/bus/scif_bus.h +++ b/drivers/misc/mic/bus/scif_bus.h @@ -113,7 +113,7 @@ int scif_register_driver(struct scif_driver *driver); void scif_unregister_driver(struct scif_driver *driver); struct scif_hw_dev * scif_register_device(struct device *pdev, int id, - struct dma_map_ops *dma_ops, + const struct dma_map_ops *dma_ops, struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, struct mic_mw *mmio, struct mic_mw *aper, void *dp, void __iomem *rdp, diff --git a/drivers/misc/mic/bus/vop_bus.c b/drivers/misc/mic/bus/vop_bus.c index 303da222f5b6..fd7f2a6049f8 100644 --- a/drivers/misc/mic/bus/vop_bus.c +++ b/drivers/misc/mic/bus/vop_bus.c @@ -154,7 +154,7 @@ vop_register_device(struct device *pdev, int id, vdev->dev.parent = pdev; vdev->id.device = id; vdev->id.vendor = VOP_DEV_ANY_ID; - vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops; + vdev->dev.dma_ops = dma_ops; vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask; dma_set_mask(&vdev->dev, DMA_BIT_MASK(64)); vdev->dev.release = vop_release_dev; diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index 9599d732aff3..c327985c9523 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c @@ -245,7 +245,7 @@ static void __mic_dma_unmap_sg(struct device *dev, dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir); } -static struct dma_map_ops __mic_dma_ops = { +static const struct dma_map_ops __mic_dma_ops = { .alloc = __mic_dma_alloc, .free = __mic_dma_free, .map_page = __mic_dma_map_page, @@ -344,7 +344,7 @@ mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, mic_unmap_single(mdev, dma_addr, size); } -static struct dma_map_ops mic_dma_ops = { +static const struct dma_map_ops mic_dma_ops = { .map_page = mic_dma_map_page, .unmap_page = mic_dma_unmap_page, }; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 49b2121af689..bc20a2442a04 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1251,7 +1251,7 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue) dev = nvme_rdma_find_get_device(queue->cm_id); if (!dev) { - dev_err(queue->cm_id->device->dma_device, + dev_err(queue->cm_id->device->dev.parent, "no client data found!\n"); return -ECONNREFUSED; } diff --git 
a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 553ef8a5d588..aeb073b5fe16 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1011,7 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); } -static struct dma_map_ops ccio_ops = { +static const struct dma_map_ops ccio_ops = { .dma_supported = ccio_dma_supported, .alloc = ccio_alloc, .free = ccio_free, diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 151b86b6d2e2..33385e574433 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1069,7 +1069,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, } -static struct dma_map_ops sba_ops = { +static const struct dma_map_ops sba_ops = { .dma_supported = sba_dma_supported, .alloc = sba_alloc, .free = sba_free, diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c index 18ef1a93c10a..e27ad2a3bd33 100644 --- a/drivers/pci/host/vmd.c +++ b/drivers/pci/host/vmd.c @@ -282,7 +282,7 @@ static struct device *to_vmd_dev(struct device *dev) return &vmd->dev->dev; } -static struct dma_map_ops *vmd_dma_ops(struct device *dev) +static const struct dma_map_ops *vmd_dma_ops(struct device *dev) { return get_dma_ops(to_vmd_dev(dev)); } diff --git a/include/linux/device.h b/include/linux/device.h index a48a7ff70164..30c4570e928d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -925,6 +925,7 @@ struct device { #ifdef CONFIG_NUMA int numa_node; /* NUMA node this device is close to */ #endif + const struct dma_map_ops *dma_ops; u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for alloc_coherent mappings as diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index c24721a33b4c..0977317c6835 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -134,7 +134,8 @@ struct dma_map_ops { int is_phys; }; -extern struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_noop_ops; +extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) @@ -171,14 +172,26 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, #ifdef CONFIG_HAS_DMA #include <asm/dma-mapping.h> +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (dev && dev->dma_ops) + return dev->dma_ops; + return get_arch_dma_ops(dev ? dev->bus : NULL); +} + +static inline void set_dma_ops(struct device *dev, + const struct dma_map_ops *dma_ops) +{ + dev->dma_ops = dma_ops; +} #else /* * Define the dma api to allow compilation but not linking of * dma dependent code. 
Code that depends on the dma-mapping * API needs to set 'depends on HAS_DMA' in its Kconfig */ -extern struct dma_map_ops bad_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +extern const struct dma_map_ops bad_dma_ops; +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { return &bad_dma_ops; } @@ -189,7 +202,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(ptr, size); @@ -208,7 +221,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) @@ -224,7 +237,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); int i, ents; struct scatterlist *s; @@ -242,7 +255,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg int nents, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); debug_dma_unmap_sg(dev, sg, nents, dir); @@ -256,7 +269,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; kmemcheck_mark_initialized(page_address(page) + offset, size); @@ -272,7 +285,7 @@ static inline void dma_unmap_page_attrs(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_page) @@ -286,7 +299,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); @@ -307,7 +320,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->unmap_resource) @@ -319,7 +332,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_cpu) @@ -331,7 +344,7 @@ static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_single_for_device) @@ -371,7 +384,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) 
{ - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_cpu) @@ -383,7 +396,7 @@ static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); if (ops->sync_sg_for_device) @@ -428,7 +441,7 @@ static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); if (ops->mmap) return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); @@ -446,7 +459,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); if (ops->get_sgtable) return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, @@ -464,7 +477,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *cpu_addr; BUG_ON(!ops); @@ -486,7 +499,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); WARN_ON(irqs_disabled()); @@ -544,7 +557,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) #ifndef HAVE_ARCH_DMA_SUPPORTED static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (!ops) return 0; @@ -557,7 +570,7 @@ static inline int dma_supported(struct device *dev, u64 mask) #ifndef HAVE_ARCH_DMA_SET_MASK static inline int dma_set_mask(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->set_dma_mask) return ops->set_dma_mask(dev, mask); diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h index 27d7c95fd0da..504d54c71bdb 100644 --- a/include/linux/mic_bus.h +++ b/include/linux/mic_bus.h @@ -90,7 +90,7 @@ struct mbus_hw_ops { }; struct mbus_device * -mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, +mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct mbus_hw_ops *hw_ops, int index, void __iomem *mmio_va); void mbus_unregister_device(struct mbus_device *mbdev); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 89f5bd4e1d52..d84849c5dc05 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1843,53 +1843,6 @@ struct ib_cache { struct ib_port_cache *ports; }; -struct ib_dma_mapping_ops { - int (*mapping_error)(struct ib_device *dev, - u64 dma_addr); - u64 (*map_single)(struct ib_device *dev, - void *ptr, size_t size, - enum dma_data_direction direction); - void (*unmap_single)(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction); - u64 (*map_page)(struct ib_device *dev, - struct page *page, unsigned long offset, - size_t size, - enum 
dma_data_direction direction); - void (*unmap_page)(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction); - int (*map_sg)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction); - void (*unmap_sg)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction); - int (*map_sg_attrs)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs); - void (*unmap_sg_attrs)(struct ib_device *dev, - struct scatterlist *sg, int nents, - enum dma_data_direction direction, - unsigned long attrs); - void (*sync_single_for_cpu)(struct ib_device *dev, - u64 dma_handle, - size_t size, - enum dma_data_direction dir); - void (*sync_single_for_device)(struct ib_device *dev, - u64 dma_handle, - size_t size, - enum dma_data_direction dir); - void *(*alloc_coherent)(struct ib_device *dev, - size_t size, - u64 *dma_handle, - gfp_t flag); - void (*free_coherent)(struct ib_device *dev, - size_t size, void *cpu_addr, - u64 dma_handle); -}; - struct iw_cm_verbs; struct ib_port_immutable { @@ -1900,8 +1853,6 @@ struct ib_port_immutable { }; struct ib_device { - struct device *dma_device; - char name[IB_DEVICE_NAME_MAX]; struct list_head event_handler_list; @@ -2151,7 +2102,6 @@ struct ib_device { struct ib_rwq_ind_table_init_attr *init_attr, struct ib_udata *udata); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); - struct ib_dma_mapping_ops *dma_ops; struct module *owner; struct device dev; @@ -3043,9 +2993,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) */ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) { - if (dev->dma_ops) - return dev->dma_ops->mapping_error(dev, dma_addr); - return dma_mapping_error(dev->dma_device, dma_addr); + return dma_mapping_error(&dev->dev, dma_addr); } /** @@ -3059,9 +3007,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_single(dev, cpu_addr, size, direction); - return dma_map_single(dev->dma_device, cpu_addr, size, direction); + return dma_map_single(&dev->dev, cpu_addr, size, direction); } /** @@ -3075,28 +3021,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_single(dev, addr, size, direction); - else - dma_unmap_single(dev->dma_device, addr, size, direction); -} - -static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, - void *cpu_addr, size_t size, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_map_single_attrs(dev->dma_device, cpu_addr, size, - direction, dma_attrs); -} - -static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, - u64 addr, size_t size, - enum dma_data_direction direction, - unsigned long dma_attrs) -{ - return dma_unmap_single_attrs(dev->dma_device, addr, size, - direction, dma_attrs); + dma_unmap_single(&dev->dev, addr, size, direction); } /** @@ -3113,9 +3038,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_page(dev, page, offset, size, direction); - return dma_map_page(dev->dma_device, page, offset, size, direction); + return dma_map_page(&dev->dev, page, offset, size, direction); } /** @@ -3129,10 +3052,7 @@ static 
inline void ib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_page(dev, addr, size, direction); - else - dma_unmap_page(dev->dma_device, addr, size, direction); + dma_unmap_page(&dev->dev, addr, size, direction); } /** @@ -3146,9 +3066,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - if (dev->dma_ops) - return dev->dma_ops->map_sg(dev, sg, nents, direction); - return dma_map_sg(dev->dma_device, sg, nents, direction); + return dma_map_sg(&dev->dev, sg, nents, direction); } /** @@ -3162,10 +3080,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { - if (dev->dma_ops) - dev->dma_ops->unmap_sg(dev, sg, nents, direction); - else - dma_unmap_sg(dev->dma_device, sg, nents, direction); + dma_unmap_sg(&dev->dev, sg, nents, direction); } static inline int ib_dma_map_sg_attrs(struct ib_device *dev, @@ -3173,12 +3088,7 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - if (dev->dma_ops) - return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, - dma_attrs); - else - return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); + return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); } static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, @@ -3186,12 +3096,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, enum dma_data_direction direction, unsigned long dma_attrs) { - if (dev->dma_ops) - return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, - dma_attrs); - else - dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, - dma_attrs); + dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); } /** * ib_sg_dma_address - Return the DMA address from a scatter/gather entry @@ -3233,10 +3138,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - if (dev->dma_ops) - dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); - else - dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); + dma_sync_single_for_cpu(&dev->dev, addr, size, dir); } /** @@ -3251,10 +3153,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, size_t size, enum dma_data_direction dir) { - if (dev->dma_ops) - dev->dma_ops->sync_single_for_device(dev, addr, size, dir); - else - dma_sync_single_for_device(dev->dma_device, addr, size, dir); + dma_sync_single_for_device(&dev->dev, addr, size, dir); } /** @@ -3266,19 +3165,10 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, */ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, size_t size, - u64 *dma_handle, + dma_addr_t *dma_handle, gfp_t flag) { - if (dev->dma_ops) - return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); - else { - dma_addr_t handle; - void *ret; - - ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); - *dma_handle = handle; - return ret; - } + return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); } /** @@ -3290,12 +3180,9 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, */ static inline void ib_dma_free_coherent(struct ib_device *dev, size_t size, void *cpu_addr, - u64 dma_handle) + dma_addr_t dma_handle) { - if (dev->dma_ops) - dev->dma_ops->free_coherent(dev, size, cpu_addr, 
dma_handle); - else - dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); + dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); } /** diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h index 95251512e2c4..44b587b49904 100644 --- a/include/xen/arm/hypervisor.h +++ b/include/xen/arm/hypervisor.h @@ -18,7 +18,7 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return PARAVIRT_LAZY_NONE; } -extern struct dma_map_ops *xen_dma_ops; +extern const struct dma_map_ops *xen_dma_ops; #ifdef CONFIG_XEN void __init xen_early_init(void); diff --git a/lib/Kconfig b/lib/Kconfig index fe7e8e175db8..8f69579dfac3 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -394,6 +394,16 @@ config HAS_DMA depends on !NO_DMA default y +config DMA_NOOP_OPS + bool + depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) + default n + +config DMA_VIRT_OPS + bool + depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) + default n + config CHECK_SIGNATURE bool diff --git a/lib/Makefile b/lib/Makefile index 445a39c21f46..c9023efbd4ca 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -27,7 +27,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o -lib-$(CONFIG_HAS_DMA) += dma-noop.o +lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o +lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o lib-y += kobject.o klist.o obj-y += lockref.o diff --git a/lib/dma-noop.c b/lib/dma-noop.c index 3d766e78fbe2..de26c8b68f34 100644 --- a/lib/dma-noop.c +++ b/lib/dma-noop.c @@ -1,7 +1,7 @@ /* * lib/dma-noop.c * - * Simple DMA noop-ops that map 1:1 with memory + * DMA operations that map to physical addresses without flushing memory. */ #include <linux/export.h> #include <linux/mm.h> @@ -64,7 +64,7 @@ static int dma_noop_supported(struct device *dev, u64 mask) return 1; } -struct dma_map_ops dma_noop_ops = { +const struct dma_map_ops dma_noop_ops = { .alloc = dma_noop_alloc, .free = dma_noop_free, .map_page = dma_noop_map_page, diff --git a/lib/dma-virt.c b/lib/dma-virt.c new file mode 100644 index 000000000000..dcd4df1f7174 --- /dev/null +++ b/lib/dma-virt.c @@ -0,0 +1,72 @@ +/* + * lib/dma-virt.c + * + * DMA operations that map to virtual addresses without flushing memory. 
+ */ +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> + +static void *dma_virt_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + void *ret; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + if (ret) + *dma_handle = (uintptr_t)ret; + return ret; +} + +static void dma_virt_free(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr, + unsigned long attrs) +{ + free_pages((unsigned long)cpu_addr, get_order(size)); +} + +static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + return (uintptr_t)(page_address(page) + offset); +} + +static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + BUG_ON(!sg_page(sg)); + sg_dma_address(sg) = (uintptr_t)sg_virt(sg); + sg_dma_len(sg) = sg->length; + } + + return nents; +} + +static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return false; +} + +static int dma_virt_supported(struct device *dev, u64 mask) +{ + return true; +} + +const struct dma_map_ops dma_virt_ops = { + .alloc = dma_virt_alloc, + .free = dma_virt_free, + .map_page = dma_virt_map_page, + .map_sg = dma_virt_map_sg, + .mapping_error = dma_virt_mapping_error, + .dma_supported = dma_virt_supported, +}; +EXPORT_SYMBOL(dma_virt_ops); diff --git a/net/rds/ib.h b/net/rds/ib.h index 540458928f3c..ec550626e221 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -136,7 +136,7 @@ struct rds_ib_connection { struct rds_ib_work_ring i_send_ring; struct rm_data_op *i_data_op; struct rds_header *i_send_hdrs; - u64 i_send_hdrs_dma; + dma_addr_t i_send_hdrs_dma; struct rds_ib_send_work *i_sends; atomic_t i_signaled_sends; @@ -146,7 +146,7 @@ struct rds_ib_connection { struct rds_ib_incoming *i_ibinc; u32 i_recv_data_rem; struct rds_header *i_recv_hdrs; - u64 i_recv_hdrs_dma; + dma_addr_t i_recv_hdrs_dma; struct rds_ib_recv_work *i_recvs; u64 i_ack_recv; /* last ACK received */ struct rds_ib_refill_cache i_cache_incs; @@ -164,7 +164,7 @@ struct rds_ib_connection { struct rds_header *i_ack; struct ib_send_wr i_ack_wr; struct ib_sge i_ack_sge; - u64 i_ack_dma; + dma_addr_t i_ack_dma; unsigned long i_ack_queued; /* Flow control related information @@ -235,7 +235,7 @@ struct rds_ib_device { int *vector_load; }; -#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device) +#define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent) #define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev) /* bits for i_ack_flags */ diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h index 1c754f4acbe5..24c086db4511 100644 --- a/net/rds/ib_mr.h +++ b/net/rds/ib_mr.h @@ -45,7 +45,6 @@ struct rds_ib_fmr { struct ib_fmr *fmr; - u64 *dma; }; enum rds_ib_fr_state { |
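
Taken together, these hunks move the per-device DMA-operations pointer out of arch-specific archdata into struct device itself and add dma_virt_ops for drivers whose "DMA" addresses are plain kernel virtual addresses. A minimal sketch under stated assumptions (the device and function names are illustrative, not from this patch): such a driver selects DMA_VIRT_OPS in Kconfig and installs the ops with the new set_dma_ops() helper, after which get_dma_ops() prefers dev->dma_ops over the architecture default returned by get_arch_dma_ops():

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical setup for a software-only device that uses kernel
 * virtual addresses as DMA handles; assumes 'select DMA_VIRT_OPS'
 * in the driver's Kconfig entry so lib/dma-virt.c is built.
 */
static void my_virt_dma_setup(struct device *dev)
{
	set_dma_ops(dev, &dma_virt_ops);
}

With this in place, dma_map_single() and dma_map_sg() on that device return (uintptr_t) casts of the buffer's virtual address (see dma_virt_map_page() and dma_virt_map_sg() above), and dma_mapping_error() never reports a failure.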