Diffstat (limited to 'arch')
26 files changed, 211 insertions, 66 deletions
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index cd8aad8226dd..08450a1a5b5f 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -158,7 +158,10 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 		unsigned long attrs)
 {
 	phys_addr_t paddr = page_to_phys(page) + offset;
-	_dma_cache_sync(paddr, size, dir);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		_dma_cache_sync(paddr, size, dir);
+
 	return plat_phys_to_dma(dev, paddr);
 }
 
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 301281645d08..75055df1cda3 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -243,7 +243,8 @@ static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 }
 
 static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
+		enum dma_data_direction dir,
+		unsigned long attrs)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 	struct safe_buffer *buf;
@@ -262,7 +263,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);
 
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+	if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
 			__func__, ptr, buf->safe, size);
 		memcpy(buf->safe, ptr, size);
@@ -272,7 +274,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 }
 
 static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
-		size_t size, enum dma_data_direction dir)
+		size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
 {
 	BUG_ON(buf->size != size);
 	BUG_ON(buf->direction != dir);
@@ -283,7 +286,8 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+	if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		void *ptr = buf->ptr;
 
 		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
@@ -334,7 +338,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;
 	}
 
-	return map_single(dev, page_address(page) + offset, size, dir);
+	return map_single(dev, page_address(page) + offset, size, dir, attrs);
 }
 
 /*
@@ -357,7 +361,7 @@ static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t
 		return;
 	}
 
-	unmap_single(dev, buf, size, dir);
+	unmap_single(dev, buf, size, dir, attrs);
 }
 
 static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 58610d0df7ed..54534e5d0781 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -146,7 +146,8 @@ static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
 {
 	void *cpu_addr = page_address(page) + offset;
 
-	dma_cache_sync(dev, cpu_addr, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_cache_sync(dev, cpu_addr, size, direction);
 	return virt_to_bus(cpu_addr);
 }
 
@@ -162,6 +163,10 @@ static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
 		virt = sg_virt(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		dma_cache_sync(dev, virt, sg->length, direction);
 	}
 
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index 53fbbb61aa86..a27a74a18fb0 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -118,6 +118,10 @@ static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
 	for_each_sg(sg_list, sg, nents, i) {
 		sg->dma_address = (dma_addr_t) sg_virt(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
 	}
 
@@ -143,7 +147,9 @@ static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
 
-	_dma_sync(handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		_dma_sync(handle, size, dir);
+
 	return handle;
 }
 
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
index db4a6a301f5e..6752df32ef06 100644
--- a/arch/c6x/kernel/dma.c
+++ b/arch/c6x/kernel/dma.c
@@ -42,14 +42,17 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t handle = virt_to_phys(page_address(page) + offset);
 
-	c6x_dma_sync(handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		c6x_dma_sync(handle, size, dir);
+
 	return handle;
 }
 
 static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	c6x_dma_sync(handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		c6x_dma_sync(handle, size, dir);
 }
 
 static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -60,7 +63,8 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		c6x_dma_sync(sg->dma_address, sg->length, dir);
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+			c6x_dma_sync(sg->dma_address, sg->length, dir);
 	}
 
 	return nents;
@@ -72,9 +76,11 @@ static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *sg;
 	int i;
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	for_each_sg(sglist, sg, nents, i)
 		c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
-
 }
 
 static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c
index 90f2e4cb33d6..187688128c65 100644
--- a/arch/frv/mb93090-mb00/pci-dma-nommu.c
+++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c
@@ -109,16 +109,19 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 		int nents, enum dma_data_direction direction,
 		unsigned long attrs)
 {
-	int i;
 	struct scatterlist *sg;
+	int i;
+
+	BUG_ON(direction == DMA_NONE);
+
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return nents;
 
 	for_each_sg(sglist, sg, nents, i) {
 		frv_cache_wback_inv(sg_dma_address(sg),
 				    sg_dma_address(sg) + sg_dma_len(sg));
 	}
 
-	BUG_ON(direction == DMA_NONE);
-
 	return nents;
 }
 
@@ -127,7 +130,10 @@ static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
 	unsigned long attrs)
 {
 	BUG_ON(direction == DMA_NONE);
-	flush_dcache_page(page);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		flush_dcache_page(page);
+
 	return (dma_addr_t) page_to_phys(page) + offset;
 }
 
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index f585745b1abc..dba7df918144 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -40,13 +40,16 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 		int nents, enum dma_data_direction direction,
 		unsigned long attrs)
 {
+	struct scatterlist *sg;
 	unsigned long dampr2;
 	void *vaddr;
 	int i;
-	struct scatterlist *sg;
 
 	BUG_ON(direction == DMA_NONE);
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return nents;
+
 	dampr2 = __get_DAMPR(2);
 
 	for_each_sg(sglist, sg, nents, i) {
@@ -70,7 +73,9 @@ static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
 	unsigned long offset, size_t size, enum dma_data_direction direction,
 	unsigned long attrs)
 {
-	flush_dcache_page(page);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		flush_dcache_page(page);
+
 	return (dma_addr_t) page_to_phys(page) + offset;
 }
 
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index b9017785fb71..dbc4f1003da4 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -119,6 +119,9 @@ static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
 
 		s->dma_length = s->length;
 
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		flush_dcache_range(dma_addr_to_virt(s->dma_address),
 				   dma_addr_to_virt(s->dma_address + s->length));
 	}
@@ -180,7 +183,8 @@ static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
 	if (!check_addr("map_single", dev, bus, size))
 		return bad_dma_address;
 
-	dma_sync(dma_addr_to_virt(bus), size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_sync(dma_addr_to_virt(bus), size, dir);
 
 	return bus;
 }
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 8cf97cbadc91..07070065a425 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -134,7 +134,9 @@ static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t handle = page_to_phys(page) + offset;
 
-	dma_sync_single_for_device(dev, handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_sync_single_for_device(dev, handle, size, dir);
+
 	return handle;
 }
 
@@ -146,6 +148,10 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
 					   dir);
 	}
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index 0db31e24c541..91968d92652b 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -484,8 +484,9 @@ static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
 	unsigned long offset, size_t size,
 	enum dma_data_direction direction, unsigned long attrs)
 {
-	dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
-			    direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_sync_for_device((void *)(page_to_phys(page) + offset),
+				    size, direction);
 	return page_to_phys(page) + offset;
 }
 
@@ -493,7 +494,8 @@ static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 	size_t size, enum dma_data_direction direction, unsigned long attrs)
 {
-	dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
 }
 
 static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -507,6 +509,10 @@ static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 		BUG_ON(!sg_page(sg));
 
 		sg->dma_address = sg_phys(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		dma_sync_for_device(sg_virt(sg), sg->length, direction);
 	}
 
@@ -525,6 +531,10 @@ static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		BUG_ON(!sg_page(sg));
 
 		sg->dma_address = sg_phys(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
 	}
 }
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index ec04dc1e2527..818daf230eb4 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,6 +61,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
 			   sg->length, direction);
 	}
@@ -80,7 +84,8 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 		enum dma_data_direction direction,
 		unsigned long attrs)
 {
-	__dma_sync(page_to_phys(page) + offset, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync(page_to_phys(page) + offset, size, direction);
 	return page_to_phys(page) + offset;
 }
 
@@ -95,7 +100,8 @@ static inline void dma_direct_unmap_page(struct device *dev,
 	 * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
 	 * dma_address is physical address
 	 */
-	__dma_sync(dma_address, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync(dma_address, size, direction);
 }
 
 static inline void
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index 1a80b6f73ab2..aab4fd681e1f 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -61,7 +61,7 @@ static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
 				int nents, enum dma_data_direction dir,
 				unsigned long attrs)
 {
-	int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
+	int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs);
 	mb();
 
 	return r;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 46d5696c4f27..a39c36af97ad 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -293,7 +293,7 @@ static inline void __dma_sync(struct page *page,
 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	size_t size, enum dma_data_direction direction, unsigned long attrs)
 {
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		__dma_sync(dma_addr_to_page(dev, dma_addr),
 			   dma_addr & ~PAGE_MASK, size, direction);
 	plat_post_dma_flush(dev);
@@ -307,7 +307,8 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *sg;
 
 	for_each_sg(sglist, sg, nents, i) {
-		if (!plat_device_is_coherent(dev))
+		if (!plat_device_is_coherent(dev) &&
+		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
@@ -324,7 +325,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
 	unsigned long offset, size_t size, enum dma_data_direction direction,
 	unsigned long attrs)
 {
-	if (!plat_device_is_coherent(dev))
+	if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		__dma_sync(page, offset, size, direction);
 
 	return plat_map_dma_mem_page(dev, page) + offset;
@@ -339,6 +340,7 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nhwentries, i) {
 		if (!plat_device_is_coherent(dev) &&
+		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 		    direction != DMA_TO_DEVICE)
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index d800fad87896..f6a5dcf9d682 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -98,13 +98,17 @@ static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, sg, nents, i) {
-		void *addr;
+		void *addr = sg_virt(sg);
 
-		addr = sg_virt(sg);
-		if (addr) {
-			__dma_sync_for_device(addr, sg->length, direction);
-			sg->dma_address = sg_phys(sg);
-		}
+		if (!addr)
+			continue;
+
+		sg->dma_address = sg_phys(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
+		__dma_sync_for_device(addr, sg->length, direction);
 	}
 
 	return nents;
@@ -117,7 +121,9 @@ static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
 {
 	void *addr = page_address(page) + offset;
 
-	__dma_sync_for_device(addr, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync_for_device(addr, size, direction);
+
 	return page_to_phys(page) + offset;
 }
 
@@ -125,7 +131,8 @@ static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 	size_t size, enum dma_data_direction direction,
 	unsigned long attrs)
 {
-	__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
 }
 
 static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -138,6 +145,9 @@ static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	if (direction == DMA_TO_DEVICE)
 		return;
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	for_each_sg(sg, sg, nhwentries, i) {
 		addr = sg_virt(sg);
 		if (addr)
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 140c99140649..906998bac957 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -141,6 +141,9 @@ or1k_map_page(struct device *dev, struct page *page,
 	unsigned long cl;
 	dma_addr_t addr = page_to_phys(page) + offset;
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return addr;
+
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		/* Flush the dcache for the requested range */
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 494ff6e8c88a..b6298a85e8ae 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -459,7 +459,9 @@ static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
 	void *addr = page_address(page) + offset;
 
 	BUG_ON(direction == DMA_NONE);
 
-	flush_kernel_dcache_range((unsigned long) addr, size);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		flush_kernel_dcache_range((unsigned long) addr, size);
+
 	return virt_to_phys(addr);
 }
 
@@ -469,8 +471,11 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	if (direction == DMA_TO_DEVICE)
-	    return;
+		return;
 
 	/*
 	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
@@ -479,7 +484,6 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 	 */
 
 	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
-	return;
 }
 
 static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -496,6 +500,10 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
 		sg_dma_len(sg) = sg->length;
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		flush_kernel_dcache_range(vaddr, sg->length);
 	}
 	return nents;
@@ -510,14 +518,16 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 	BUG_ON(direction == DMA_NONE);
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	if (direction == DMA_TO_DEVICE)
-	    return;
+		return;
 
 	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
 
 	for_each_sg(sglist, sg, nents, i)
 		flush_kernel_vmap_range(sg_virt(sg), sg->length);
-	return;
 }
 
 static void pa11_dma_sync_single_for_cpu(struct device *dev,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index e64a6016fba7..6877e3fa95bb 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -203,6 +203,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
 		sg->dma_length = sg->length;
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 	}
 
@@ -235,7 +239,10 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     unsigned long attrs)
 {
 	BUG_ON(dir == DMA_NONE);
-	__dma_sync_page(page, offset, size, dir);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync_page(page, offset, size, dir);
+
 	return page_to_phys(page) + offset + get_dma_offset(dev);
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 06254467e4dd..3a147122bc98 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -236,7 +236,6 @@ static int
 spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct spu_context *ctx = vma->vm_file->private_data;
-	unsigned long address = (unsigned long)vmf->virtual_address;
 	unsigned long pfn, offset;
 
 	offset = vmf->pgoff << PAGE_SHIFT;
@@ -244,7 +243,7 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
-			address, offset);
+			vmf->address, offset);
 
 	if (spu_acquire(ctx))
 		return VM_FAULT_NOPAGE;
@@ -256,7 +255,7 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
 	}
-	vm_insert_pfn(vma, address, pfn);
+	vm_insert_pfn(vma, vmf->address, pfn);
 
 	spu_release(ctx);
 
@@ -355,8 +354,7 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
 		down_read(&current->mm->mmap_sem);
 	} else {
 		area = ctx->spu->problem_phys + ps_offs;
-		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
-				(area + offset) >> PAGE_SHIFT);
+		vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
 		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
 	}
 
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
index eadb669a7329..47fee3b6e29c 100644
--- a/arch/sh/kernel/dma-nommu.c
+++ b/arch/sh/kernel/dma-nommu.c
@@ -18,7 +18,9 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 	dma_addr_t addr = page_to_phys(page) + offset;
 
 	WARN_ON(size == 0);
-	dma_cache_sync(dev, page_address(page) + offset, size, dir);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_cache_sync(dev, page_address(page) + offset, size, dir);
 
 	return addr;
 }
@@ -35,7 +37,8 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
 	for_each_sg(sg, s, nents, i) {
 		BUG_ON(!sg_page(s));
 
-		dma_cache_sync(dev, sg_virt(s), s->length, dir);
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+			dma_cache_sync(dev, sg_virt(s), s->length, dir);
 
 		s->dma_address = sg_phys(s);
 		s->dma_length = s->length;
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 852a3291db96..9df997995f6b 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -415,7 +415,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled)
+	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		strbuf_flush(strbuf, iommu, bus_addr, ctx,
 			     npages, direction);
 
@@ -640,7 +640,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	base = iommu->page_table + entry;
 
 	dma_handle &= IO_PAGE_MASK;
-	if (strbuf->strbuf_enabled)
+	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		strbuf_flush(strbuf, iommu, dma_handle, ctx,
 			     npages, direction);
 
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 2344103414d1..6ffaec44931a 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -527,7 +527,7 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
 static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
 			     enum dma_data_direction dir, unsigned long attrs)
 {
-	if (dir != PCI_DMA_TODEVICE)
+	if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		dma_make_coherent(ba, PAGE_ALIGN(size));
 }
 
@@ -572,7 +572,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
 	struct scatterlist *sg;
 	int n;
 
-	if (dir != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		for_each_sg(sgl, sg, nents, n) {
 			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
 		}
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index a9973bb4a1b2..95e73c63c99d 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -42,7 +42,7 @@ static int panic_on_timeout;
  */
 atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
 EXPORT_SYMBOL(nmi_active);
-
+static int nmi_init_done;
 static unsigned int nmi_hz = HZ;
 static DEFINE_PER_CPU(short, wd_enabled);
 static int endflag __initdata;
@@ -153,6 +153,8 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
 
 void stop_nmi_watchdog(void *unused)
 {
+	if (!__this_cpu_read(wd_enabled))
+		return;
 	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
 	__this_cpu_write(wd_enabled, 0);
 	atomic_dec(&nmi_active);
@@ -207,6 +209,9 @@ error:
 
 void start_nmi_watchdog(void *unused)
 {
+	if (__this_cpu_read(wd_enabled))
+		return;
+
 	__this_cpu_write(wd_enabled, 1);
 	atomic_inc(&nmi_active);
 
@@ -259,6 +264,8 @@ int __init nmi_init(void)
 		}
 	}
 
+	nmi_init_done = 1;
+
 	return err;
 }
 
@@ -270,3 +277,38 @@ static int __init setup_nmi_watchdog(char *str)
 	return 0;
 }
 __setup("nmi_watchdog=", setup_nmi_watchdog);
+
+/*
+ * sparc specific NMI watchdog enable function.
+ * Enables watchdog if it is not enabled already.
+ */
+int watchdog_nmi_enable(unsigned int cpu)
+{
+	if (atomic_read(&nmi_active) == -1) {
+		pr_warn("NMI watchdog cannot be enabled or disabled\n");
+		return -1;
+	}
+
+	/*
+	 * watchdog thread could start even before nmi_init is called.
+	 * Just Return in that case. Let nmi_init finish the init
+	 * process first.
+	 */
+	if (!nmi_init_done)
+		return 0;
+
+	smp_call_function_single(cpu, start_nmi_watchdog, NULL, 1);
+
+	return 0;
+}
+/*
+ * sparc specific NMI watchdog disable function.
+ * Disables watchdog if it is not disabled already.
+ */
+void watchdog_nmi_disable(unsigned int cpu)
+{
+	if (atomic_read(&nmi_active) == -1)
+		pr_warn_once("NMI watchdog cannot be enabled or disabled\n");
+	else
+		smp_call_function_single(cpu, stop_nmi_watchdog, NULL, 1);
+}
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 09bb774b39cd..24e0f8c21f2f 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -213,10 +213,12 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
 		sg->dma_length = sg->length;
 #endif
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
 	}
 
 	return nents;
@@ -232,6 +234,8 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	BUG_ON(!valid_dma_direction(direction));
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
 		__dma_complete_pa_range(sg->dma_address, sg->length,
 					direction);
 	}
@@ -245,7 +249,8 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
 	BUG_ON(!valid_dma_direction(direction));
 
 	BUG_ON(offset + size > PAGE_SIZE);
-	__dma_prep_page(page, offset, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_prep_page(page, offset, size, direction);
 
 	return page_to_pa(page) + offset;
 }
@@ -256,6 +261,9 @@ static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 {
 	BUG_ON(!valid_dma_direction(direction));
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
 			    dma_address & (PAGE_SIZE - 1), size, direction);
 }
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index e739002427ed..40121d14d34d 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -109,7 +109,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
 		return VM_FAULT_SIGBUS;
 
 	if (sym_offset == image->sym_vvar_page) {
-		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
+		ret = vm_insert_pfn(vma, vmf->address,
 				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
 	} else if (sym_offset == image->sym_pvclock_page) {
 		struct pvclock_vsyscall_time_info *pvti =
@@ -117,7 +117,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
 		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
 			ret = vm_insert_pfn(
 				vma,
-				(unsigned long)vmf->virtual_address,
+				vmf->address,
 				__pa(pvti) >> PAGE_SHIFT);
 		}
 	}
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 8c1f218926d7..307b1f4543de 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -328,7 +328,7 @@ void machine_kexec(struct kimage *image)
 
 void arch_crash_save_vmcoreinfo(void)
 {
-	VMCOREINFO_SYMBOL(phys_base);
+	VMCOREINFO_NUMBER(phys_base);
 	VMCOREINFO_SYMBOL(init_level4_pgt);
 
 #ifdef CONFIG_NUMA
@@ -337,9 +337,7 @@ void arch_crash_save_vmcoreinfo(void)
 #endif
 	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
 			      kaslr_offset());
-	VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
-	VMCOREINFO_VMALLOC_START(VMALLOC_START);
-	VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
+	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
 }
 
 /* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 1e68806d6695..6a16decf278f 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -189,7 +189,9 @@ static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t dma_handle = page_to_phys(page) + offset;
 
-	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		xtensa_sync_single_for_device(dev, dma_handle, size, dir);
+
 	return dma_handle;
 }
 
@@ -197,7 +199,8 @@ static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
 			      size_t size, enum dma_data_direction dir,
 			      unsigned long attrs)
 {
-	xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
 }
 
 static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
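
Nearly every hunk above wires DMA_ATTR_SKIP_CPU_SYNC into an architecture's map/unmap callbacks: when the caller passes the attribute, the arch skips its cache writeback/invalidate (or dmabounce copy) and leaves synchronization to explicit dma_sync_* calls. A minimal caller-side sketch of the intended pattern follows; it is not part of this diff, the rx_len variable and the buffer-recycling context are hypothetical, and it assumes the dma_map_page_attrs()/dma_unmap_page_attrs() wrappers are available:

	/* Map once, skipping the full-page CPU sync (hypothetical driver). */
	dma_addr_t dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
					    DMA_FROM_DEVICE,
					    DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* After the device writes rx_len bytes, sync only that region. */
	dma_sync_single_range_for_cpu(dev, dma, 0, rx_len, DMA_FROM_DEVICE);
	/* ... CPU reads the received data ... */
	dma_sync_single_range_for_device(dev, dma, 0, rx_len, DMA_FROM_DEVICE);

	/* On unmap, skip the sync a second time: the region the device
	 * wrote was already synced above, so the full-buffer flush that
	 * the arch code would otherwise perform is redundant. */
	dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);

This is why the sg loops above use continue rather than break: the dma_address assignment must still happen for every entry even when the cache maintenance is skipped.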