author     Marek Szyprowski <m.szyprowski@samsung.com>  2012-05-22 08:55:43 +0200
committer  Marek Szyprowski <m.szyprowski@samsung.com>  2012-05-22 08:55:43 +0200
commit     0f51596bd39a5c928307ffcffc9ba07f90f42a8b (patch)
tree       b636403815316ecad2170092b70f1079df260a95
parent     61f6c7a47a2f84b7ba4b65240ffe9247df772b06 (diff)
parent     4ce63fcd919c32d22528e54dcd89506962933719 (diff)
Merge branch 'for-next-arm-dma' into for-linus
Conflicts:
	arch/arm/Kconfig
	arch/arm/mm/dma-mapping.c

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
-rw-r--r--  arch/arm/Kconfig                       9
-rw-r--r--  arch/arm/common/dmabounce.c           84
-rw-r--r--  arch/arm/include/asm/device.h          4
-rw-r--r--  arch/arm/include/asm/dma-iommu.h      34
-rw-r--r--  arch/arm/include/asm/dma-mapping.h   407
-rw-r--r--  arch/arm/mm/dma-mapping.c            998
-rw-r--r--  arch/arm/mm/vmregion.h                 2
-rw-r--r--  drivers/base/dma-coherent.c           42
-rw-r--r--  include/asm-generic/dma-coherent.h     4
9 files changed, 1123 insertions, 461 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cbbbc45f6b67..24d3302a1b8e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,6 +4,7 @@ config ARM
select HAVE_AOUT
select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
+ select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_MEMBLOCK
@@ -47,6 +48,14 @@ config ARM
config ARM_HAS_SG_CHAIN
bool
+config NEED_SG_DMA_LENGTH
+ bool
+
+config ARM_DMA_USE_IOMMU
+ select NEED_SG_DMA_LENGTH
+ select ARM_HAS_SG_CHAIN
+ bool
+
config HAVE_PWM
bool
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 595ecd290ebf..9d7eb530f95f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
read_lock_irqsave(&device_info->lock, flags);
list_for_each_entry(b, &device_info->safe_buffers, node)
- if (b->safe_dma_addr == safe_dma_addr) {
+ if (b->safe_dma_addr <= safe_dma_addr &&
+ b->safe_dma_addr + b->size > safe_dma_addr) {
rb = b;
break;
}
@@ -254,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
if (buf == NULL) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
- return ~0;
+ return DMA_ERROR_CODE;
}
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
dma_addr_t dma_addr;
int ret;
@@ -320,21 +322,20 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
ret = needs_bounce(dev, dma_addr, size);
if (ret < 0)
- return ~0;
+ return DMA_ERROR_CODE;
if (ret == 0) {
- __dma_page_cpu_to_dev(page, offset, size, dir);
+ arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
return dma_addr;
}
if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
- return ~0;
+ return DMA_ERROR_CODE;
}
return map_single(dev, page_address(page) + offset, size, dir);
}
-EXPORT_SYMBOL(__dma_map_page);
/*
* see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
* the safe buffer. (basically return things back to the way they
* should be)
*/
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct safe_buffer *buf;
@@ -352,19 +353,18 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
buf = find_safe_buffer_dev(dev, dma_addr, __func__);
if (!buf) {
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
- dma_addr & ~PAGE_MASK, size, dir);
+ arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
return;
}
unmap_single(dev, buf, size, dir);
}
-EXPORT_SYMBOL(__dma_unmap_page);
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
- unsigned long off, size_t sz, enum dma_data_direction dir)
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
+ unsigned long off;
dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
@@ -373,6 +373,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
if (!buf)
return 1;
+ off = addr - buf->safe_dma_addr;
+
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -388,12 +390,21 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
}
return 0;
}
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
- unsigned long off, size_t sz, enum dma_data_direction dir)
+static void dmabounce_sync_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+ return;
+
+ arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
+ unsigned long off;
dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
@@ -402,6 +413,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
if (!buf)
return 1;
+ off = addr - buf->safe_dma_addr;
+
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -417,7 +430,38 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
}
return 0;
}
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+ return;
+
+ arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (dev->archdata.dmabounce)
+ return 0;
+
+ return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+ .alloc = arm_dma_alloc,
+ .free = arm_dma_free,
+ .mmap = arm_dma_mmap,
+ .map_page = dmabounce_map_page,
+ .unmap_page = dmabounce_unmap_page,
+ .sync_single_for_cpu = dmabounce_sync_for_cpu,
+ .sync_single_for_device = dmabounce_sync_for_device,
+ .map_sg = arm_dma_map_sg,
+ .unmap_sg = arm_dma_unmap_sg,
+ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .set_dma_mask = dmabounce_set_mask,
+};
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
const char *name, unsigned long size)
@@ -479,6 +523,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
#endif
dev->archdata.dmabounce = device_info;
+ set_dma_ops(dev, &dmabounce_ops);
dev_info(dev, "dmabounce: registered device\n");
@@ -497,6 +542,7 @@ void dmabounce_unregister_dev(struct device *dev)
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
dev->archdata.dmabounce = NULL;
+ set_dma_ops(dev, NULL);
if (!device_info) {
dev_warn(dev,
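With dmabounce now supplying its own dma_map_ops, registering a device installs dmabounce_ops for that device only; every other device keeps dispatching to arm_dma_ops. A minimal, hypothetical sketch of how platform code for a DMA-window-limited device might use it follows (device name, window limit and pool sizes are invented; it assumes the existing four-argument dmabounce_register_dev() with a needs-bounce callback):

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/sizes.h>

/* Illustrative only: bounce anything the device cannot reach (64MB window). */
static int foo_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > SZ_64M;
}

static int __init foo_dma_init(struct platform_device *pdev)
{
	/* pool sizes are placeholders; registration now also installs
	 * dmabounce_ops via set_dma_ops() as shown above */
	return dmabounce_register_dev(&pdev->dev, SZ_4K, SZ_64K,
				      foo_needs_bounce);
}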
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 7aa368003b05..b69c0d3285f8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,12 +7,16 @@
#define ASMARM_DEVICE_H
struct dev_archdata {
+ struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+#endif
};
struct omap_device;
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 000000000000..799b09409fad
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,34 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+
+struct dma_iommu_mapping {
+ /* iommu specific data */
+ struct iommu_domain *domain;
+
+ void *bitmap;
+ size_t bits;
+ unsigned int order;
+ dma_addr_t base;
+
+ spinlock_t lock;
+ struct kref kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
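The three entry points declared in this new header are aimed at bus or platform glue code rather than individual drivers. A rough, hypothetical sketch of wiring a master device to its own IOVA window (base address, window size and order are invented values; it assumes arm_iommu_create_mapping() reports failure via an ERR_PTR-style return):

#include <linux/platform_device.h>
#include <linux/err.h>
#include <asm/dma-iommu.h>
#include <asm/sizes.h>

/* Hypothetical: give the device a 128MB IOVA window at 0x80000000,
 * one page per bitmap bit (order 0). */
static int example_setup_iommu(struct platform_device *pdev)
{
	struct dma_iommu_mapping *mapping;
	int err;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0x80000000,
					   SZ_128M, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	err = arm_iommu_attach_device(&pdev->dev, mapping);
	if (err) {
		arm_iommu_release_mapping(mapping);
		return err;
	}

	/* subsequent dma_alloc_coherent()/dma_map_sg() calls on this device
	 * can now be routed through the IOMMU-aware dma_map_ops */
	return 0;
}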
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index cb3b7c981c4b..bbef15d04890 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,11 +5,35 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
+#define DMA_ERROR_CODE (~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+ return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+ BUG_ON(!dev);
+ dev->archdata.dma_ops = ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
+
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
@@ -62,68 +86,11 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
#endif
/*
- * The DMA API is built upon the notion of "buffer ownership". A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device. These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches. We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_cpu_to_dev(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_dev_to_cpu(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-
-/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return dma_addr == ~0;
+ return dma_addr == DMA_ERROR_CODE;
}
/*
@@ -141,69 +108,118 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
{
}
+extern int dma_supported(struct device *dev, u64 mask);
+
/**
- * dma_alloc_coherent - allocate consistent memory for DMA
+ * arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
+ * @attrs: optional attributes that specify mapping properties
*
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
+ * Allocate some memory for a device for performing DMA. This function
+ * allocates pages, and will return the CPU-viewed address, and sets @handle
+ * to be the device-viewed address.
*/
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs);
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+ BUG_ON(!ops);
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
+}
/**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * arm_dma_free - free memory allocated by arm_dma_alloc
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: size of memory originally requested in dma_alloc_coherent
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
*
* Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
+ * arm_dma_alloc().
*
* References to memory and mappings associated with cpu_addr/handle
* during and after this call executing are illegal.
*/
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs);
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
/**
- * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * arm_dma_mmap - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @vma: vm_area_struct describing requested user mapping
* @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
* @size: size of memory originally requested in dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
*
* Map a coherent DMA buffer previously allocated by dma_alloc_coherent
* into user space. The coherent DMA buffer must not be freed by the
* driver until the user space mapping has been released.
*/
-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t);
+extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs);
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-/**
- * dma_alloc_writecombine - allocate writecombining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
- gfp_t);
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
- dma_free_coherent(dev,size,cpu_addr,handle)
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t);
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
/*
* This can be called during boot to increase the size of the consistent
@@ -212,8 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
*/
extern void __init init_consistent_dma_size(unsigned long size);
-
-#ifdef CONFIG_DMABOUNCE
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
* and utilize bounce buffers as needed to work around limited DMA windows.
@@ -253,222 +267,19 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
*/
extern void dmabounce_unregister_dev(struct device *);
-/*
- * The DMA API, implemented by dmabounce.c. See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
- unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
- size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
- size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- return 1;
-}
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- return 1;
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- __dma_page_cpu_to_dev(page, offset, size, dir);
- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
- handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction dir)
-{
- unsigned long offset;
- struct page *page;
- dma_addr_t addr;
-
- BUG_ON(!virt_addr_valid(cpu_addr));
- BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
- BUG_ON(!valid_dma_direction(dir));
-
- page = virt_to_page(cpu_addr);
- offset = (unsigned long)cpu_addr & ~PAGE_MASK;
- addr = __dma_map_page(dev, page, offset, size, dir);
- debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
- return addr;
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- dma_addr_t addr;
-
- BUG_ON(!valid_dma_direction(dir));
-
- addr = __dma_map_page(dev, page, offset, size, dir);
- debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
- return addr;
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_single)
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Unmap a single streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- debug_dma_unmap_page(dev, handle, size, dir, true);
- __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- debug_dma_unmap_page(dev, handle, size, dir, false);
- __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_sync_single_range_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @offset: offset of region to start sync
- * @size: size of region to sync
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, you
- * must first the perform a dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t handle, unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-
- debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
- if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
- return;
-
- __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t handle, unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-
- debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
- if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
- return;
-
- __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
/*
* The scatter list versions of the above methods.
*/
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-
#endif /* __KERNEL__ */
#endif
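Driver-visible behaviour is preserved: dma_alloc_coherent() and the writecombine helpers are now thin wrappers that set a dma_attrs bit and call the attrs-based entry points, which dispatch through get_dma_ops(). A brief sketch of the equivalent explicit call (device and size are placeholders, not part of the patch):

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <linux/gfp.h>

/* Equivalent to dma_alloc_writecombine(dev, size, handle, GFP_KERNEL),
 * spelled out through the new attrs-based interface. */
static void *example_wc_alloc(struct device *dev, size_t size,
			      dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}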
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 153f5559406a..ea6b43154090 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -21,6 +21,8 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
#include <asm/memory.h>
#include <asm/highmem.h>
@@ -28,12 +30,112 @@
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
#include "mm.h"
+/*
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ */
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+ handle & ~PAGE_MASK, size, dir);
+}
+
+static void arm_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+ .alloc = arm_dma_alloc,
+ .free = arm_dma_free,
+ .mmap = arm_dma_mmap,
+ .map_page = arm_dma_map_page,
+ .unmap_page = arm_dma_unmap_page,
+ .map_sg = arm_dma_map_sg,
+ .unmap_sg = arm_dma_unmap_sg,
+ .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
+ .sync_single_for_device = arm_dma_sync_single_for_device,
+ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .set_dma_mask = arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
+
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = (u64)arm_dma_limit;
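With arm_dma_ops installed as the default table, the streaming API a driver already uses is unchanged; the generic inlines pulled in from <asm-generic/dma-mapping-common.h> simply dispatch each call through get_dma_ops(dev), or through a per-device table installed by set_dma_ops(). A minimal sketch of that round trip (device, page and direction are placeholders):

#include <linux/dma-mapping.h>

/* Illustrative streaming-DMA round trip through the ops table. */
static int example_stream_one_page(struct device *dev, struct page *page)
{
	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand 'dma' to the device and wait for the transfer ... */

	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}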
@@ -69,9 +171,11 @@ static void __dma_clear_buffer(struct page *page, size_t size)
* lurking in the kernel direct-mapped region is invalidated.
*/
ptr = page_address(page);
- memset(ptr, 0, size);
- dmac_flush_range(ptr, ptr + size);
- outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ if (ptr) {
+ memset(ptr, 0, size);
+ dmac_flush_range(ptr, ptr + size);
+ outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ }
}
/*
@@ -164,8 +268,10 @@ static int __init consistent_init(void)
unsigned long base = consistent_base;
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+#ifndef CONFIG_ARM_DMA_USE_IOMMU
if (cpu_architecture() >= CPU_ARCH_ARMv6)
return 0;
+#endif
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
@@ -181,14 +287,14 @@ static int __init consistent_init(void)
pud = pud_alloc(&init_mm, pgd, base);
if (!pud) {
- printk(KERN_ERR "%s: no pud tables\n", __func__);
+ pr_err("%s: no pud tables\n", __func__);
ret = -ENOMEM;
break;
}
pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
- printk(KERN_ERR "%s: no pmd tables\n", __func__);
+ pr_err("%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
@@ -196,7 +302,7 @@ static int __init consistent_init(void)
pte = pte_alloc_kernel(pmd, base);
if (!pte) {
- printk(KERN_ERR "%s: no pte tables\n", __func__);
+ pr_err("%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
@@ -311,7 +417,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
int bit;
if (!consistent_pte) {
- printk(KERN_ERR "%s: not initialised\n", __func__);
+ pr_err("%s: not initialised\n", __func__);
dump_stack();
return NULL;
}
@@ -338,7 +444,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
pte = consistent_pte[idx] + off;
- c->vm_pages = page;
+ c->priv = page;
do {
BUG_ON(!pte_none(*pte));
@@ -370,14 +476,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
if (!c) {
- printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+ pr_err("%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
return;
}
if ((c->vm_end - c->vm_start) != size) {
- printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+ pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
@@ -399,8 +505,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
}
if (pte_none(pte) || !pte_present(pte))
- printk(KERN_CRIT "%s: bad page in kernel page table\n",
- __func__);
+ pr_crit("%s: bad page in kernel page table\n",
+ __func__);
} while (size -= PAGE_SIZE);
flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -524,12 +630,21 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+ prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+ pgprot_writecombine(prot) :
+ pgprot_dmacoherent(prot);
+ return prot;
+}
+
#define nommu() 0
#else /* !CONFIG_MMU */
#define nommu() 1
+#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
#define __alloc_from_pool(dev, size, ret_page, c) NULL
#define __alloc_from_contiguous(dev, size, prot, ret) NULL
@@ -584,7 +699,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
*/
gfp &= ~(__GFP_COMP);
- *handle = ~0;
+ *handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
if (arch_is_coherent() || nommu())
@@ -606,39 +721,34 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp)
+void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
+ pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
- return __dma_alloc(dev, size, handle, gfp,
- pgprot_dmacoherent(pgprot_kernel),
+ return __dma_alloc(dev, size, handle, gfp, prot,
__builtin_return_address(0));
}
-EXPORT_SYMBOL(dma_alloc_coherent);
/*
- * Allocate a writecombining region, in much the same way as
- * dma_alloc_coherent above.
+ * Create userspace mapping for the DMA-coherent memory.
*/
-void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
-{
- return __dma_alloc(dev, size, handle, gfp,
- pgprot_writecombine(pgprot_kernel),
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(dma_alloc_writecombine);
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
unsigned long pfn = dma_to_pfn(dev, dma_addr);
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
ret = remap_pfn_range(vma, vma->vm_start,
pfn + vma->vm_pgoff,
vma->vm_end - vma->vm_start,
@@ -648,27 +758,11 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
-
/*
* Free a buffer as defined by the above mapping.
*/
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
@@ -692,48 +786,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
__free_from_contiguous(dev, page, size);
}
}
-EXPORT_SYMBOL(dma_free_coherent);
-
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- unsigned long paddr;
-
- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
- dmac_map_area(kaddr, size, dir);
-
- paddr = __pa(kaddr);
- if (dir == DMA_FROM_DEVICE) {
- outer_inv_range(paddr, paddr + size);
- } else {
- outer_clean_range(paddr, paddr + size);
- }
- /* FIXME: non-speculating: flush on bidirectional mappings? */
-}
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
- /* FIXME: non-speculating: not required */
- /* don't bother invalidating if DMA to device */
- if (dir != DMA_TO_DEVICE) {
- unsigned long paddr = __pa(kaddr);
- outer_inv_range(paddr, paddr + size);
- }
-
- dmac_unmap_area(kaddr, size, dir);
-}
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
@@ -779,7 +831,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
} while (left);
}
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr;
@@ -794,9 +852,8 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
}
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr = page_to_phys(page) + off;
@@ -814,10 +871,9 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
set_bit(PG_dcache_clean, &page->flags);
}
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
/**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
@@ -832,32 +888,32 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
* Device ownership issues as mentioned for dma_map_single are the same
* here.
*/
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i, j;
- BUG_ON(!valid_dma_direction(dir));
-
for_each_sg(sg, s, nents, i) {
- s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
- s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+ s->length, dir, attrs);
if (dma_mapping_error(dev, s->dma_address))
goto bad_mapping;
}
- debug_dma_map_sg(dev, sg, nents, nents, dir);
return nents;
bad_mapping:
for_each_sg(sg, s, i, j)
- __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
return 0;
}
-EXPORT_SYMBOL(dma_map_sg);
/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -866,70 +922,55 @@ EXPORT_SYMBOL(dma_map_sg);
* Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single().
*/
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
- int i;
- debug_dma_unmap_sg(dev, sg, nents, dir);
+ int i;
for_each_sg(sg, s, nents, i)
- __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
-EXPORT_SYMBOL(dma_unmap_sg);
/**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
- for_each_sg(sg, s, nents, i) {
- if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
- sg_dma_len(s), dir))
- continue;
-
- __dma_page_dev_to_cpu(sg_page(s), s->offset,
- s->length, dir);
- }
-
- debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+ for_each_sg(sg, s, nents, i)
+ ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+ dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
/**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
- for_each_sg(sg, s, nents, i) {
- if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
- sg_dma_len(s), dir))
- continue;
-
- __dma_page_cpu_to_dev(sg_page(s), s->offset,
- s->length, dir);
- }
-
- debug_dma_sync_sg_for_device(dev, sg, nents, dir);
+ for_each_sg(sg, s, nents, i)
+ ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+ dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
/*
* Return whether the given device DMA address mask can be supported
@@ -945,18 +986,15 @@ int dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_supported);
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
-#ifndef CONFIG_DMABOUNCE
*dev->dma_mask = dma_mask;
-#endif
return 0;
}
-EXPORT_SYMBOL(dma_set_mask);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
@@ -969,3 +1007,679 @@ static int __init dma_debug_do_init(void)
return 0;
}
fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
+/* IOMMU */
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+ unsigned long flags;
+
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ if (order > mapping->order)
+ align = (1 << (order - mapping->order)) - 1;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+ if (start > mapping->bits) {
+ spin_unlock_irqrestore(&mapping->lock, flags);
+ return DMA_ERROR_CODE;
+ }
+
+ bitmap_set(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+
+ return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >>
+ (mapping->order + PAGE_SHIFT);
+ unsigned int count = ((size >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ bitmap_clear(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+ struct page **pages;
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i = 0;
+
+ if (array_size <= PAGE_SIZE)
+ pages = kzalloc(array_size, gfp);
+ else
+ pages = vzalloc(array_size);
+ if (!pages)
+ return NULL;
+
+ while (count) {
+ int j, order = __ffs(count);
+
+ pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order)
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (--j)
+ pages[i + j] = pages[i] + j;
+
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+error:
+ while (--i)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size < PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+{
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i;
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size < PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+{
+ struct arm_vmregion *c;
+ size_t align;
+ size_t count = size >> PAGE_SHIFT;
+ int bit;
+
+ if (!consistent_pte[0]) {
+ pr_err("%s: not initialised\n", __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ /*
+ * Align the virtual region allocation - maximum alignment is
+ * a section size, minimum is a page size. This helps reduce
+ * fragmentation of the DMA space, and also prevents allocations
+ * smaller than a section from crossing a section boundary.
+ */
+ bit = fls(size - 1);
+ if (bit > SECTION_SHIFT)
+ bit = SECTION_SHIFT;
+ align = 1 << bit;
+
+ /*
+ * Allocate a virtual address in the consistent mapping region.
+ */
+ c = arm_vmregion_alloc(&consistent_head, align, size,
+ gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
+ if (c) {
+ pte_t *pte;
+ int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+ int i = 0;
+ u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+
+ pte = consistent_pte[idx] + off;
+ c->priv = pages;
+
+ do {
+ BUG_ON(!pte_none(*pte));
+
+ set_pte_ext(pte, mk_pte(pages[i], prot), 0);
+ pte++;
+ off++;
+ i++;
+ if (off >= PTRS_PER_PTE) {
+ off = 0;
+ pte = consistent_pte[++idx];
+ }
+ } while (i < count);
+
+ dsb();
+
+ return (void *)c->vm_start;
+ }
+ return NULL;
+}
+
+/*
+ * Create a mapping in device IO address space for the specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ dma_addr_t dma_addr, iova;
+ int i, ret = DMA_ERROR_CODE;
+
+ dma_addr = __alloc_iova(mapping, size);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ iova = dma_addr;
+ for (i = 0; i < count; ) {
+ unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+ phys_addr_t phys = page_to_phys(pages[i]);
+ unsigned int len, j;
+
+ for (j = i + 1; j < count; j++, next_pfn++)
+ if (page_to_pfn(pages[j]) != next_pfn)
+ break;
+
+ len = (j - i) << PAGE_SHIFT;
+ ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ if (ret < 0)
+ goto fail;
+ iova += len;
+ i = j;
+ }
+ return dma_addr;
+fail:
+ iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+ __free_iova(mapping, dma_addr, size);
+ return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ /*
+ * add optional in-page offset from iova to size and align
+ * result to page size
+ */
+ size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+ iova &= PAGE_MASK;
+
+ iommu_unmap(mapping->domain, iova, size);
+ __free_iova(mapping, iova, size);
+ return 0;
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ struct page **pages;
+ void *addr = NULL;
+
+ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+ pages = __iommu_alloc_buffer(dev, size, gfp);
+ if (!pages)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, pages, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_buffer;
+
+ addr = __iommu_alloc_remap(pages, size, gfp, prot);
+ if (!addr)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+ __iommu_free_buffer(dev, pages, size);
+ return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ struct arm_vmregion *c;
+
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+ c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+
+ if (c) {
+ struct page **pages = c->priv;
+
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ int i = 0;
+
+ do {
+ int ret;
+
+ ret = vm_insert_page(vma, uaddr, pages[i++]);
+ if (ret) {
+ pr_err("Remapping memory, error: %d\n", ret);
+ return ret;
+ }
+
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+ }
+ return 0;
+}
+
+/*
+ * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+{
+ struct arm_vmregion *c;
+ size = PAGE_ALIGN(size);
+
+ c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+ if (c) {
+ struct page **pages = c->priv;
+ __dma_free_remap(cpu_addr, size);
+ __iommu_remove_mapping(dev, handle, size);
+ __iommu_free_buffer(dev, pages, size);
+ }
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+ size_t size, dma_addr_t *handle,
+ enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova, iova_base;
+ int ret = 0;
+ unsigned int count;
+ struct scatterlist *s;
+
+ size = PAGE_ALIGN(size);
+ *handle = DMA_ERROR_CODE;
+
+ iova_base = iova = __alloc_iova(mapping, size);
+ if (iova == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+ phys_addr_t phys = page_to_phys(sg_page(s));
+ unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+ ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ if (ret < 0)
+ goto fail;
+ count += len >> PAGE_SHIFT;
+ iova += len;
+ }
+ *handle = iova_base;
+
+ return 0;
+fail:
+ iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+ __free_iova(mapping, iova_base, size);
+ return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s = sg, *dma = sg, *start = sg;
+ int i, count = 0;
+ unsigned int offset = s->offset;
+ unsigned int size = s->offset + s->length;
+ unsigned int max = dma_get_max_seg_size(dev);
+
+ for (i = 1; i < nents; i++) {
+ s = sg_next(s);
+
+ s->dma_address = DMA_ERROR_CODE;
+ s->dma_length = 0;
+
+ if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+ dir) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ size = offset = s->offset;
+ start = s;
+ dma = sg_next(dma);
+ count += 1;
+ }
+ size += s->length;
+ }
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ return count+1;
+
+bad_mapping:
+ for_each_sg(sg, s, count, i)
+ __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+ return 0;
+}
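From the driver's point of view nothing changes here either: it still calls dma_map_sg() and consumes sg_dma_address()/sg_dma_len(). When the device is attached to an IOMMU mapping, the chunk merging above means the returned segment count may be smaller than nents. A hedged sketch of the caller side (the loop body is a placeholder):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *s;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;

	for_each_sg(sgl, s, count, i) {
		/* program one hardware descriptor with
		 * sg_dma_address(s) / sg_dma_len(s) */
	}

	/* unmap with the original nents, not the returned count */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}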
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ if (sg_dma_len(s))
+ __iommu_remove_mapping(dev, sg_dma_address(s),
+ sg_dma_len(s));
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(sg_page(s), s->offset,
+ s->length, dir);
+ }
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
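
To illustrate when the two sync_sg hooks above run, a hedged fragment of the usual ownership handshake for a buffer the CPU inspects between device transfers; process_rx_data() is a hypothetical driver routine.

	/* hand the buffer back to the CPU before touching it */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
	process_rx_data(sgl, nents);
	/* return ownership to the device before the next transfer */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);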
+
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t dma_addr;
+ int ret, len = PAGE_ALIGN(size + offset);
+
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+
+ dma_addr = __alloc_iova(mapping, len);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+ if (ret < 0)
+ goto fail;
+
+ return dma_addr + offset;
+fail:
+ __free_iova(mapping, dma_addr, len);
+ return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ int offset = handle & ~PAGE_MASK;
+ int len = PAGE_ALIGN(size + offset);
+
+ if (!iova)
+ return;
+
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
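
A short sketch (illustrative values, not part of the patch) of the single-buffer path served by arm_iommu_map_page()/arm_iommu_unmap_page(): the driver uses dma_map_page() and checks the result with dma_mapping_error(), which catches the DMA_ERROR_CODE return used above.

	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	/* ... hand 'dma' to the hardware and run the transfer ... */
	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);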
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+struct dma_map_ops iommu_ops = {
+ .alloc = arm_iommu_alloc_attrs,
+ .free = arm_iommu_free_attrs,
+ .mmap = arm_iommu_mmap_attrs,
+
+ .map_page = arm_iommu_map_page,
+ .unmap_page = arm_iommu_unmap_page,
+ .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
+ .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+ .map_sg = arm_iommu_map_sg,
+ .unmap_sg = arm_iommu_unmap_sg,
+ .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_iommu_sync_sg_for_device,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: accuracy of the IO address allocations (each bitmap bit covers 2^order pages)
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device needs to be attached to the mapping with the
+ * arm_iommu_attach_device() function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order)
+{
+ unsigned int count = size >> (PAGE_SHIFT + order);
+ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct dma_iommu_mapping *mapping;
+ int err = -ENOMEM;
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
+
+ mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+ if (!mapping)
+ goto err;
+
+ mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!mapping->bitmap)
+ goto err2;
+
+ mapping->base = base;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
+ mapping->order = order;
+ spin_lock_init(&mapping->lock);
+
+ mapping->domain = iommu_domain_alloc(bus);
+ if (!mapping->domain)
+ goto err3;
+
+ kref_init(&mapping->kref);
+ return mapping;
+err3:
+ kfree(mapping->bitmap);
+err2:
+ kfree(mapping);
+err:
+ return ERR_PTR(err);
+}
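
To make the @size/@order arithmetic concrete, a worked example with assumed numbers (4 KiB pages):

	/*
	 * size = SZ_128M, order = 0:
	 *   count       = size >> (PAGE_SHIFT + 0)            = 32768 units
	 *   bitmap_size = BITS_TO_LONGS(32768) * sizeof(long)  = 4096 bytes
	 * size = SZ_128M, order = 4 (each bit covers 64 KiB):
	 *   count       = size >> (PAGE_SHIFT + 4)             = 2048 units
	 *   bitmap_size = BITS_TO_LONGS(2048) * sizeof(long)   = 256 bytes
	 * A larger order shrinks the bitmap but rounds every IOVA
	 * allocation up to a coarser granularity.
	 */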
+
+static void release_iommu_mapping(struct kref *kref)
+{
+ struct dma_iommu_mapping *mapping =
+ container_of(kref, struct dma_iommu_mapping, kref);
+
+ iommu_domain_free(mapping->domain);
+ kfree(mapping->bitmap);
+ kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+ if (mapping)
+ kref_put(&mapping->kref, release_iommu_mapping);
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ * arm_iommu_create_mapping)
+ *
+ * Attaches the specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ int err;
+
+ err = iommu_attach_device(mapping->domain, dev);
+ if (err)
+ return err;
+
+ kref_get(&mapping->kref);
+ dev->archdata.mapping = mapping;
+ set_dma_ops(dev, &iommu_ops);
+
+ pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ return 0;
+}
+
+#endif
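
A hedged end-to-end sketch of how platform code might use the two new entry points together; the bus type, IOVA window, and error handling are illustrative assumptions rather than requirements of this patch.

static int example_setup_device_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int err;

	/* 128 MiB of IO virtual address space starting at 0x80000000 */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* installs iommu_ops as the device's dma_map_ops */
	err = arm_iommu_attach_device(dev, mapping);
	if (err) {
		arm_iommu_release_mapping(mapping);
		return err;
	}
	return 0;
}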
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 162be662c088..bf312c354a21 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -17,7 +17,7 @@ struct arm_vmregion {
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
- struct page *vm_pages;
+ void *priv;
int vm_active;
const void *caller;
};
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bb0025c510b3..1b85949e3d2f 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,6 +10,7 @@
struct dma_coherent_mem {
void *virt_base;
dma_addr_t device_base;
+ phys_addr_t pfn_base;
int size;
int flags;
unsigned long *bitmap;
@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
+ dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
dev->dma_mem->size = pages;
dev->dma_mem->flags = flags;
@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_coherent
+ * @size: size of the memory buffer allocated by dma_alloc_from_coherent
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller
+ * should proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *vaddr, size_t size, int *ret)
+{
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+ if (mem && vaddr >= mem->virt_base && vaddr + size <=
+ (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ unsigned long off = vma->vm_pgoff;
+ int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+ int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int count = size >> PAGE_SHIFT;
+
+ *ret = -ENXIO;
+ if (off < count && user_count <= count - off) {
+ unsigned pfn = mem->pfn_base + start + off;
+ *ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
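
Finally, a minimal sketch of the intended call site for the new helper: an architecture's dma_mmap implementation tries the per-device coherent pool first and only falls back to its generic path when the helper returns 0. __arch_generic_dma_mmap() is a hypothetical stand-in for that existing fallback.

int example_arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t handle, size_t size)
{
	int ret;

	/* returns 1 when the buffer came from the per-device pool */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return __arch_generic_dma_mmap(dev, vma, cpu_addr, handle, size);
}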
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index 85a3ffaa0242..abfb2682de7f 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -3,13 +3,15 @@
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
- * These two functions are only for dma allocator.
+ * These three functions are only for dma allocator.
* Don't use them in device drivers.
*/
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret);
/*
* Standard interface
*/