author		Hiroshi Doyu <hdoyu@nvidia.com>	2012-08-28 08:13:04 +0300
committer	Marek Szyprowski <m.szyprowski@samsung.com>	2012-08-28 21:01:07 +0200
commit		479ed93a4b98eef03fd8260f7ddc00019221c450 (patch)
tree		eb4c1d6cb33edd15c4b892e42c2e3f58b227525c
parent		665bad7bb911d392000fa69bc6b599c0df992504 (diff)
ARM: dma-mapping: IOMMU allocates pages from atomic_pool with GFP_ATOMIC
Make use of the same atomic pool that the regular DMA path does, and skip the
kernel page mapping, which can involve sleepable operations when allocating a
kernel page table.

Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
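For context, a minimal caller-side sketch of the path this patch targets. The
helper and device are hypothetical; dma_alloc_coherent() is the standard DMA
API entry point, and a GFP_ATOMIC request on the ARM IOMMU path is what now
gets routed into __iommu_alloc_atomic():

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Hypothetical driver helper: allocate a coherent buffer from a
 * context that must not sleep (e.g. under a spinlock or in an IRQ
 * handler). With this patch, the ARM IOMMU mapping path serves the
 * request from the preallocated atomic pool instead of building a
 * kernel page mapping, which could sleep.
 */
static void *alloc_ring_atomic(struct device *dev, size_t size,
			       dma_addr_t *dma)
{
	return dma_alloc_coherent(dev, size, dma, GFP_ATOMIC);
}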
-rw-r--r--	arch/arm/mm/dma-mapping.c	36
1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 54b158df5afa..051204fc4617 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1161,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
return NULL;
}
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+ dma_addr_t *handle)
+{
+ struct page *page;
+ void *addr;
+
+ addr = __alloc_from_pool(size, &page);
+ if (!addr)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, &page, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __free_from_pool(addr, size);
+ return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+ dma_addr_t handle, size_t size)
+{
+ __iommu_remove_mapping(dev, handle, size);
+ __free_from_pool(page_address(pages[0]), size);
+}
+
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
@@ -1171,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
+ if (gfp & GFP_ATOMIC)
+ return __iommu_alloc_atomic(dev, size, handle);
+
pages = __iommu_alloc_buffer(dev, size, gfp);
if (!pages)
return NULL;
@@ -1237,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
return;
}
+ if (__in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, pages, handle, size);
+ return;
+ }
+
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
unmap_kernel_range((unsigned long)cpu_addr, size);
vunmap(cpu_addr);
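
For readers new to the atomic-pool technique this patch relies on, below is a
freestanding, simplified sketch with hypothetical names (not the kernel's
implementation: __alloc_from_pool()/__in_atomic_pool() use a bitmap allocator
over a region reserved at boot, while this sketch uses a bump allocator for
brevity). The key ideas are the same: serve atomic requests from memory that
is already mapped, and on free decide ownership with a plain address-range
check, which is how arm_iommu_free_attrs() above picks __iommu_free_atomic()
over the vunmap() path:

#include <stdbool.h>
#include <stddef.h>

#define POOL_PAGES	16
#define PAGE_SZ		4096

/* Region carved out up front, so no mapping work is needed later. */
static _Alignas(PAGE_SZ) char pool[POOL_PAGES * PAGE_SZ];
static size_t pool_used;

static void *pool_alloc(size_t size)
{
	void *p;

	size = (size + PAGE_SZ - 1) & ~(size_t)(PAGE_SZ - 1); /* page-align */
	if (pool_used + size > sizeof(pool))
		return NULL;		/* exhausted: fail rather than sleep */
	p = pool + pool_used;
	pool_used += size;
	return p;
}

/* Analogue of __in_atomic_pool(): a simple address-range test. */
static bool in_pool(const void *addr, size_t size)
{
	const char *p = addr;

	return p >= pool && p + size <= pool + sizeof(pool);
}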