author    Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 14:12:21 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 14:12:21 -0800
commit    af7ddd8a627c62a835524b3f5b471edbbbcce025 (patch)
tree      af9777ddef6d394c7cc01fca599328b584ca2bc1 /arch/arm64/mm/dma-mapping.c
parent    fe2b0cdabcd9e6aeca66a104bc03576946e5fee2 (diff)
parent    8b1cce9f5832a8eda17d37a3c49fb7dd2d650f46 (diff)
Merge tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping
Pull DMA mapping updates from Christoph Hellwig:
 "A huge update this time, but a lot of that is just consolidating or
  removing code:

   - provide a common DMA_MAPPING_ERROR definition and avoid indirect
     calls for dma_map_* error checking

   - use direct calls for the DMA direct mapping case, avoiding huge
     retpoline overhead for high performance workloads

   - merge the swiotlb dma_map_ops into dma-direct

   - provide a generic remapping DMA consistent allocator for
     architectures that have devices that perform DMA that is not cache
     coherent. Based on the existing arm64 implementation and also used
     for csky now.

   - improve the dma-debug infrastructure, including dynamic allocation
     of entries (Robin Murphy)

   - default to providing chaining scatterlist everywhere, with opt-outs
     for the few architectures (alpha, parisc, most arm32 variants) that
     can't cope with it

   - misc sparc32 dma-related cleanups

   - remove the dma_mark_clean arch hook used by swiotlb on ia64 and
     replace it with the generic noncoherent infrastructure

   - fix the return type of dma_set_max_seg_size (Niklas Söderlund)

   - move the dummy dma ops for not DMA capable devices from arm64 to
     common code (Robin Murphy)

   - ensure dma_alloc_coherent returns zeroed memory to avoid kernel
     data leaks through userspace. We already did this for most common
     architectures, but this ensures we do it everywhere.
     dma_zalloc_coherent has been deprecated and can hopefully be
     removed after -rc1 with a coccinelle script"

* tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping: (73 commits)
  dma-mapping: fix inverted logic in dma_supported
  dma-mapping: deprecate dma_zalloc_coherent
  dma-mapping: zero memory returned from dma_alloc_*
  sparc/iommu: fix ->map_sg return value
  sparc/io-unit: fix ->map_sg return value
  arm64: default to the direct mapping in get_arch_dma_ops
  PCI: Remove unused attr variable in pci_dma_configure
  ia64: only select ARCH_HAS_DMA_COHERENT_TO_PFN if swiotlb is enabled
  dma-mapping: bypass indirect calls for dma-direct
  vmd: use the proper dma_* APIs instead of direct methods calls
  dma-direct: merge swiotlb_dma_ops into the dma_direct code
  dma-direct: use dma_direct_map_page to implement dma_direct_map_sg
  dma-direct: improve addressability error reporting
  swiotlb: remove dma_mark_clean
  swiotlb: remove SWIOTLB_MAP_ERROR
  ACPI / scan: Refactor _CCA enforcement
  dma-mapping: factor out dummy DMA ops
  dma-mapping: always build the direct mapping code
  dma-mapping: move dma_cache_sync out of line
  dma-mapping: move various slow path functions out of line
  ...
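
One driver-visible change called out above is easy to illustrate: dma_alloc_coherent() now returns zeroed memory on every architecture, which is what makes the dma_zalloc_coherent() wrapper redundant. A minimal sketch, assuming a hypothetical ring-buffer helper (the names are illustrative, not from this merge):

	#include <linux/dma-mapping.h>

	/* Hypothetical helper: allocate a zeroed DMA ring buffer. */
	static void *alloc_ring(struct device *dev, size_t size, dma_addr_t *dma)
	{
		/*
		 * No memset() or dma_zalloc_coherent() needed: as of this
		 * merge, dma_alloc_coherent() is guaranteed to return zeroed
		 * memory on all architectures.
		 */
		return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	}

Existing dma_zalloc_coherent() callers can therefore be converted mechanically, which is what the coccinelle script mentioned above is meant to do.
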
Diffstat (limited to 'arch/arm64/mm/dma-mapping.c')
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 286
1 file changed, 13 insertions(+), 273 deletions(-)
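
Several hunks below replace indirect ->mapping_error calls (iommu_dma_mapping_error()) with a direct comparison against the common DMA_MAPPING_ERROR sentinel. Drivers keep using dma_mapping_error(), which now performs that comparison itself. A minimal sketch, assuming a hypothetical streaming-mapping helper:

	#include <linux/dma-mapping.h>

	/* Hypothetical helper: map a buffer for device reads. */
	static dma_addr_t map_buf(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		/*
		 * dma_mapping_error() now compares addr against the common
		 * DMA_MAPPING_ERROR sentinel instead of going through the
		 * dma_map_ops ->mapping_error method.
		 */
		if (dma_mapping_error(dev, addr))
			return DMA_MAPPING_ERROR;
		return addr;
	}
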
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index a53704406099..fb0908456a1f 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -33,113 +33,6 @@
#include <asm/cacheflush.h>
-static struct gen_pool *atomic_pool __ro_after_init;
-
-#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
-static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
-
-static int __init early_coherent_pool(char *p)
-{
- atomic_pool_size = memparse(p, &p);
- return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
-static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
-{
- unsigned long val;
- void *ptr = NULL;
-
- if (!atomic_pool) {
- WARN(1, "coherent pool not initialised!\n");
- return NULL;
- }
-
- val = gen_pool_alloc(atomic_pool, size);
- if (val) {
- phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
-
- *ret_page = phys_to_page(phys);
- ptr = (void *)val;
- memset(ptr, 0, size);
- }
-
- return ptr;
-}
-
-static bool __in_atomic_pool(void *start, size_t size)
-{
- return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
-}
-
-static int __free_from_pool(void *start, size_t size)
-{
- if (!__in_atomic_pool(start, size))
- return 0;
-
- gen_pool_free(atomic_pool, (unsigned long)start, size);
-
- return 1;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flags, unsigned long attrs)
-{
- struct page *page;
- void *ptr, *coherent_ptr;
- pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
-
- size = PAGE_ALIGN(size);
-
- if (!gfpflags_allow_blocking(flags)) {
- struct page *page = NULL;
- void *addr = __alloc_from_pool(size, &page, flags);
-
- if (addr)
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
-
- return addr;
- }
-
- ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
- if (!ptr)
- goto no_mem;
-
- /* remove any dirty cache lines on the kernel alias */
- __dma_flush_area(ptr, size);
-
- /* create a coherent mapping */
- page = virt_to_page(ptr);
- coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
- prot, __builtin_return_address(0));
- if (!coherent_ptr)
- goto no_map;
-
- return coherent_ptr;
-
-no_map:
- dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
-no_mem:
- return NULL;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) {
- void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));
-
- vunmap(vaddr);
- dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
- }
-}
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr)
-{
- return __phys_to_pfn(dma_to_phys(dev, dma_addr));
-}
-
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs)
{
@@ -160,6 +53,11 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
__dma_unmap_area(phys_to_virt(paddr), size, dir);
}
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ __dma_flush_area(page_address(page), size);
+}
+
#ifdef CONFIG_IOMMU_DMA
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
struct page *page, size_t size)
@@ -191,167 +89,13 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
}
#endif /* CONFIG_IOMMU_DMA */
-static int __init atomic_pool_init(void)
-{
- pgprot_t prot = __pgprot(PROT_NORMAL_NC);
- unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
- struct page *page;
- void *addr;
- unsigned int pool_size_order = get_order(atomic_pool_size);
-
- if (dev_get_cma_area(NULL))
- page = dma_alloc_from_contiguous(NULL, nr_pages,
- pool_size_order, false);
- else
- page = alloc_pages(GFP_DMA32, pool_size_order);
-
- if (page) {
- int ret;
- void *page_addr = page_address(page);
-
- memset(page_addr, 0, atomic_pool_size);
- __dma_flush_area(page_addr, atomic_pool_size);
-
- atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
- if (!atomic_pool)
- goto free_page;
-
- addr = dma_common_contiguous_remap(page, atomic_pool_size,
- VM_USERMAP, prot, atomic_pool_init);
-
- if (!addr)
- goto destroy_genpool;
-
- ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
- page_to_phys(page),
- atomic_pool_size, -1);
- if (ret)
- goto remove_mapping;
-
- gen_pool_set_algo(atomic_pool,
- gen_pool_first_fit_order_align,
- NULL);
-
- pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
- atomic_pool_size / 1024);
- return 0;
- }
- goto out;
-
-remove_mapping:
- dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
-destroy_genpool:
- gen_pool_destroy(atomic_pool);
- atomic_pool = NULL;
-free_page:
- if (!dma_release_from_contiguous(NULL, page, nr_pages))
- __free_pages(page, pool_size_order);
-out:
- pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
- atomic_pool_size / 1024);
- return -ENOMEM;
-}
-
-/********************************************
- * The following APIs are for dummy DMA ops *
- ********************************************/
-
-static void *__dummy_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
-{
- return NULL;
-}
-
-static void __dummy_free(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
-}
-
-static int __dummy_mmap(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- return -ENXIO;
-}
-
-static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return 0;
-}
-
-static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
-
-static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return 0;
-}
-
-static void __dummy_unmap_sg(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
-
-static void __dummy_sync_single(struct device *dev,
- dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir)
-{
-}
-
-static void __dummy_sync_sg(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
-{
-}
-
-static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
- return 1;
-}
-
-static int __dummy_dma_supported(struct device *hwdev, u64 mask)
-{
- return 0;
-}
-
-const struct dma_map_ops dummy_dma_ops = {
- .alloc = __dummy_alloc,
- .free = __dummy_free,
- .mmap = __dummy_mmap,
- .map_page = __dummy_map_page,
- .unmap_page = __dummy_unmap_page,
- .map_sg = __dummy_map_sg,
- .unmap_sg = __dummy_unmap_sg,
- .sync_single_for_cpu = __dummy_sync_single,
- .sync_single_for_device = __dummy_sync_single,
- .sync_sg_for_cpu = __dummy_sync_sg,
- .sync_sg_for_device = __dummy_sync_sg,
- .mapping_error = __dummy_mapping_error,
- .dma_supported = __dummy_dma_supported,
-};
-EXPORT_SYMBOL(dummy_dma_ops);
-
static int __init arm64_dma_init(void)
{
WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
TAINT_CPU_OUT_OF_SPEC,
"ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
ARCH_DMA_MINALIGN, cache_line_size());
-
- return atomic_pool_init();
+ return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
}
arch_initcall(arm64_dma_init);
@@ -397,17 +141,17 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
page = alloc_pages(gfp, get_order(size));
addr = page ? page_address(page) : NULL;
} else {
- addr = __alloc_from_pool(size, &page, gfp);
+ addr = dma_alloc_from_pool(size, &page, gfp);
}
if (!addr)
return NULL;
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
- if (iommu_dma_mapping_error(dev, *handle)) {
+ if (*handle == DMA_MAPPING_ERROR) {
if (coherent)
__free_pages(page, get_order(size));
else
- __free_from_pool(addr, size);
+ dma_free_from_pool(addr, size);
addr = NULL;
}
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
@@ -420,7 +164,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
return NULL;
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
- if (iommu_dma_mapping_error(dev, *handle)) {
+ if (*handle == DMA_MAPPING_ERROR) {
dma_release_from_contiguous(dev, page,
size >> PAGE_SHIFT);
return NULL;
@@ -471,9 +215,9 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
* coherent devices.
* Hence how dodgy the below logic looks...
*/
- if (__in_atomic_pool(cpu_addr, size)) {
+ if (dma_in_atomic_pool(cpu_addr, size)) {
iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
- __free_from_pool(cpu_addr, size);
+ dma_free_from_pool(cpu_addr, size);
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
struct page *page = vmalloc_to_page(cpu_addr);
@@ -580,7 +324,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- !iommu_dma_mapping_error(dev, dev_addr))
+ dev_addr != DMA_MAPPING_ERROR)
__dma_map_area(page_address(page) + offset, size, dir);
return dev_addr;
@@ -663,7 +407,6 @@ static const struct dma_map_ops iommu_dma_ops = {
.sync_sg_for_device = __iommu_sync_sg_for_device,
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
- .mapping_error = iommu_dma_mapping_error,
};
static int __init __iommu_dma_init(void)
@@ -719,9 +462,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
- if (!dev->dma_ops)
- dev->dma_ops = &swiotlb_dma_ops;
-
dev->dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);