author    Christoph Hellwig <hch@lst.de>    2018-12-03 11:43:54 +0100
committer Christoph Hellwig <hch@lst.de>    2018-12-13 21:06:17 +0100
commit    55897af63091ebc2c3f239c6a6666f748113ac50 (patch)
tree      49340a30e15ae65d71fc5f453559340672064d70 /kernel/dma/direct.c
parent    17ac524719f3fc88c1a90528f4789e4b4f618512 (diff)
dma-direct: merge swiotlb_dma_ops into the dma_direct code
While the dma-direct code is (relatively) clean and simple, we actually have to use the swiotlb ops for the mapping on many architectures due to devices with addressing limits. Instead of keeping two implementations around, this commit allows the dma-direct implementation to call the swiotlb bounce buffering functions and thus share the guts of the mapping implementation. This also simplifies the dma-mapping setup on a few architectures, where we no longer have to differentiate which implementation to use.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
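The control flow the patch centralizes is easiest to see in miniature: the direct path tries the device-native mapping first and only bounces the buffer through a low, device-reachable pool when the device's addressing limit rules out the original memory. The following is a minimal user-space sketch of that decision, not kernel code; all names in it (toy_device, toy_dma_capable, toy_map_single, bounce_pool) are hypothetical and only mirror the shape of the check that dma_direct_map_page gains here.

/* Toy model: map directly when the device can address the buffer,
 * otherwise copy it into a bounce pool the device can reach. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_device {
	uint64_t dma_mask;		/* highest address the device can reach */
};

/* Stand-in for the swiotlb bounce pool. */
static unsigned char bounce_pool[4096];

static bool toy_dma_capable(const struct toy_device *dev, uint64_t addr,
			    size_t size)
{
	return addr + size - 1 <= dev->dma_mask;
}

/* Map @buf for @dev: direct when possible, bounce otherwise. */
static uint64_t toy_map_single(struct toy_device *dev, void *buf, size_t size)
{
	uint64_t addr = (uint64_t)(uintptr_t)buf;

	if (toy_dma_capable(dev, addr, size))
		return addr;			/* direct mapping, no copy */

	/* "swiotlb" path: copy into a buffer the device can address. */
	if (size > sizeof(bounce_pool))
		return UINT64_MAX;		/* mapping error */
	memcpy(bounce_pool, buf, size);
	return (uint64_t)(uintptr_t)bounce_pool;
}

int main(void)
{
	struct toy_device dev = { .dma_mask = 0xffffffff };	/* 32-bit device */
	char payload[] = "hello";
	uint64_t dma_addr = toy_map_single(&dev, payload, sizeof(payload));

	printf("mapped at %#llx (%s)\n", (unsigned long long)dma_addr,
	       dma_addr == (uint64_t)(uintptr_t)payload ? "direct" : "bounced");
	return 0;
}

In the real patch below, the same fallback is expressed by dma_direct_possible() plus the swiotlb_map() call in dma_direct_map_page(), and the sync/unmap helpers learn to recognize swiotlb buffers via is_swiotlb_buffer().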
Diffstat (limited to 'kernel/dma/direct.c')
-rw-r--r--  kernel/dma/direct.c  113
1 file changed, 81 insertions(+), 32 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index d45306473c90..85d8286a0ba2 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -13,6 +13,7 @@
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
+#include <linux/swiotlb.h>
/*
* Most architectures use ZONE_DMA for the first 16 Megabytes, but
@@ -209,69 +210,110 @@ void dma_direct_free(struct device *dev, size_t size,
dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}
-static void dma_direct_sync_single_for_device(struct device *dev,
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- if (dev_is_dma_coherent(dev))
- return;
- arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+ phys_addr_t paddr = dma_to_phys(dev, addr);
+
+ if (unlikely(is_swiotlb_buffer(paddr)))
+ swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_device(dev, paddr, size, dir);
}
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
-static void dma_direct_sync_sg_for_device(struct device *dev,
+void dma_direct_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
- if (dev_is_dma_coherent(dev))
- return;
+ for_each_sg(sgl, sg, nents, i) {
+ if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
+ swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+ dir, SYNC_FOR_DEVICE);
- for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+ dir);
+ }
}
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-static void dma_direct_sync_single_for_cpu(struct device *dev,
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+ defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
- if (dev_is_dma_coherent(dev))
- return;
- arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
- arch_sync_dma_for_cpu_all(dev);
+ phys_addr_t paddr = dma_to_phys(dev, addr);
+
+ if (!dev_is_dma_coherent(dev)) {
+ arch_sync_dma_for_cpu(dev, paddr, size, dir);
+ arch_sync_dma_for_cpu_all(dev);
+ }
+
+ if (unlikely(is_swiotlb_buffer(paddr)))
+ swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
-static void dma_direct_sync_sg_for_cpu(struct device *dev,
+void dma_direct_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
- if (dev_is_dma_coherent(dev))
- return;
+ for_each_sg(sgl, sg, nents, i) {
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+
+ if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
+ swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, dir,
+ SYNC_FOR_CPU);
+ }
- for_each_sg(sgl, sg, nents, i)
- arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
- arch_sync_dma_for_cpu_all(dev);
+ if (!dev_is_dma_coherent(dev))
+ arch_sync_dma_for_cpu_all(dev);
}
-static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
+ phys_addr_t phys = dma_to_phys(dev, addr);
+
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+
+ if (unlikely(is_swiotlb_buffer(phys)))
+ swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
-static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
+ attrs);
+}
+#else
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
}
#endif
+static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
+ size_t size)
+{
+ return swiotlb_force != SWIOTLB_FORCE &&
+ (!dev || dma_capable(dev, dma_addr, size));
+}
+
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -279,13 +321,14 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
phys_addr_t phys = page_to_phys(page) + offset;
dma_addr_t dma_addr = phys_to_dma(dev, phys);
- if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
+ if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
+ !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
report_addr(dev, dma_addr, size);
return DMA_MAPPING_ERROR;
}
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
+ if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(dev, phys, size, dir);
return dma_addr;
}
@@ -299,11 +342,15 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
sg->offset, sg->length, dir, attrs);
if (sg->dma_address == DMA_MAPPING_ERROR)
- return 0;
+ goto out_unmap;
sg_dma_len(sg) = sg->length;
}
return nents;
+
+out_unmap:
+ dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ return 0;
}
/*
@@ -331,12 +378,14 @@ const struct dma_map_ops dma_direct_ops = {
.free = dma_direct_free,
.map_page = dma_direct_map_page,
.map_sg = dma_direct_map_sg,
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_SWIOTLB)
.sync_single_for_device = dma_direct_sync_single_for_device,
.sync_sg_for_device = dma_direct_sync_sg_for_device,
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+ defined(CONFIG_SWIOTLB)
.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
.unmap_page = dma_direct_unmap_page,