Diffstat (limited to 'kernel')
-rw-r--r--   kernel/dma/map_benchmark.c   12
-rw-r--r--   kernel/dma/mapping.c         40
2 files changed, 11 insertions, 41 deletions
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index da95df381483..e0e64f8b0739 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -21,6 +21,7 @@
#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark)
#define DMA_MAP_MAX_THREADS 1024
#define DMA_MAP_MAX_SECONDS 300
+#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC)
#define DMA_MAP_BIDIRECTIONAL 0
#define DMA_MAP_TO_DEVICE 1
@@ -36,7 +37,8 @@ struct map_benchmark {
__s32 node; /* which numa node this benchmark will run on */
__u32 dma_bits; /* DMA addressing capability */
__u32 dma_dir; /* DMA data direction */
- __u8 expansion[84]; /* For future use */
+ __u32 dma_trans_ns; /* time for DMA transmission in ns */
+ __u8 expansion[80]; /* For future use */
};
struct map_benchmark_data {
@@ -87,6 +89,9 @@ static int map_benchmark_thread(void *data)
map_etime = ktime_get();
map_delta = ktime_sub(map_etime, map_stime);
+ /* Pretend DMA is transmitting */
+ ndelay(map->bparam.dma_trans_ns);
+
unmap_stime = ktime_get();
dma_unmap_single(map->dev, dma_addr, PAGE_SIZE, map->dir);
unmap_etime = ktime_get();
@@ -218,6 +223,11 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
}
+ if (map->bparam.dma_trans_ns > DMA_MAP_MAX_TRANS_DELAY) {
+ pr_err("invalid transmission delay\n");
+ return -EINVAL;
+ }
+
if (map->bparam.node != NUMA_NO_NODE &&
!node_possible(map->bparam.node)) {
pr_err("invalid numa node\n");
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 84de6b1c5fab..b6a633679933 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -517,46 +517,6 @@ void dma_free_pages(struct device *dev, size_t size, struct page *page,
}
EXPORT_SYMBOL_GPL(dma_free_pages);
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
- void *vaddr;
-
- if (!ops || !ops->alloc_noncoherent) {
- struct page *page;
-
- page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
- if (!page)
- return NULL;
- return page_address(page);
- }
-
- size = PAGE_ALIGN(size);
- vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp);
- if (vaddr)
- debug_dma_map_page(dev, virt_to_page(vaddr), 0, size, dir,
- *dma_handle);
- return vaddr;
-}
-EXPORT_SYMBOL_GPL(dma_alloc_noncoherent);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (!ops || !ops->free_noncoherent) {
- dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
- return;
- }
-
- size = PAGE_ALIGN(size);
- debug_dma_unmap_page(dev, dma_handle, size, dir);
- ops->free_noncoherent(dev, size, vaddr, dma_handle, dir);
-}
-EXPORT_SYMBOL_GPL(dma_free_noncoherent);
-
int dma_supported(struct device *dev, u64 mask)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
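
The deleted dma_alloc_noncoherent()/dma_free_noncoherent() exports fell back to dma_alloc_pages()/dma_free_pages() whenever a device had no ->alloc_noncoherent method, and those page-based helpers remain exported by mapping.c. The replacement wrappers on the header side are outside this diffstat, which is limited to kernel/. Below is a minimal sketch of the fallback path, rebuilt from the removed code above; the my_* names are illustrative only.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Kernel-virtual, non-coherent allocation via the page-based DMA API. */
static void *my_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	return page ? page_address(page) : NULL;
}

static void my_free_noncoherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}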