author    Xiang Chen <chenxiang66@hisilicon.com>  2021-03-18 17:29:30 +0800
committer Christoph Hellwig <hch@lst.de>  2021-04-02 16:41:08 +0200
commit    ca947482b0b30443e6da1f0f5ba7244e34a4f65a (patch)
tree      4aa88cbca829f7daa3ceaf7384a4b81ab69729ca /tools/testing/selftests/dma
parent    42e4eefb089f12ea900062ecdcc7ca10c3423a05 (diff)
dma-mapping: benchmark: Add support for multi-pages map/unmap
Currently the dma-map benchmark supports mapping/unmapping only one page at a time, but there are other scenarios that need multi-page map/unmap: for multi-page interfaces such as dma_alloc_coherent() and dma_map_sg(), the time spent on a multi-page map/unmap is not the single-page time * npages (it is not linear), since the IOMMU may use a block descriptor instead of page descriptors when the size qualifies (such as 2M/1G), and it can also send a single TLB invalidation command to invalidate multiple pages instead of one command per page when RIL is enabled (which shortens unmap time). So it is necessary to add support for multi-page map/unmap.

Add a parameter "-g" to support multi-page map/unmap.

Signed-off-by: Xiang Chen <chenxiang66@hisilicon.com>
Acked-by: Barry Song <song.bao.hua@hisilicon.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
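A usage sketch of the new flag (a hypothetical invocation; the option letters follow the getopt string in the diff below, the binary name assumes the selftest is built in place, and actual latencies will vary by system):

    ./dma_map_benchmark -t 8 -s 30 -g 8

This runs 8 threads for 30 seconds, mapping and unmapping 8 pages per call instead of the default 1.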
Diffstat (limited to 'tools/testing/selftests/dma')
-rw-r--r--  tools/testing/selftests/dma/dma_map_benchmark.c  20
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c
index b492bed0936d..485dff51bad2 100644
--- a/tools/testing/selftests/dma/dma_map_benchmark.c
+++ b/tools/testing/selftests/dma/dma_map_benchmark.c
@@ -40,7 +40,8 @@ struct map_benchmark {
__u32 dma_bits; /* DMA addressing capability */
__u32 dma_dir; /* DMA data direction */
__u32 dma_trans_ns; /* time for DMA transmission in ns */
- __u8 expansion[80]; /* For future use */
+ __u32 granule;	/* how many PAGE_SIZEs to map/unmap at a time */
+ __u8 expansion[76]; /* For future use */
};
int main(int argc, char **argv)
@@ -51,11 +52,13 @@ int main(int argc, char **argv)
int threads = 1, seconds = 20, node = -1;
/* default dma mask 32bit, bidirectional DMA */
int bits = 32, xdelay = 0, dir = DMA_MAP_BIDIRECTIONAL;
+ /* default granule: 1 PAGE_SIZE */
+ int granule = 1;
int cmd = DMA_MAP_BENCHMARK;
char *p;
- while ((opt = getopt(argc, argv, "t:s:n:b:d:x:")) != -1) {
+ while ((opt = getopt(argc, argv, "t:s:n:b:d:x:g:")) != -1) {
switch (opt) {
case 't':
threads = atoi(optarg);
@@ -75,6 +78,9 @@ int main(int argc, char **argv)
case 'x':
xdelay = atoi(optarg);
break;
+ case 'g':
+ granule = atoi(optarg);
+ break;
default:
return -1;
}
@@ -110,6 +116,11 @@ int main(int argc, char **argv)
exit(1);
}
+ if (granule < 1 || granule > 1024) {
+ fprintf(stderr, "invalid granule size\n");
+ exit(1);
+ }
+
fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);
if (fd == -1) {
perror("open");
@@ -123,14 +134,15 @@ int main(int argc, char **argv)
map.dma_bits = bits;
map.dma_dir = dir;
map.dma_trans_ns = xdelay;
+ map.granule = granule;
if (ioctl(fd, cmd, &map)) {
perror("ioctl");
exit(1);
}
- printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s\n",
- threads, seconds, node, dir[directions]);
+ printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s granule: %d\n",
+ threads, seconds, node, dir[directions], granule);
printf("average map latency(us):%.1f standard deviation:%.1f\n",
map.avg_map_100ns/10.0, map.map_stddev/10.0);
printf("average unmap latency(us):%.1f standard deviation:%.1f\n",