author | Thomas Zimmermann <tzimmermann@suse.de> | 2020-11-03 10:30:10 +0100 |
---|---|---|
committer | Thomas Zimmermann <tzimmermann@suse.de> | 2020-11-09 09:17:36 +0100 |
commit | 43676605f890b218e551f396a55dbaea7799acb4 (patch) | |
tree | b3ce6592acfafea411d364ed64b7e98e8666d864 /drivers/gpu/drm/ttm/ttm_bo_util.c | |
parent | f216fcb8e4d76038133926aec53df92bf6b15860 (diff) | |
drm/ttm: Add vmap/vunmap to TTM and TTM GEM helpers
The new functions ttm_bo_{vmap,vunmap}() map and unmap a TTM BO in kernel
address space. The mapping's address is returned as struct dma_buf_map.
Each function is a simplified version of TTM's existing kmap code. Both
functions respect the memory's location and/or writecombine flags.
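As a usage illustration that is not part of this patch, the sketch below maps a
BO through the new interface, writes to it via the returned struct dma_buf_map,
and unmaps it again. The helper name example_fill_bo and the assumption that
the caller already holds the BO's reservation and keeps it pinned are
illustrative only.

```c
/*
 * Hypothetical usage sketch for ttm_bo_vmap()/ttm_bo_vunmap(); the helper
 * name and the locking/pinning assumptions are illustrative, not taken
 * from the patch.
 */
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

#include <drm/ttm/ttm_bo_api.h>

/* Fill a pinned, reserved BO with a byte pattern through a CPU mapping. */
static int example_fill_bo(struct ttm_buffer_object *bo, u8 value, size_t len)
{
	struct dma_buf_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);	/* sets either vaddr or vaddr_iomem */
	if (ret)
		return ret;

	/* struct dma_buf_map records whether the mapping is I/O memory. */
	if (map.is_iomem)
		memset_io(map.vaddr_iomem, value, len);
	else
		memset(map.vaddr, value, len);

	ttm_bo_vunmap(bo, &map);	/* also clears the mapping struct */

	return 0;
}
```

The point of struct dma_buf_map here is that the same caller code handles both
I/O and system memory without knowing where TTM placed the buffer.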
On top of TTM's functions, the GEM TTM helpers gain drm_gem_ttm_{vmap,vunmap}(),
two helpers that convert a GEM object into the corresponding TTM BO and forward
the call to TTM's vmap/vunmap. These helpers can be dropped into the respective
GEM object callbacks.
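A driver built on TTM could wire these helpers into its GEM object callbacks
roughly as sketched below; the struct name example_gem_object_funcs is a
placeholder, and all other callbacks a real driver would populate are omitted.

```c
/*
 * Hypothetical wiring of the GEM TTM helpers into a driver's GEM object
 * callbacks; only the vmap/vunmap entries are shown.
 */
#include <drm/drm_gem.h>
#include <drm/drm_gem_ttm_helper.h>

static const struct drm_gem_object_funcs example_gem_object_funcs = {
	.vmap = drm_gem_ttm_vmap,	/* looks up the TTM BO, calls ttm_bo_vmap() */
	.vunmap = drm_gem_ttm_vunmap,	/* looks up the TTM BO, calls ttm_bo_vunmap() */
};
```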
v5:
* use size_t for storing mapping size (Christian)
* ignore premapped memory areas correctly in ttm_bo_vunmap()
* rebase onto latest TTM interfaces (Christian)
* remove BUG() from ttm_bo_vmap() (Christian)
v4:
* drop ttm_kmap_obj_to_dma_buf() in favor of vmap helpers (Daniel,
Christian)
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Tested-by: Sam Ravnborg <sam@ravnborg.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20201103093015.1063-6-tzimmermann@suse.de
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_util.c | 72 |
1 file changed, 72 insertions, 0 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ecb54415d1ca..7ccb2295cac1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -32,6 +32,7 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drm_vma_manager.h>
+#include <linux/dma-buf-map.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -471,6 +472,77 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
+int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
+{
+	struct ttm_resource *mem = &bo->mem;
+	int ret;
+
+	ret = ttm_mem_io_reserve(bo->bdev, mem);
+	if (ret)
+		return ret;
+
+	if (mem->bus.is_iomem) {
+		void __iomem *vaddr_iomem;
+		size_t size = bo->num_pages << PAGE_SHIFT;
+
+		if (mem->bus.addr)
+			vaddr_iomem = (void __iomem *)mem->bus.addr;
+		else if (mem->bus.caching == ttm_write_combined)
+			vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+		else
+			vaddr_iomem = ioremap(mem->bus.offset, size);
+
+		if (!vaddr_iomem)
+			return -ENOMEM;
+
+		dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);
+
+	} else {
+		struct ttm_operation_ctx ctx = {
+			.interruptible = false,
+			.no_wait_gpu = false
+		};
+		struct ttm_tt *ttm = bo->ttm;
+		pgprot_t prot;
+		void *vaddr;
+
+		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
+		if (ret)
+			return ret;
+
+		/*
+		 * We need to use vmap to get the desired page protection
+		 * or to make the buffer object look contiguous.
+		 */
+		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
+		vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+		if (!vaddr)
+			return -ENOMEM;
+
+		dma_buf_map_set_vaddr(map, vaddr);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vmap);
+
+void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
+{
+	struct ttm_resource *mem = &bo->mem;
+
+	if (dma_buf_map_is_null(map))
+		return;
+
+	if (!map->is_iomem)
+		vunmap(map->vaddr);
+	else if (!mem->bus.addr)
+		iounmap(map->vaddr_iomem);
+	dma_buf_map_clear(map);
+
+	ttm_mem_io_free(bo->bdev, &bo->mem);
+}
+EXPORT_SYMBOL(ttm_bo_vunmap);
+
 static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				  bool dst_use_tt)
 {