path: root/drivers/gpu/drm/tegra
author    Arto Merilainen <amerilainen@nvidia.com>    2016-11-08 19:51:34 +0200
committer Thierry Reding <treding@nvidia.com>    2016-11-11 15:34:49 +0100
commit    7ecada3cc44798c88d2c3deed38746b9a7a9b746 (patch)
tree      caf41e782c5b52fbd45dc3ca4d8cb0e24180c122 /drivers/gpu/drm/tegra
parent    d4b5781890b9329b9e7720f14d3eeb5661348535 (diff)
drm/tegra: Support kernel mappings with IOMMU
host1x command buffer patching requires that the buffer object can be
mapped into kernel address space. However, the recent addition of IOMMU
support did not account for this requirement, so the host1x engines
cannot be used when the IOMMU is enabled. This patch implements the
kmap, kunmap, mmap and munmap functions for host1x buffer objects.

Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
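For context, the sketch below shows how a host1x consumer (for example the
command-buffer patching path in drivers/gpu/host1x/job.c) would reach the
callbacks added by this patch through the host1x_bo_mmap()/host1x_bo_munmap()
wrappers declared in include/linux/host1x.h. The patch_one_word() helper and
its offset/value parameters are illustrative only and are not part of this
patch:

    #include <linux/errno.h>
    #include <linux/host1x.h>
    #include <linux/types.h>

    /* Illustrative helper, not part of the patch: patch a single word in a
     * command buffer through a temporary kernel mapping. */
    static int patch_one_word(struct host1x_bo *cmdbuf, unsigned long offset,
                              u32 value)
    {
            u32 *vaddr;

            /* With the IOMMU enabled there is no permanent CPU mapping, so
             * tegra_bo_mmap() below falls back to vmap() or dma_buf_vmap(). */
            vaddr = host1x_bo_mmap(cmdbuf);
            if (!vaddr)
                    return -ENOMEM;

            vaddr[offset / sizeof(u32)] = value;

            /* A no-op for CMA-backed buffers that keep obj->vaddr around;
             * otherwise this tears down the temporary mapping again. */
            host1x_bo_munmap(cmdbuf, vaddr);

            return 0;
    }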
Diffstat (limited to 'drivers/gpu/drm/tegra')
-rw-r--r--   drivers/gpu/drm/tegra/gem.c   34
1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 19bf9cdf1f11..25083729a89c 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -2,7 +2,7 @@
* NVIDIA Tegra DRM GEM helper functions
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
+ * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
*
* Based on the GEM/CMA helpers
*
@@ -47,23 +47,51 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- return obj->vaddr;
+ if (obj->vaddr)
+ return obj->vaddr;
+ else if (obj->gem.import_attach)
+ return dma_buf_vmap(obj->gem.import_attach->dmabuf);
+ else
+ return vmap(obj->pages, obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+
+ if (obj->vaddr)
+ return;
+ else if (obj->gem.import_attach)
+ dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
+ else
+ vunmap(addr);
}
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- return obj->vaddr + page * PAGE_SIZE;
+ if (obj->vaddr)
+ return obj->vaddr + page * PAGE_SIZE;
+ else if (obj->gem.import_attach)
+ return dma_buf_kmap(obj->gem.import_attach->dmabuf, page);
+ else
+ return vmap(obj->pages + page, 1, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
}
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
void *addr)
{
+ struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+
+ if (obj->vaddr)
+ return;
+ else if (obj->gem.import_attach)
+ dma_buf_kunmap(obj->gem.import_attach->dmabuf, page, addr);
+ else
+ vunmap(addr);
}
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)