Diffstat (limited to 'drivers/gpu/drm/etnaviv/etnaviv_mmu.c')
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 326
1 file changed, 221 insertions(+), 105 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 8069f9f36a2e..35ebae6a1be7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -3,15 +3,17 @@
  * Copyright (C) 2015-2018 Etnaviv Project
  */
 
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
 #include "common.xml.h"
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
-#include "etnaviv_iommu.h"
 #include "etnaviv_mmu.h"
 
-static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
+static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
                                  unsigned long iova, size_t size)
 {
         size_t unmapped_page, unmapped = 0;
@@ -24,7 +26,8 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
         }
 
         while (unmapped < size) {
-                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+                unmapped_page = context->global->ops->unmap(context, iova,
+                                                            pgsize);
                 if (!unmapped_page)
                         break;
 
@@ -33,7 +36,7 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
         }
 }
 
-static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_context_map(struct etnaviv_iommu_context *context,
                               unsigned long iova, phys_addr_t paddr,
                               size_t size, int prot)
 {
@@ -49,7 +52,8 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
         }
 
         while (size) {
-                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+                ret = context->global->ops->map(context, iova, paddr, pgsize,
+                                                prot);
                 if (ret)
                         break;
 
@@ -60,21 +64,19 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
 
         /* unroll mapping in case something went wrong */
         if (ret)
-                etnaviv_domain_unmap(domain, orig_iova, orig_size - size);
+                etnaviv_context_unmap(context, orig_iova, orig_size - size);
 
         return ret;
 }
 
-static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
                              struct sg_table *sgt, unsigned len, int prot)
-{
-        struct etnaviv_iommu_domain *domain = iommu->domain;
-        struct scatterlist *sg;
+{       struct scatterlist *sg;
         unsigned int da = iova;
         unsigned int i, j;
         int ret;
 
-        if (!domain || !sgt)
+        if (!context || !sgt)
                 return -EINVAL;
 
         for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -83,7 +85,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
 
                 VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
 
-                ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
+                ret = etnaviv_context_map(context, da, pa, bytes, prot);
                 if (ret)
                         goto fail;
 
@@ -98,16 +100,15 @@ fail:
         for_each_sg(sgt->sgl, sg, i, j) {
                 size_t bytes = sg_dma_len(sg) + sg->offset;
 
-                etnaviv_domain_unmap(domain, da, bytes);
+                etnaviv_context_unmap(context, da, bytes);
                 da += bytes;
         }
         return ret;
 }
 
-static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
+static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
                                 struct sg_table *sgt, unsigned len)
 {
-        struct etnaviv_iommu_domain *domain = iommu->domain;
         struct scatterlist *sg;
         unsigned int da = iova;
         int i;
@@ -115,7 +116,7 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
         for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                 size_t bytes = sg_dma_len(sg) + sg->offset;
 
-                etnaviv_domain_unmap(domain, da, bytes);
+                etnaviv_context_unmap(context, da, bytes);
 
                 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
 
@@ -125,24 +126,24 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
         }
 }
 
-static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
+static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
         struct etnaviv_vram_mapping *mapping)
 {
         struct etnaviv_gem_object *etnaviv_obj = mapping->object;
 
-        etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
+        etnaviv_iommu_unmap(context, mapping->vram_node.start,
                             etnaviv_obj->sgt, etnaviv_obj->base.size);
         drm_mm_remove_node(&mapping->vram_node);
 }
 
-static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
+static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
                                    struct drm_mm_node *node, size_t size)
 {
         struct etnaviv_vram_mapping *free = NULL;
         enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
         int ret;
 
-        lockdep_assert_held(&mmu->lock);
+        lockdep_assert_held(&context->lock);
 
         while (1) {
                 struct etnaviv_vram_mapping *m, *n;
@@ -150,17 +151,17 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                 struct list_head list;
                 bool found;
 
-                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
+                ret = drm_mm_insert_node_in_range(&context->mm, node,
                                                   size, 0, 0, 0, U64_MAX, mode);
                 if (ret != -ENOSPC)
                         break;
 
                 /* Try to retire some entries */
-                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
+                drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
 
                 found = 0;
                 INIT_LIST_HEAD(&list);
-                list_for_each_entry(free, &mmu->mappings, mmu_node) {
+                list_for_each_entry(free, &context->mappings, mmu_node) {
                         /* If this vram node has not been used, skip this. */
                         if (!free->vram_node.mm)
                                 continue;
@@ -202,8 +203,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                  * this mapping.
                  */
                 list_for_each_entry_safe(m, n, &list, scan_node) {
-                        etnaviv_iommu_remove_mapping(mmu, m);
-                        m->mmu = NULL;
+                        etnaviv_iommu_remove_mapping(context, m);
+                        m->context = NULL;
                         list_del_init(&m->mmu_node);
                         list_del_init(&m->scan_node);
                 }
@@ -219,9 +220,16 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
         return ret;
 }
 
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
+                  struct drm_mm_node *node, size_t size, u64 va)
+{
+        return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
+                                           va + size, DRM_MM_INSERT_LOWEST);
+}
+
+int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
         struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
-        struct etnaviv_vram_mapping *mapping)
+        struct etnaviv_vram_mapping *mapping, u64 va)
 {
         struct sg_table *sgt = etnaviv_obj->sgt;
         struct drm_mm_node *node;
@@ -229,17 +237,17 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 
         lockdep_assert_held(&etnaviv_obj->lock);
 
-        mutex_lock(&mmu->lock);
+        mutex_lock(&context->lock);
 
         /* v1 MMU can optimize single entry (contiguous) scatterlists */
-        if (mmu->version == ETNAVIV_IOMMU_V1 &&
+        if (context->global->version == ETNAVIV_IOMMU_V1 &&
             sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                 u32 iova;
 
                 iova = sg_dma_address(sgt->sgl) - memory_base;
                 if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                         mapping->iova = iova;
-                        list_add_tail(&mapping->mmu_node, &mmu->mappings);
+                        list_add_tail(&mapping->mmu_node, &context->mappings);
                         ret = 0;
                         goto unlock;
                 }
@@ -247,12 +255,17 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 
         node = &mapping->vram_node;
 
-        ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
+        if (va)
+                ret = etnaviv_iommu_insert_exact(context, node,
+                                                 etnaviv_obj->base.size, va);
+        else
+                ret = etnaviv_iommu_find_iova(context, node,
+                                              etnaviv_obj->base.size);
         if (ret < 0)
                 goto unlock;
 
         mapping->iova = node->start;
-        ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
+        ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
                                 ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
 
         if (ret < 0) {
@@ -260,130 +273,233 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
                 goto unlock;
         }
 
-        list_add_tail(&mapping->mmu_node, &mmu->mappings);
-        mmu->need_flush = true;
+        list_add_tail(&mapping->mmu_node, &context->mappings);
+        context->flush_seq++;
 unlock:
-        mutex_unlock(&mmu->lock);
+        mutex_unlock(&context->lock);
 
         return ret;
 }
 
-void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
                              struct etnaviv_vram_mapping *mapping)
 {
         WARN_ON(mapping->use);
 
-        mutex_lock(&mmu->lock);
+        mutex_lock(&context->lock);
 
         /* If the vram node is on the mm, unmap and remove the node */
-        if (mapping->vram_node.mm == &mmu->mm)
-                etnaviv_iommu_remove_mapping(mmu, mapping);
+        if (mapping->vram_node.mm == &context->mm)
+                etnaviv_iommu_remove_mapping(context, mapping);
 
         list_del(&mapping->mmu_node);
-        mmu->need_flush = true;
-        mutex_unlock(&mmu->lock);
+        context->flush_seq++;
+        mutex_unlock(&context->lock);
 }
 
-void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
+static void etnaviv_iommu_context_free(struct kref *kref)
 {
-        drm_mm_takedown(&mmu->mm);
-        mmu->domain->ops->free(mmu->domain);
-        kfree(mmu);
+        struct etnaviv_iommu_context *context =
+                container_of(kref, struct etnaviv_iommu_context, refcount);
+
+        etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
+
+        context->global->ops->free(context);
+}
+void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
+{
+        kref_put(&context->refcount, etnaviv_iommu_context_free);
 }
 
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
+                           struct etnaviv_cmdbuf_suballoc *suballoc)
 {
-        enum etnaviv_iommu_version version;
-        struct etnaviv_iommu *mmu;
+        struct etnaviv_iommu_context *ctx;
+        int ret;
 
-        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
-        if (!mmu)
-                return ERR_PTR(-ENOMEM);
+        if (global->version == ETNAVIV_IOMMU_V1)
+                ctx = etnaviv_iommuv1_context_alloc(global);
+        else
+                ctx = etnaviv_iommuv2_context_alloc(global);
 
-        if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
-                mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
-                version = ETNAVIV_IOMMU_V1;
-        } else {
-                mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
-                version = ETNAVIV_IOMMU_V2;
-        }
+        if (!ctx)
+                return NULL;
 
-        if (!mmu->domain) {
-                dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
-                kfree(mmu);
-                return ERR_PTR(-ENOMEM);
+        ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
+                                          global->memory_base);
+        if (ret) {
+                global->ops->free(ctx);
+                return NULL;
         }
 
-        mmu->gpu = gpu;
-        mmu->version = version;
-        mutex_init(&mmu->lock);
-        INIT_LIST_HEAD(&mmu->mappings);
-
-        drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);
-
-        return mmu;
+        return ctx;
 }
 
-void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+                           struct etnaviv_iommu_context *context)
 {
-        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
-                etnaviv_iommuv1_restore(gpu);
-        else
-                etnaviv_iommuv2_restore(gpu);
+        context->global->ops->restore(gpu, context);
 }
 
-int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
-                                  struct drm_mm_node *vram_node, size_t size,
-                                  u32 *iova)
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
+                                  struct etnaviv_vram_mapping *mapping,
+                                  u32 memory_base, dma_addr_t paddr,
+                                  size_t size)
 {
-        struct etnaviv_iommu *mmu = gpu->mmu;
+        mutex_lock(&context->lock);
 
-        if (mmu->version == ETNAVIV_IOMMU_V1) {
-                *iova = paddr - gpu->memory_base;
+        if (mapping->use > 0) {
+                mapping->use++;
+                mutex_unlock(&context->lock);
                 return 0;
+        }
+
+        /*
+         * For MMUv1 we don't add the suballoc region to the pagetables, as
+         * those GPUs can only work with cmdbufs accessed through the linear
+         * window. Instead we manufacture a mapping to make it look uniform
+         * to the upper layers.
+         */
+        if (context->global->version == ETNAVIV_IOMMU_V1) {
+                mapping->iova = paddr - memory_base;
         } else {
+                struct drm_mm_node *node = &mapping->vram_node;
                 int ret;
 
-                mutex_lock(&mmu->lock);
-                ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
+                ret = etnaviv_iommu_find_iova(context, node, size);
                 if (ret < 0) {
-                        mutex_unlock(&mmu->lock);
+                        mutex_unlock(&context->lock);
                         return ret;
                 }
-                ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
-                                         size, ETNAVIV_PROT_READ);
+
+                mapping->iova = node->start;
+                ret = etnaviv_context_map(context, node->start, paddr, size,
+                                          ETNAVIV_PROT_READ);
                 if (ret < 0) {
-                        drm_mm_remove_node(vram_node);
-                        mutex_unlock(&mmu->lock);
+                        drm_mm_remove_node(node);
+                        mutex_unlock(&context->lock);
                         return ret;
                 }
-                gpu->mmu->need_flush = true;
-                mutex_unlock(&mmu->lock);
 
-                *iova = (u32)vram_node->start;
-                return 0;
+                context->flush_seq++;
         }
+
+        list_add_tail(&mapping->mmu_node, &context->mappings);
+        mapping->use = 1;
+
+        mutex_unlock(&context->lock);
+
+        return 0;
 }
 
-void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
-                                   struct drm_mm_node *vram_node, size_t size,
-                                   u32 iova)
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
                                    struct etnaviv_vram_mapping *mapping)
 {
-        struct etnaviv_iommu *mmu = gpu->mmu;
+        struct drm_mm_node *node = &mapping->vram_node;
 
-        if (mmu->version == ETNAVIV_IOMMU_V2) {
-                mutex_lock(&mmu->lock);
-                etnaviv_domain_unmap(mmu->domain, iova, size);
-                drm_mm_remove_node(vram_node);
-                mutex_unlock(&mmu->lock);
+        mutex_lock(&context->lock);
+        mapping->use--;
+
+        if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
+                mutex_unlock(&context->lock);
+                return;
         }
+
+        etnaviv_context_unmap(context, node->start, node->size);
+        drm_mm_remove_node(node);
+        mutex_unlock(&context->lock);
+}
+
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
+{
+        return context->global->ops->dump_size(context);
+}
+
+void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
+{
+        context->global->ops->dump(context, buf);
 }
-size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
+
+int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
 {
-        return iommu->domain->ops->dump_size(iommu->domain);
+        enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
+        struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+        struct etnaviv_iommu_global *global;
+        struct device *dev = gpu->drm->dev;
+
+        if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
+                version = ETNAVIV_IOMMU_V2;
+
+        if (priv->mmu_global) {
+                if (priv->mmu_global->version != version) {
+                        dev_err(gpu->dev,
+                                "MMU version doesn't match global version\n");
+                        return -ENXIO;
+                }
+
+                priv->mmu_global->use++;
+                return 0;
+        }
+
+        global = kzalloc(sizeof(*global), GFP_KERNEL);
+        if (!global)
+                return -ENOMEM;
+
+        global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
+                                            GFP_KERNEL);
+        if (!global->bad_page_cpu)
+                goto free_global;
+
+        memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));
+
+        if (version == ETNAVIV_IOMMU_V2) {
+                global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
+                                                  &global->v2.pta_dma, GFP_KERNEL);
+                if (!global->v2.pta_cpu)
+                        goto free_bad_page;
+        }
+
+        global->dev = dev;
+        global->version = version;
+        global->use = 1;
+        mutex_init(&global->lock);
+
+        if (version == ETNAVIV_IOMMU_V1)
+                global->ops = &etnaviv_iommuv1_ops;
+        else
+                global->ops = &etnaviv_iommuv2_ops;
+
+        priv->mmu_global = global;
+
+        return 0;
+
+free_bad_page:
+        dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
+free_global:
+        kfree(global);
+
+        return -ENOMEM;
 }
 
-void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
+void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
 {
-        iommu->domain->ops->dump(iommu->domain, buf);
+        struct etnaviv_drm_private *priv = gpu->drm->dev_private;
+        struct etnaviv_iommu_global *global = priv->mmu_global;
+
+        if (--global->use > 0)
+                return;
+
+        if (global->v2.pta_cpu)
+                dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
+                            global->v2.pta_cpu, global->v2.pta_dma);
+
+        if (global->bad_page_cpu)
+                dma_free_wc(global->dev, SZ_4K,
+                            global->bad_page_cpu, global->bad_page_dma);
+
+        mutex_destroy(&global->lock);
+        kfree(global);
+
+        priv->mmu_global = NULL;
 }
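
Editor's note: the diff replaces the single per-GPU struct etnaviv_iommu with a refcounted struct etnaviv_iommu_context layered on a shared struct etnaviv_iommu_global, and etnaviv_iommu_map_gem() gains a va argument so callers can request exact placement through the new etnaviv_iommu_insert_exact() instead of always searching with etnaviv_iommu_find_iova(). The sketch below illustrates how a caller would drive the reworked lifecycle; it is not code from this commit. The wrapper function, its parameters, and the error paths are assumptions for illustration, only the etnaviv_iommu_* calls and struct members come from the diff above.

/*
 * Hedged caller-side sketch of the reworked MMU lifecycle. The function
 * and its arguments are hypothetical; only the etnaviv_iommu_* API used
 * here is defined by this commit.
 */
static int example_mmu_lifecycle(struct etnaviv_gpu *gpu,
                                 struct etnaviv_drm_private *priv,
                                 struct etnaviv_cmdbuf_suballoc *suballoc,
                                 struct etnaviv_gem_object *etnaviv_obj,
                                 struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_iommu_context *ctx;
        int ret;

        /* once per GPU bind; allocates or refcounts the shared global state */
        ret = etnaviv_iommu_global_init(gpu);
        if (ret)
                return ret;

        /* one refcounted context per GPU address space */
        ctx = etnaviv_iommu_context_init(priv->mmu_global, suballoc);
        if (!ctx) {
                etnaviv_iommu_global_fini(gpu);
                return -ENOMEM;
        }

        /*
         * va == 0 picks any free iova via etnaviv_iommu_find_iova();
         * va != 0 requests exact placement via etnaviv_iommu_insert_exact().
         * etnaviv_iommu_map_gem() asserts that etnaviv_obj->lock is held.
         */
        mutex_lock(&etnaviv_obj->lock);
        ret = etnaviv_iommu_map_gem(ctx, etnaviv_obj, ctx->global->memory_base,
                                    mapping, 0);
        mutex_unlock(&etnaviv_obj->lock);

        if (!ret)
                etnaviv_iommu_unmap_gem(ctx, mapping);

        etnaviv_iommu_context_put(ctx);         /* drops the kref */
        etnaviv_iommu_global_fini(gpu);         /* on GPU unbind */

        return ret;
}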