-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gem.c	11
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_mmu.c	54
2 files changed, 55 insertions, 10 deletions
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 507172e2780b..cc386f8a7116 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -294,18 +294,15 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
 		list_del(&mapping->obj_node);
 	}
 
-	mapping->context = etnaviv_iommu_context_get(mmu_context);
 	mapping->use = 1;
 
 	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
 				    mmu_context->global->memory_base,
 				    mapping, va);
-	if (ret < 0) {
-		etnaviv_iommu_context_put(mmu_context);
+	if (ret < 0)
 		kfree(mapping);
-	} else {
+	else
 		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
-	}
 
 out:
 	mutex_unlock(&etnaviv_obj->lock);
@@ -500,10 +497,8 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
 
 		WARN_ON(mapping->use);
 
-		if (context) {
+		if (context)
 			etnaviv_iommu_unmap_gem(context, mapping);
-			etnaviv_iommu_context_put(context);
-		}
 
 		list_del(&mapping->obj_node);
 		kfree(mapping);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 9fb1a2aadbcb..dc1aa738c4f1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -92,6 +92,8 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 		da += bytes;
 	}
 
+	context->flush_seq++;
+
 	return 0;
 
 fail:
@@ -117,6 +119,8 @@ static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
 
 		da += bytes;
 	}
+
+	context->flush_seq++;
 }
 
 static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
@@ -219,8 +223,47 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
 static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
 		struct drm_mm_node *node, size_t size, u64 va)
 {
+	struct etnaviv_vram_mapping *m, *n;
+	struct drm_mm_node *scan_node;
+	LIST_HEAD(scan_list);
+	int ret;
+
 	lockdep_assert_held(&context->lock);
 
+	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
+					  va + size, DRM_MM_INSERT_LOWEST);
+	if (ret != -ENOSPC)
+		return ret;
+
+	/*
+	 * When we can't insert the node, due to a existing mapping blocking
+	 * the address space, there are two possible reasons:
+	 * 1. Userspace genuinely messed up and tried to reuse address space
+	 * before the last job using this VMA has finished executing.
+	 * 2. The existing buffer mappings are idle, but the buffers are not
+	 * destroyed yet (likely due to being referenced by another context) in
+	 * which case the mappings will not be cleaned up and we must reap them
+	 * here to make space for the new mapping.
+	 */
+
+	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
+		m = container_of(scan_node, struct etnaviv_vram_mapping,
+				 vram_node);
+
+		if (m->use)
+			return -ENOSPC;
+
+		list_add(&m->scan_node, &scan_list);
+	}
+
+	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
+		etnaviv_iommu_remove_mapping(context, m);
+		etnaviv_iommu_context_put(m->context);
+		m->context = NULL;
+		list_del_init(&m->mmu_node);
+		list_del_init(&m->scan_node);
+	}
+
 	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
 					   va + size, DRM_MM_INSERT_LOWEST);
 }
@@ -245,6 +288,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
 		iova = sg_dma_address(sgt->sgl) - memory_base;
 		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
 			mapping->iova = iova;
+			mapping->context = etnaviv_iommu_context_get(context);
 			list_add_tail(&mapping->mmu_node, &context->mappings);
 			ret = 0;
 			goto unlock;
@@ -271,8 +315,8 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
 		goto unlock;
 	}
 
+	mapping->context = etnaviv_iommu_context_get(context);
 	list_add_tail(&mapping->mmu_node, &context->mappings);
-	context->flush_seq++;
 unlock:
 	mutex_unlock(&context->lock);
 
@@ -286,13 +330,19 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
 
 	mutex_lock(&context->lock);
 
+	/* Bail if the mapping has been reaped by another thread */
+	if (!mapping->context) {
+		mutex_unlock(&context->lock);
+		return;
+	}
+
 	/* If the vram node is on the mm, unmap and remove the node */
 	if (mapping->vram_node.mm == &context->mm)
 		etnaviv_iommu_remove_mapping(context, mapping);
 
 	list_del(&mapping->mmu_node);
-	context->flush_seq++;
 	mutex_unlock(&context->lock);
+	etnaviv_iommu_context_put(context);
 }
 
 static void etnaviv_iommu_context_free(struct kref *kref)
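
For readers of the patch: the new etnaviv_iommu_insert_exact reaps idle mappings in two passes, and etnaviv_iommu_map_gem now takes the MMU context reference that etnaviv_iommu_unmap_gem (or the reaper) later drops. The standalone C sketch below only illustrates the scan-then-reap pattern used above; every name in it (example_mapping, overlaps, reap_range) is invented for the example and is not part of the driver.

/*
 * Simplified, userspace-style illustration of the two-pass reaping scheme:
 * first scan every mapping that overlaps the requested [va, va + size)
 * range and bail out if any of them is still in use, then tear down the
 * idle ones in a second pass so the exact-address insertion can be retried.
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct example_mapping {
	unsigned long va;	/* start of the mapped range */
	size_t size;		/* length of the mapped range */
	int use;		/* non-zero while a job still references it */
	bool mapped;		/* still present in the address space */
};

static bool overlaps(const struct example_mapping *m,
		     unsigned long va, size_t size)
{
	return m->mapped && m->va < va + size && va < m->va + m->size;
}

/* Returns 0 on success, -ENOSPC if a busy mapping blocks the range. */
static int reap_range(struct example_mapping *maps, size_t count,
		      unsigned long va, size_t size)
{
	size_t i;

	/* Pass 1: refuse if any overlapping mapping is still busy. */
	for (i = 0; i < count; i++)
		if (overlaps(&maps[i], va, size) && maps[i].use)
			return -ENOSPC;

	/* Pass 2: all overlapping mappings are idle, reap them. */
	for (i = 0; i < count; i++)
		if (overlaps(&maps[i], va, size))
			maps[i].mapped = false;	/* drop the mapping */

	return 0;
}

As in the patch, the busy check covers the whole range before anything is torn down: either every blocking mapping is idle and gets reaped, or the call fails with -ENOSPC without modifying the address space. The real driver additionally drops the reaped mapping's MMU context reference and clears mapping->context, which is what the new bail-out in etnaviv_iommu_unmap_gem checks for.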