Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 40 +++++++++++++++++++++++++++++++++++++---
1 file changed, 37 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index fbfab2f33023..e0e7c48f45dc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -142,6 +142,12 @@ vma_create(struct drm_i915_gem_object *obj,
                                                 i915_gem_object_get_stride(obj));
         GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
 
+        /*
+         * We put the GGTT vma at the start of the vma-list, followed
+         * by the ppGTT vma. This allows us to break early when
+         * iterating over only the GGTT vma for an object, see
+         * for_each_ggtt_vma().
+         */
         vma->flags |= I915_VMA_GGTT;
         list_add(&vma->obj_link, &obj->vma_list);
     } else {
@@ -305,7 +311,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 
     ptr = vma->iomap;
     if (ptr == NULL) {
-        ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+        ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                 vma->node.start,
                                 vma->node.size);
         if (ptr == NULL) {
@@ -322,6 +328,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
     if (err)
         goto err_unpin;
 
+    i915_vma_set_ggtt_write(vma);
     return ptr;
 
 err_unpin:
@@ -330,12 +337,24 @@ err:
     return IO_ERR_PTR(err);
 }
 
+void i915_vma_flush_writes(struct i915_vma *vma)
+{
+    if (!i915_vma_has_ggtt_write(vma))
+        return;
+
+    i915_gem_flush_ggtt_writes(vma->vm->i915);
+
+    i915_vma_unset_ggtt_write(vma);
+}
+
 void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
     lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
 
     GEM_BUG_ON(vma->iomap == NULL);
 
+    i915_vma_flush_writes(vma);
+
     i915_vma_unpin_fence(vma);
     i915_vma_unpin(vma);
 }
@@ -466,6 +485,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
     u64 start, end;
     int ret;
 
+    GEM_BUG_ON(i915_vma_is_closed(vma));
     GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
     GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
@@ -640,15 +660,17 @@ int __i915_vma_do_pin(struct i915_vma *vma,
         if (ret)
             goto err_unpin;
     }
+    GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
     ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
     if (ret)
         goto err_remove;
 
+    GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
+
     if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
         __i915_vma_set_map_and_fenceable(vma);
 
-    GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
     GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
     return 0;
 
@@ -656,6 +678,7 @@ err_remove:
     if ((bound & I915_VMA_BIND_MASK) == 0) {
         i915_vma_remove(vma);
         GEM_BUG_ON(vma->pages);
+        GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
     }
 err_unpin:
     __i915_vma_unpin(vma);
@@ -675,7 +698,9 @@ static void i915_vma_destroy(struct i915_vma *vma)
         GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
     GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
 
+    list_del(&vma->obj_link);
     list_del(&vma->vm_link);
+
     if (!i915_vma_is_ggtt(vma))
         i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
@@ -687,7 +712,6 @@ void i915_vma_close(struct i915_vma *vma)
     GEM_BUG_ON(i915_vma_is_closed(vma));
     vma->flags |= I915_VMA_CLOSED;
 
-    list_del(&vma->obj_link);
     rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
     if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
@@ -740,6 +764,7 @@ int i915_vma_unbind(struct i915_vma *vma)
     /* First wait upon any activity as retiring the request may
      * have side-effects such as unpinning or even unbinding this vma.
      */
+    might_sleep();
     active = i915_vma_get_active(vma);
     if (active) {
         int idx;
@@ -786,6 +811,15 @@ int i915_vma_unbind(struct i915_vma *vma)
         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
     if (i915_vma_is_map_and_fenceable(vma)) {
+        /*
+         * Check that we have flushed all writes through the GGTT
+         * before the unbind; otherwise, due to the non-strict
+         * nature of those indirect writes, they may end up
+         * referencing the GGTT PTE after the unbind.
+         */
+        i915_vma_flush_writes(vma);
+        GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
+
         /* release the fence reg _after_ flushing */
         ret = i915_vma_put_fence(vma);
         if (ret)
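Two of the hunks above encode behaviour worth spelling out. The first is the ordering invariant documented in vma_create(): GGTT vmas are list_add()'ed at the head of obj->vma_list while ppGTT vmas end up behind them, so a walker interested only in GGTT bindings may stop at the first non-GGTT entry. Below is a minimal, self-contained userspace sketch of that early-break pattern; struct vma, add_head() and the for_each_ggtt_vma() macro here are simplified stand-ins for the driver's types and for the real macro in i915_vma.h, not the driver's code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the driver's vma: it knows whether it is a GGTT
 * binding and is linked into its object's vma list. */
struct vma {
        const char *name;
        bool is_ggtt;
        struct vma *next;
};

struct object {
        struct vma *vma_list;   /* invariant: GGTT vmas first, then ppGTT */
};

/* Insert at the head of the list, which is what list_add() does for
 * GGTT vmas in the hunk above. */
static void add_head(struct object *obj, struct vma *vma)
{
        vma->next = obj->vma_list;
        obj->vma_list = vma;
}

/* Walk only the GGTT vmas: thanks to the ordering invariant we may
 * break at the first ppGTT vma instead of scanning the whole list. */
#define for_each_ggtt_vma(V, OBJ) \
        for ((V) = (OBJ)->vma_list; (V) && (V)->is_ggtt; (V) = (V)->next)

int main(void)
{
        struct vma ppgtt = { "ppgtt", false, NULL };
        struct vma ggtt_partial = { "ggtt-partial", true, NULL };
        struct vma ggtt = { "ggtt", true, NULL };
        struct object obj = { NULL };
        struct vma *v;

        /* Pushing the GGTT vmas on after the ppGTT vma leaves the
         * ppGTT vma at the tail, preserving the invariant. */
        add_head(&obj, &ppgtt);
        add_head(&obj, &ggtt_partial);
        add_head(&obj, &ggtt);

        for_each_ggtt_vma(v, &obj)
                printf("GGTT vma: %s\n", v->name); /* never visits "ppgtt" */

        return 0;
}

The early break is only sound because head insertion keeps every GGTT vma in front of every ppGTT vma, which is exactly what the new comment in the hunk pins down.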
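The second is the GGTT write tracking that the rest of the diff wires up: i915_vma_pin_iomap() marks the vma with i915_vma_set_ggtt_write() when it hands out a write-combined mapping, and i915_vma_flush_writes() flushes and clears that mark on unpin and again before unbind, because WC writes are posted and could otherwise still be in flight when the GGTT PTE is reused. A hedged userspace sketch of that flag lifecycle follows; the types and helpers are stand-ins (the real flush, i915_gem_flush_ggtt_writes(), is where the driver issues the actual memory barriers), not the driver's implementation.

#include <assert.h>
#include <stdio.h>

/* Stand-in vma: only the pieces the write-tracking protocol needs. */
struct vma {
        unsigned int flags;
        void *iomap;            /* WC mapping of the GGTT aperture */
};

#define VMA_GGTT_WRITE (1u << 0) /* stand-in for the driver's GGTT_WRITE bit */

/* Stand-in for i915_gem_flush_ggtt_writes(): in the driver this is
 * where pending WC writes are forced out to memory. */
static void flush_ggtt_writes(void)
{
        printf("flushing pending GGTT writes\n");
}

/* Mirror of the new i915_vma_flush_writes(): flush only if a WC write
 * may be pending, then clear the mark so the flush happens once. */
static void vma_flush_writes(struct vma *vma)
{
        if (!(vma->flags & VMA_GGTT_WRITE))
                return;
        flush_ggtt_writes();
        vma->flags &= ~VMA_GGTT_WRITE;
}

/* Pinning the iomap hands out a WC pointer, so assume it will be
 * written through and set the mark (i915_vma_set_ggtt_write()). */
static void *vma_pin_iomap(struct vma *vma)
{
        vma->flags |= VMA_GGTT_WRITE;
        return vma->iomap;
}

/* Both unpin and unbind must flush before the mapping or PTE goes
 * away: posted WC writes may land after the PTE has been reused. */
static void vma_unpin_iomap(struct vma *vma)
{
        vma_flush_writes(vma);
}

static void vma_unbind(struct vma *vma)
{
        vma_flush_writes(vma);
        assert(!(vma->flags & VMA_GGTT_WRITE)); /* the GEM_BUG_ON above */
}

int main(void)
{
        char backing[64];
        struct vma vma = { 0, backing };

        char *ptr = vma_pin_iomap(&vma);
        ptr[0] = 0x55;          /* WC write through the aperture */
        vma_unpin_iomap(&vma);  /* flushes exactly once */
        vma_unbind(&vma);       /* nothing left to flush */
        return 0;
}

Flushing in both paths keeps the mark idempotent: whoever gets there first pays for the flush, and the GEM_BUG_ON in the unbind hunk asserts nothing is left pending once the PTE is about to be torn down.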