Diffstat (limited to 'drivers/gpu/drm/i915/selftests/i915_gem_gtt.c')
-rw-r--r-- | drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 430
1 file changed, 316 insertions, 114 deletions
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 575705c3bce9..ab751192eb3b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -26,12 +26,16 @@
 #include <linux/prime_numbers.h>
 
 #include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_region.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_context.h"
 #include "gt/intel_gpu_commands.h"
+#include "gt/intel_gtt.h"
 
 #include "i915_random.h"
 #include "i915_selftest.h"
+#include "i915_vma_resource.h"
 
 #include "mock_drm.h"
 #include "mock_gem_device.h"
@@ -237,12 +241,14 @@ static int lowlevel_hole(struct i915_address_space *vm,
 			 u64 hole_start, u64 hole_end,
 			 unsigned long end_time)
 {
+	const unsigned int min_alignment =
+		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
 	I915_RND_STATE(seed_prng);
-	struct i915_vma *mock_vma;
+	struct i915_vma_resource *mock_vma_res;
 	unsigned int size;
 
-	mock_vma = kzalloc(sizeof(*mock_vma), GFP_KERNEL);
-	if (!mock_vma)
+	mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
+	if (!mock_vma_res)
 		return -ENOMEM;
 
 	/* Keep creating larger objects until one cannot fit into the hole */
@@ -250,9 +256,10 @@ static int lowlevel_hole(struct i915_address_space *vm,
 		I915_RND_SUBSTATE(prng, seed_prng);
 		struct drm_i915_gem_object *obj;
 		unsigned int *order, count, n;
-		u64 hole_size;
+		u64 hole_size, aligned_size;
 
-		hole_size = (hole_end - hole_start) >> size;
+		aligned_size = max_t(u32, ilog2(min_alignment), size);
+		hole_size = (hole_end - hole_start) >> aligned_size;
 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
 		count = hole_size >> 1;
@@ -268,13 +275,13 @@ static int lowlevel_hole(struct i915_address_space *vm,
 				break;
 		} while (count >>= 1);
 		if (!count) {
-			kfree(mock_vma);
+			kfree(mock_vma_res);
 			return -ENOMEM;
 		}
 		GEM_BUG_ON(!order);
 
-		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
-		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
+		GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
+		GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);
 
 		/* Ignore allocation failures (i.e. don't report them as
 		 * a test failure) as we are purposefully allocating very
@@ -297,10 +304,10 @@ static int lowlevel_hole(struct i915_address_space *vm,
 		}
 
 		for (n = 0; n < count; n++) {
-			u64 addr = hole_start + order[n] * BIT_ULL(size);
+			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
 			intel_wakeref_t wakeref;
 
-			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
+			GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);
 
 			if (igt_timeout(end_time,
 					"%s timed out before %d/%d\n",
@@ -342,19 +349,19 @@ alloc_vm_end:
 					break;
 			}
 
-			mock_vma->pages = obj->mm.pages;
-			mock_vma->node.size = BIT_ULL(size);
-			mock_vma->node.start = addr;
+			mock_vma_res->bi.pages = obj->mm.pages;
+			mock_vma_res->node_size = BIT_ULL(aligned_size);
+			mock_vma_res->start = addr;
 
 			with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
-				vm->insert_entries(vm, mock_vma,
+				vm->insert_entries(vm, mock_vma_res,
 						   I915_CACHE_NONE, 0);
 		}
 		count = n;
 
 		i915_random_reorder(order, count, &prng);
 		for (n = 0; n < count; n++) {
-			u64 addr = hole_start + order[n] * BIT_ULL(size);
+			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
 			intel_wakeref_t wakeref;
 
 			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
@@ -370,7 +377,7 @@ alloc_vm_end:
 		cleanup_freed_objects(vm->i915);
 	}
 
-	kfree(mock_vma);
+	kfree(mock_vma_res);
 	return 0;
 }
 
@@ -385,7 +392,7 @@ static void close_object_list(struct list_head *objects,
 
 		vma = i915_vma_instance(obj, vm, NULL);
 		if (!IS_ERR(vma))
-			ignored = i915_vma_unbind(vma);
+			ignored = i915_vma_unbind_unlocked(vma);
 
 		list_del(&obj->st_link);
 		i915_gem_object_put(obj);
@@ -398,8 +405,10 @@ static int fill_hole(struct i915_address_space *vm,
 {
 	const u64 hole_size = hole_end - hole_start;
 	struct drm_i915_gem_object *obj;
+	const unsigned int min_alignment =
+		i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
 	const unsigned long max_pages =
-		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
+		min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment));
 	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
 	unsigned long npages, prime, flags;
 	struct i915_vma *vma;
@@ -440,14 +449,17 @@ static int fill_hole(struct i915_address_space *vm,
 
 			offset = p->offset;
 			list_for_each_entry(obj, &objects, st_link) {
+				u64 aligned_size = round_up(obj->base.size,
+							    min_alignment);
+
 				vma = i915_vma_instance(obj, vm, NULL);
 				if (IS_ERR(vma))
 					continue;
 
 				if (p->step < 0) {
-					if (offset < hole_start + obj->base.size)
+					if (offset < hole_start + aligned_size)
 						break;
-					offset -= obj->base.size;
+					offset -= aligned_size;
 				}
 
 				err = i915_vma_pin(vma, 0, 0, offset | flags);
@@ -469,22 +481,25 @@ static int fill_hole(struct i915_address_space *vm,
 				i915_vma_unpin(vma);
 
 				if (p->step > 0) {
-					if (offset + obj->base.size > hole_end)
+					if (offset + aligned_size > hole_end)
 						break;
-					offset += obj->base.size;
+					offset += aligned_size;
 				}
 			}
 
 			offset = p->offset;
 			list_for_each_entry(obj, &objects, st_link) {
+				u64 aligned_size = round_up(obj->base.size,
+							    min_alignment);
+
 				vma = i915_vma_instance(obj, vm, NULL);
 				if (IS_ERR(vma))
 					continue;
 
 				if (p->step < 0) {
-					if (offset < hole_start + obj->base.size)
+					if (offset < hole_start + aligned_size)
 						break;
-					offset -= obj->base.size;
+					offset -= aligned_size;
 				}
 
 				if (!drm_mm_node_allocated(&vma->node) ||
@@ -496,7 +511,7 @@ static int fill_hole(struct i915_address_space *vm,
 					goto err;
 				}
 
-				err = i915_vma_unbind(vma);
+				err = i915_vma_unbind_unlocked(vma);
 				if (err) {
 					pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
 					       __func__, p->name, vma->node.start, vma->node.size,
@@ -505,22 +520,25 @@ static int fill_hole(struct i915_address_space *vm,
 				}
 
 				if (p->step > 0) {
-					if (offset + obj->base.size > hole_end)
+					if (offset + aligned_size > hole_end)
 						break;
-					offset += obj->base.size;
+					offset += aligned_size;
 				}
 			}
 
 			offset = p->offset;
 			list_for_each_entry_reverse(obj, &objects, st_link) {
+				u64 aligned_size = round_up(obj->base.size,
+							    min_alignment);
+
 				vma = i915_vma_instance(obj, vm, NULL);
 				if (IS_ERR(vma))
 					continue;
 
 				if (p->step < 0) {
-					if (offset < hole_start + obj->base.size)
+					if (offset < hole_start + aligned_size)
 						break;
-					offset -= obj->base.size;
+					offset -= aligned_size;
 				}
 
 				err = i915_vma_pin(vma, 0, 0, offset | flags);
@@ -542,22 +560,25 @@ static int fill_hole(struct i915_address_space *vm,
 				i915_vma_unpin(vma);
 
 				if (p->step > 0) {
-					if (offset + obj->base.size > hole_end)
+					if (offset + aligned_size > hole_end)
 						break;
-					offset += obj->base.size;
+					offset += aligned_size;
 				}
 			}
 
 			offset = p->offset;
 			list_for_each_entry_reverse(obj, &objects, st_link) {
+				u64 aligned_size = round_up(obj->base.size,
+							    min_alignment);
+
 				vma = i915_vma_instance(obj, vm, NULL);
 				if (IS_ERR(vma))
 					continue;
 
 				if (p->step < 0) {
-					if (offset < hole_start + obj->base.size)
+					if (offset < hole_start + aligned_size)
 						break;
-					offset -= obj->base.size;
+					offset -= aligned_size;
 				}
 
 				if (!drm_mm_node_allocated(&vma->node) ||
@@ -569,7 +590,7 @@ static int fill_hole(struct i915_address_space *vm,
 					goto err;
 				}
 
-				err = i915_vma_unbind(vma);
+				err = i915_vma_unbind_unlocked(vma);
 				if (err) {
 					pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
 					       __func__, p->name, vma->node.start, vma->node.size,
@@ -578,9 +599,9 @@ static int fill_hole(struct i915_address_space *vm,
 				}
 
 				if (p->step > 0) {
-					if (offset + obj->base.size > hole_end)
+					if (offset + aligned_size > hole_end)
 						break;
-					offset += obj->base.size;
+					offset += aligned_size;
 				}
 			}
 		}
@@ -610,6 +631,7 @@ static int walk_hole(struct i915_address_space *vm,
 	const u64 hole_size = hole_end - hole_start;
 	const unsigned long max_pages =
 		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
+	unsigned long min_alignment;
 	unsigned long flags;
 	u64 size;
 
@@ -619,6 +641,8 @@ static int walk_hole(struct i915_address_space *vm,
 	if (i915_is_ggtt(vm))
 		flags |= PIN_GLOBAL;
 
+	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
 	for_each_prime_number_from(size, 1, max_pages) {
 		struct drm_i915_gem_object *obj;
 		struct i915_vma *vma;
@@ -637,7 +661,7 @@ static int walk_hole(struct i915_address_space *vm,
 
 		for (addr = hole_start;
 		     addr + obj->base.size < hole_end;
-		     addr += obj->base.size) {
+		     addr += round_up(obj->base.size, min_alignment)) {
 			err = i915_vma_pin(vma, 0, 0, addr | flags);
 			if (err) {
 				pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
@@ -655,7 +679,7 @@ static int walk_hole(struct i915_address_space *vm,
 				goto err_put;
 			}
 
-			err = i915_vma_unbind(vma);
+			err = i915_vma_unbind_unlocked(vma);
 			if (err) {
 				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
 				       __func__, addr, vma->size, err);
@@ -689,6 +713,7 @@ static int pot_hole(struct i915_address_space *vm,
 {
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
+	unsigned int min_alignment;
 	unsigned long flags;
 	unsigned int pot;
 	int err = 0;
@@ -697,6 +722,8 @@ static int pot_hole(struct i915_address_space *vm,
 	if (i915_is_ggtt(vm))
 		flags |= PIN_GLOBAL;
 
+	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
 	obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
@@ -709,13 +736,13 @@ static int pot_hole(struct i915_address_space *vm,
 
 	/* Insert a pair of pages across every pot boundary within the hole */
 	for (pot = fls64(hole_end - 1) - 1;
-	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
+	     pot > ilog2(2 * min_alignment);
 	     pot--) {
 		u64 step = BIT_ULL(pot);
 		u64 addr;
 
-		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
-		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
+		for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
+		     addr <= round_down(hole_end - (2 * min_alignment), step) - min_alignment;
 		     addr += step) {
 			err = i915_vma_pin(vma, 0, 0, addr | flags);
 			if (err) {
@@ -732,13 +759,13 @@ static int pot_hole(struct i915_address_space *vm,
 			pr_err("%s incorrect at %llx + %llx\n",
 			       __func__, addr, vma->size);
 			i915_vma_unpin(vma);
-			err = i915_vma_unbind(vma);
+			err = i915_vma_unbind_unlocked(vma);
 			err = -EINVAL;
 			goto err_obj;
 		}
 
 		i915_vma_unpin(vma);
-		err = i915_vma_unbind(vma);
+		err = i915_vma_unbind_unlocked(vma);
 		GEM_BUG_ON(err);
 	}
 
@@ -760,6 +787,7 @@ static int drunk_hole(struct i915_address_space *vm,
 		      unsigned long end_time)
 {
 	I915_RND_STATE(prng);
+	unsigned int min_alignment;
 	unsigned int size;
 	unsigned long flags;
 
@@ -767,15 +795,18 @@ static int drunk_hole(struct i915_address_space *vm,
 	if (i915_is_ggtt(vm))
 		flags |= PIN_GLOBAL;
 
+	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
 	/* Keep creating larger objects until one cannot fit into the hole */
 	for (size = 12; (hole_end - hole_start) >> size; size++) {
 		struct drm_i915_gem_object *obj;
 		unsigned int *order, count, n;
 		struct i915_vma *vma;
-		u64 hole_size;
+		u64 hole_size, aligned_size;
 		int err = -ENODEV;
 
-		hole_size = (hole_end - hole_start) >> size;
+		aligned_size = max_t(u32, ilog2(min_alignment), size);
+		hole_size = (hole_end - hole_start) >> aligned_size;
 		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
 			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
 		count = hole_size >> 1;
@@ -815,7 +846,7 @@ static int drunk_hole(struct i915_address_space *vm,
 		GEM_BUG_ON(vma->size != BIT_ULL(size));
 
 		for (n = 0; n < count; n++) {
-			u64 addr = hole_start + order[n] * BIT_ULL(size);
+			u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
 
 			err = i915_vma_pin(vma, 0, 0, addr | flags);
 			if (err) {
@@ -832,13 +863,13 @@ static int drunk_hole(struct i915_address_space *vm,
 				pr_err("%s incorrect at %llx + %llx\n",
 				       __func__, addr, BIT_ULL(size));
 				i915_vma_unpin(vma);
-				err = i915_vma_unbind(vma);
+				err = i915_vma_unbind_unlocked(vma);
 				err = -EINVAL;
 				goto err_obj;
 			}
 
 			i915_vma_unpin(vma);
-			err = i915_vma_unbind(vma);
+			err = i915_vma_unbind_unlocked(vma);
 			GEM_BUG_ON(err);
 
 			if (igt_timeout(end_time,
@@ -867,11 +898,14 @@ static int __shrink_hole(struct i915_address_space *vm,
 {
 	struct drm_i915_gem_object *obj;
 	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
+	unsigned int min_alignment;
 	unsigned int order = 12;
 	LIST_HEAD(objects);
 	int err = 0;
 	u64 addr;
 
+	min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
+
 	/* Keep creating larger objects until one cannot fit into the hole */
 	for (addr = hole_start; addr < hole_end; ) {
 		struct i915_vma *vma;
@@ -906,13 +940,13 @@ static int __shrink_hole(struct i915_address_space *vm,
 			pr_err("%s incorrect at %llx + %llx\n",
 			       __func__, addr, size);
 			i915_vma_unpin(vma);
-			err = i915_vma_unbind(vma);
+			err = i915_vma_unbind_unlocked(vma);
 			err = -EINVAL;
 			break;
 		}
 
 		i915_vma_unpin(vma);
-		addr += size;
+		addr += round_up(size, min_alignment);
 
 		/*
 		 * Since we are injecting allocation faults at random intervals,
@@ -1036,6 +1070,118 @@ err_purge:
 	return err;
 }
 
+static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
+			   u64 addr, u64 size, unsigned long flags)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int err = 0;
+	u64 expected_vma_size, expected_node_size;
+	bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
+			 mr->type == INTEL_MEMORY_STOLEN_LOCAL;
+
+	obj = i915_gem_object_create_region(mr, size, 0, 0);
+	if (IS_ERR(obj)) {
+		/* if iGVT-g or DMAR is active, stolen mem will be uninitialized */
+		if (PTR_ERR(obj) == -ENODEV && is_stolen)
+			return 0;
+		return PTR_ERR(obj);
+	}
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_put;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, addr | flags);
+	if (err)
+		goto err_put;
+	i915_vma_unpin(vma);
+
+	if (!drm_mm_node_allocated(&vma->node)) {
+		err = -EINVAL;
+		goto err_put;
+	}
+
+	if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+		err = -EINVAL;
+		goto err_put;
+	}
+
+	expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
+	expected_node_size = expected_vma_size;
+
+	if (NEEDS_COMPACT_PT(vm->i915) && i915_gem_object_is_lmem(obj)) {
+		/* compact-pt should expand lmem node to 2MB */
+		expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
+		expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+	}
+
+	if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
+		err = i915_vma_unbind_unlocked(vma);
+		err = -EBADSLT;
+		goto err_put;
+	}
+
+	err = i915_vma_unbind_unlocked(vma);
+	if (err)
+		goto err_put;
+
+	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+err_put:
+	i915_gem_object_put(obj);
+	cleanup_freed_objects(vm->i915);
+	return err;
+}
+
+static int misaligned_pin(struct i915_address_space *vm,
+			  u64 hole_start, u64 hole_end,
+			  unsigned long end_time)
+{
+	struct intel_memory_region *mr;
+	enum intel_region_id id;
+	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
+	int err = 0;
+	u64 hole_size = hole_end - hole_start;
+
+	if (i915_is_ggtt(vm))
+		flags |= PIN_GLOBAL;
+
+	for_each_memory_region(mr, vm->i915, id) {
+		u64 min_alignment = i915_vm_min_alignment(vm, (enum intel_memory_type)id);
+		u64 size = min_alignment;
+		u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);
+
+		/* avoid -ENOSPC on very small hole setups */
+		if (hole_size < 3 * min_alignment)
+			continue;
+
+		/* we can't test < 4k alignment due to flags being encoded in lower bits */
+		if (min_alignment != I915_GTT_PAGE_SIZE_4K) {
+			err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
+			/* misaligned should error with -EINVAL*/
+			if (!err)
+				err = -EBADSLT;
+			if (err != -EINVAL)
+				return err;
+		}
+
+		/* test for vma->size expansion to min page size */
+		err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
+		if (err)
+			return err;
+
+		/* test for intermediate size not expanding vma->size for large alignments */
+		err = misaligned_case(vm, mr, addr, size / 2, flags);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 			  int (*func)(struct i915_address_space *vm,
 				      u64 hole_start, u64 hole_end,
@@ -1105,6 +1251,11 @@ static int igt_ppgtt_shrink_boom(void *arg)
 	return exercise_ppgtt(arg, shrink_boom);
 }
 
+static int igt_ppgtt_misaligned_pin(void *arg)
+{
+	return exercise_ppgtt(arg, misaligned_pin);
+}
+
 static int sort_holes(void *priv, const struct list_head *A,
 		      const struct list_head *B)
 {
@@ -1122,7 +1273,7 @@ static int exercise_ggtt(struct drm_i915_private *i915,
 				     u64 hole_start, u64 hole_end,
 				     unsigned long end_time))
 {
-	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	u64 hole_start, hole_end, last = 0;
 	struct drm_mm_node *node;
 	IGT_TIMEOUT(end_time);
@@ -1177,12 +1328,17 @@ static int igt_ggtt_lowlevel(void *arg)
 	return exercise_ggtt(arg, lowlevel_hole);
 }
 
+static int igt_ggtt_misaligned_pin(void *arg)
+{
+	return exercise_ggtt(arg, misaligned_pin);
+}
+
 static int igt_ggtt_page(void *arg)
{
 	const unsigned int count = PAGE_SIZE/sizeof(u32);
 	I915_RND_STATE(prng);
 	struct drm_i915_private *i915 = arg;
-	struct i915_ggtt *ggtt = &i915->ggtt;
+	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	struct drm_i915_gem_object *obj;
 	intel_wakeref_t wakeref;
 	struct drm_mm_node tmp;
@@ -1279,6 +1435,7 @@ static void track_vma_bind(struct i915_vma *vma)
 	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
 	__i915_gem_object_pin_pages(obj);
 	vma->pages = obj->mm.pages;
+	vma->resource->bi.pages = vma->pages;
 
 	mutex_lock(&vma->vm->mutex);
 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
@@ -1336,6 +1493,33 @@ static int igt_mock_drunk(void *arg)
 	return exercise_mock(ggtt->vm.i915, drunk_hole);
 }
 
+static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
+{
+	struct i915_address_space *vm = vma->vm;
+	struct i915_vma_resource *vma_res;
+	struct drm_i915_gem_object *obj = vma->obj;
+	int err;
+
+	vma_res = i915_vma_resource_alloc();
+	if (IS_ERR(vma_res))
+		return PTR_ERR(vma_res);
+
+	mutex_lock(&vm->mutex);
+	err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
+				   offset,
+				   obj->cache_level,
+				   0);
+	if (!err) {
+		i915_vma_resource_init_from_vma(vma_res, vma);
+		vma->resource = vma_res;
+	} else {
+		kfree(vma_res);
+	}
+	mutex_unlock(&vm->mutex);
+
+	return err;
+}
+
 static int igt_gtt_reserve(void *arg)
 {
 	struct i915_ggtt *ggtt = arg;
@@ -1370,20 +1554,13 @@ static int igt_gtt_reserve(void *arg)
 		}
 		list_add(&obj->st_link, &objects);
-
 		vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
 			goto out;
 		}
 
-		mutex_lock(&ggtt->vm.mutex);
-		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
-					   obj->base.size,
-					   total,
-					   obj->cache_level,
-					   0);
-		mutex_unlock(&ggtt->vm.mutex);
+		err = reserve_gtt_with_resource(vma, total);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
 			       total, ggtt->vm.total, err);
 			goto out;
@@ -1429,13 +1606,7 @@ static int igt_gtt_reserve(void *arg)
 			goto out;
 		}
 
-		mutex_lock(&ggtt->vm.mutex);
-		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
-					   obj->base.size,
-					   total,
-					   obj->cache_level,
-					   0);
-		mutex_unlock(&ggtt->vm.mutex);
+		err = reserve_gtt_with_resource(vma, total);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
 			       total, ggtt->vm.total, err);
@@ -1465,7 +1636,7 @@ static int igt_gtt_reserve(void *arg)
 			goto out;
 		}
 
-		err = i915_vma_unbind(vma);
+		err = i915_vma_unbind_unlocked(vma);
 		if (err) {
 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
 			goto out;
@@ -1476,13 +1647,7 @@ static int igt_gtt_reserve(void *arg)
 					   2 * I915_GTT_PAGE_SIZE,
 					   I915_GTT_MIN_ALIGNMENT);
 
-		mutex_lock(&ggtt->vm.mutex);
-		err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
-					   obj->base.size,
-					   offset,
-					   obj->cache_level,
-					   0);
-		mutex_unlock(&ggtt->vm.mutex);
+		err = reserve_gtt_with_resource(vma, offset);
 		if (err) {
 			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
 			       total, ggtt->vm.total, err);
@@ -1509,6 +1674,31 @@ out:
 	return err;
 }
 
+static int insert_gtt_with_resource(struct i915_vma *vma)
+{
+	struct i915_address_space *vm = vma->vm;
+	struct i915_vma_resource *vma_res;
+	struct drm_i915_gem_object *obj = vma->obj;
+	int err;
+
+	vma_res = i915_vma_resource_alloc();
+	if (IS_ERR(vma_res))
+		return PTR_ERR(vma_res);
+
+	mutex_lock(&vm->mutex);
+	err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
+				  obj->cache_level, 0, vm->total, 0);
+	if (!err) {
+		i915_vma_resource_init_from_vma(vma_res, vma);
+		vma->resource = vma_res;
+	} else {
+		kfree(vma_res);
+	}
+	mutex_unlock(&vm->mutex);
+
+	return err;
+}
+
 static int igt_gtt_insert(void *arg)
 {
 	struct i915_ggtt *ggtt = arg;
@@ -1552,7 +1742,7 @@ static int igt_gtt_insert(void *arg)
 	/* Check a couple of obviously invalid requests */
 	for (ii = invalid_insert; ii->size; ii++) {
 		mutex_lock(&ggtt->vm.mutex);
-		err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
+		err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
 					  ii->size, ii->alignment,
 					  I915_COLOR_UNEVICTABLE,
 					  ii->start, ii->end,
@@ -1593,12 +1783,7 @@ static int igt_gtt_insert(void *arg)
 			goto out;
 		}
 
-		mutex_lock(&ggtt->vm.mutex);
-		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
-					  obj->base.size, 0, obj->cache_level,
-					  0, ggtt->vm.total,
-					  0);
-		mutex_unlock(&ggtt->vm.mutex);
+		err = insert_gtt_with_resource(vma);
 		if (err == -ENOSPC) {
 			/* maxed out the GGTT space */
 			i915_gem_object_put(obj);
@@ -1647,18 +1832,13 @@ static int igt_gtt_insert(void *arg)
 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 		offset = vma->node.start;
 
-		err = i915_vma_unbind(vma);
+		err = i915_vma_unbind_unlocked(vma);
 		if (err) {
 			pr_err("i915_vma_unbind failed with err=%d!\n", err);
 			goto out;
 		}
 
-		mutex_lock(&ggtt->vm.mutex);
-		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
-					  obj->base.size, 0, obj->cache_level,
-					  0, ggtt->vm.total,
-					  0);
-		mutex_unlock(&ggtt->vm.mutex);
+		err = insert_gtt_with_resource(vma);
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
 			       total, ggtt->vm.total, err);
@@ -1702,12 +1882,7 @@ static int igt_gtt_insert(void *arg)
 			goto out;
 		}
 
-		mutex_lock(&ggtt->vm.mutex);
-		err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
-					  obj->base.size, 0, obj->cache_level,
-					  0, ggtt->vm.total,
-					  0);
-		mutex_unlock(&ggtt->vm.mutex);
+		err = insert_gtt_with_resource(vma);
 		if (err) {
 			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
 			       total, ggtt->vm.total, err);
@@ -1737,26 +1912,28 @@ int i915_gem_gtt_mock_selftests(void)
 		SUBTEST(igt_gtt_insert),
 	};
 	struct drm_i915_private *i915;
-	struct i915_ggtt *ggtt;
+	struct intel_gt *gt;
 	int err;
 
 	i915 = mock_gem_device();
 	if (!i915)
 		return -ENOMEM;
 
-	ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
-	if (!ggtt) {
-		err = -ENOMEM;
+	/* allocate the ggtt */
+	err = intel_gt_assign_ggtt(to_gt(i915));
+	if (err)
 		goto out_put;
-	}
-	mock_init_ggtt(i915, ggtt);
 
-	err = i915_subtests(tests, ggtt);
+	gt = to_gt(i915);
+
+	mock_init_ggtt(gt);
+
+	err = i915_subtests(tests, gt->ggtt);
 
 	mock_device_flush(i915);
 	i915_gem_drain_freed_objects(i915);
-	mock_fini_ggtt(ggtt);
-	kfree(ggtt);
+	mock_fini_ggtt(gt->ggtt);
+
 out_put:
 	mock_destroy_device(i915);
 	return err;
@@ -1939,6 +2116,7 @@ static int igt_cs_tlb(void *arg)
 		struct i915_vm_pt_stash stash = {};
 		struct i915_request *rq;
 		struct i915_gem_ww_ctx ww;
+		struct i915_vma_resource *vma_res;
 		u64 offset;
 
 		offset = igt_random_offset(&prng,
@@ -1959,6 +2137,13 @@ static int igt_cs_tlb(void *arg)
 		if (err)
 			goto end;
 
+		vma_res = i915_vma_resource_alloc();
+		if (IS_ERR(vma_res)) {
+			i915_vma_put_pages(vma);
+			err = PTR_ERR(vma_res);
+			goto end;
+		}
+
 		i915_gem_ww_ctx_init(&ww, false);
retry:
 		err = i915_vm_lock_objects(vm, &ww);
@@ -1980,33 +2165,41 @@ end_ww:
 			goto retry;
 		}
 		i915_gem_ww_ctx_fini(&ww);
-		if (err)
+		if (err) {
+			kfree(vma_res);
 			goto end;
+		}
 
+		i915_vma_resource_init_from_vma(vma_res, vma);
 		/* Prime the TLB with the dummy pages */
 		for (i = 0; i < count; i++) {
-			vma->node.start = offset + i * PAGE_SIZE;
-			vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+			vma_res->start = offset + i * PAGE_SIZE;
+			vm->insert_entries(vm, vma_res, I915_CACHE_NONE,
+					   0);
 
-			rq = submit_batch(ce, vma->node.start);
+			rq = submit_batch(ce, vma_res->start);
 			if (IS_ERR(rq)) {
 				err = PTR_ERR(rq);
+				i915_vma_resource_fini(vma_res);
+				kfree(vma_res);
 				goto end;
 			}
 
 			i915_request_put(rq);
 		}
-
+		i915_vma_resource_fini(vma_res);
 		i915_vma_put_pages(vma);
 
 		err = context_sync(ce);
 		if (err) {
 			pr_err("%s: dummy setup timed out\n", ce->engine->name);
+			kfree(vma_res);
 			goto end;
 		}
 
 		vma = i915_vma_instance(act, vm, NULL);
 		if (IS_ERR(vma)) {
+			kfree(vma_res);
 			err = PTR_ERR(vma);
 			goto end;
 		}
@@ -2014,19 +2207,22 @@ end_ww:
 		i915_gem_object_lock(act, NULL);
 		err = i915_vma_get_pages(vma);
 		i915_gem_object_unlock(act);
-		if (err)
+		if (err) {
+			kfree(vma_res);
 			goto end;
+		}
 
+		i915_vma_resource_init_from_vma(vma_res, vma);
 		/* Replace the TLB with target batches */
 		for (i = 0; i < count; i++) {
 			struct i915_request *rq;
 			u32 *cs = batch + i * 64 / sizeof(*cs);
 			u64 addr;
 
-			vma->node.start = offset + i * PAGE_SIZE;
-			vm->insert_entries(vm, vma, I915_CACHE_NONE, 0);
+			vma_res->start = offset + i * PAGE_SIZE;
+			vm->insert_entries(vm, vma_res, I915_CACHE_NONE, 0);
 
-			addr = vma->node.start + i * 64;
+			addr = vma_res->start + i * 64;
 			cs[4] = MI_NOOP;
 			cs[6] = lower_32_bits(addr);
 			cs[7] = upper_32_bits(addr);
@@ -2035,6 +2231,8 @@ end_ww:
 			rq = submit_batch(ce, addr);
 			if (IS_ERR(rq)) {
 				err = PTR_ERR(rq);
+				i915_vma_resource_fini(vma_res);
+				kfree(vma_res);
 				goto end;
 			}
 
@@ -2051,6 +2249,8 @@ end_ww:
 		}
 		end_spin(batch, count - 1);
 
+		i915_vma_resource_fini(vma_res);
+		kfree(vma_res);
 		i915_vma_put_pages(vma);
 
 		err = context_sync(ce);
@@ -2105,16 +2305,18 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ppgtt_fill),
 		SUBTEST(igt_ppgtt_shrink),
 		SUBTEST(igt_ppgtt_shrink_boom),
+		SUBTEST(igt_ppgtt_misaligned_pin),
 		SUBTEST(igt_ggtt_lowlevel),
 		SUBTEST(igt_ggtt_drunk),
 		SUBTEST(igt_ggtt_walk),
 		SUBTEST(igt_ggtt_pot),
 		SUBTEST(igt_ggtt_fill),
 		SUBTEST(igt_ggtt_page),
+		SUBTEST(igt_ggtt_misaligned_pin),
 		SUBTEST(igt_cs_tlb),
 	};
 
-	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
+	GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
 
 	return i915_subtests(tests, i915);
 }