author	Matthew Auld <matthew.auld@intel.com>	2019-10-25 16:37:25 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-10-25 22:55:49 +0100
commit	340be48f2c5a3c099ee4a20586f706422d453417 (patch)
tree	507fe766e3c6b0130584cf1e9164e8b93327a8d6 /drivers/gpu/drm/i915/selftests
parent	01377a0d7e6648b050588cf1a6f71a5d8e6ad2b4 (diff)
drm/i915/selftests: add write-dword test for LMEM
Simple test writing to dwords across an object, using various engines in
a randomized order, checking that our writes land from the cpu.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191025153728.23689-4-chris@chris-wilson.co.uk
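In outline, the new subtest allocates an LMEM object, pins its backing pages,
binds a vma in the context's address space, and then loops until a timeout:
each iteration picks an engine from a randomized order, has the GPU fill one
dword in every page of the object, and reads the value back from the CPU
through the write-combined LMEM mapping. A condensed sketch of that loop,
using the helpers added by this patch (the authoritative version is
igt_gpu_write() in the diff below):

	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		/* next engine from the randomized order; engines that
		 * cannot store a dword are skipped */
		err = igt_gpu_write_dw(ce, vma, dword, rng);	/* GPU writes the dword into each page */
		if (!err)
			err = igt_cpu_check(obj, dword, rng);	/* CPU readback via the atomic LMEM io-mapping */
	} while (!err && !__igt_timeout(end_time, NULL));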
Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--	drivers/gpu/drm/i915/selftests/intel_memory_region.c	166
1 file changed, 166 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 2e0f91fac85d..8bc6fadd14fb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -11,9 +11,11 @@
#include "mock_gem_device.h"
#include "mock_region.h"
+#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_object_blt.h"
+#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"
#include "selftests/igt_flush_test.h"
@@ -256,6 +258,125 @@ err_close_objects:
return err;
}
+static int igt_gpu_write_dw(struct intel_context *ce,
+ struct i915_vma *vma,
+ u32 dword,
+ u32 value)
+{
+ return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
+ vma->size >> PAGE_SHIFT, value);
+}
+
+static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
+{
+ unsigned long n;
+ int err;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_wc_domain(obj, false);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return err;
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ return err;
+
+ for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
+ u32 __iomem *base;
+ u32 read_val;
+
+ base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+
+ read_val = ioread32(base + dword);
+ io_mapping_unmap_atomic(base);
+ if (read_val != val) {
+ pr_err("n=%lu base[%u]=%u, val=%u\n",
+ n, dword, read_val, val);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ return err;
+}
+
+static int igt_gpu_write(struct i915_gem_context *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_engines *engines;
+ struct i915_gem_engines_iter it;
+ struct i915_address_space *vm;
+ struct intel_context *ce;
+ I915_RND_STATE(prng);
+ IGT_TIMEOUT(end_time);
+ unsigned int count;
+ struct i915_vma *vma;
+ int *order;
+ int i, n;
+ int err = 0;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
+ n = 0;
+ count = 0;
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ count++;
+ if (!intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ vm = ce->vm;
+ n++;
+ }
+ i915_gem_context_unlock_engines(ctx);
+ if (!n)
+ return 0;
+
+ order = i915_random_order(count * count, &prng);
+ if (!order)
+ return -ENOMEM;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_free;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto out_free;
+
+ i = 0;
+ engines = i915_gem_context_lock_engines(ctx);
+ do {
+ u32 rng = prandom_u32_state(&prng);
+ u32 dword = offset_in_page(rng) / 4;
+
+ ce = engines->engines[order[i] % engines->num_engines];
+ i = (i + 1) % (count * count);
+ if (!ce || !intel_engine_can_store_dword(ce->engine))
+ continue;
+
+ err = igt_gpu_write_dw(ce, vma, dword, rng);
+ if (err)
+ break;
+
+ err = igt_cpu_check(obj, dword, rng);
+ if (err)
+ break;
+ } while (!__igt_timeout(end_time, NULL));
+ i915_gem_context_unlock_engines(ctx);
+
+out_free:
+ kfree(order);
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ return err;
+}
+
static int igt_lmem_create(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -277,6 +398,50 @@ out_put:
return err;
}
+static int igt_lmem_write_gpu(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ struct drm_file *file;
+ I915_RND_STATE(prng);
+ u32 sz;
+ int err;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+
+ obj = i915_gem_object_create_lmem(i915, sz, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_file;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_put;
+
+ err = igt_gpu_write(ctx, obj);
+ if (err)
+ pr_err("igt_gpu_write failed(%d)\n", err);
+
+ i915_gem_object_unpin_pages(obj);
+out_put:
+ i915_gem_object_put(obj);
+out_file:
+ mock_file_free(i915, file);
+ return err;
+}
+
static int igt_lmem_write_cpu(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -421,6 +586,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(igt_lmem_create),
SUBTEST(igt_lmem_write_cpu),
+ SUBTEST(igt_lmem_write_gpu),
};
if (!HAS_LMEM(i915)) {