Diffstat (limited to 'drivers/gpu/drm/xe/tests')
-rw-r--r--  drivers/gpu/drm/xe/tests/Makefile           |   4
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_bo.c            | 303
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_bo_test.c       |  25
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_dma_buf.c       | 259
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_dma_buf_test.c  |  23
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate.c       | 378
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate_test.c  |  23
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_test.h          |  66
8 files changed, 1081 insertions, 0 deletions
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile
new file mode 100644
index 000000000000..47056b6459e3
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_XE_KUNIT_TEST) += xe_bo_test.o xe_dma_buf_test.o \
+ xe_migrate_test.o
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
new file mode 100644
index 000000000000..87ac21cc8ca9
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_bo_evict.h"
+#include "xe_pci.h"
+
+static int ccs_test_migrate(struct xe_gt *gt, struct xe_bo *bo,
+ bool clear, u64 get_val, u64 assign_val,
+ struct kunit *test)
+{
+ struct dma_fence *fence;
+ struct ttm_tt *ttm;
+ struct page *page;
+ pgoff_t ccs_page;
+ long timeout;
+ u64 *cpu_map;
+ int ret;
+ u32 offset;
+
+ /* Move bo to VRAM if not already there. */
+ ret = xe_bo_validate(bo, NULL, false);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to validate bo.\n");
+ return ret;
+ }
+
+ /* Optionally clear bo *and* CCS data in VRAM. */
+ if (clear) {
+ fence = xe_migrate_clear(gt->migrate, bo, bo->ttm.resource, 0);
+ if (IS_ERR(fence)) {
+ KUNIT_FAIL(test, "Failed to submit bo clear.\n");
+ return PTR_ERR(fence);
+ }
+ dma_fence_put(fence);
+ }
+
+ /* Evict to system. CCS data should be copied. */
+ ret = xe_bo_evict(bo, true);
+ if (ret) {
+ KUNIT_FAIL(test, "Failed to evict bo.\n");
+ return ret;
+ }
+
+ /* Sync all migration blits */
+ timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ true,
+ 5 * HZ);
+ if (timeout <= 0) {
+ KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
+ return -ETIME;
+ }
+
+ /*
+ * Bo with CCS data is now in system memory. Verify backing store
+ * and data integrity. Then assign for the next testing round while
+ * we still have a CPU map.
+ */
+ ttm = bo->ttm.ttm;
+ if (!ttm || !ttm_tt_is_populated(ttm)) {
+ KUNIT_FAIL(test, "Bo was not in expected placement.\n");
+ return -EINVAL;
+ }
+
+ ccs_page = xe_bo_ccs_pages_start(bo) >> PAGE_SHIFT;
+ if (ccs_page >= ttm->num_pages) {
+ KUNIT_FAIL(test, "No TTM CCS pages present.\n");
+ return -EINVAL;
+ }
+
+ page = ttm->pages[ccs_page];
+ cpu_map = kmap_local_page(page);
+
+ /* Check first CCS value */
+ if (cpu_map[0] != get_val) {
+ KUNIT_FAIL(test,
+ "Expected CCS readout 0x%016llx, got 0x%016llx.\n",
+ (unsigned long long)get_val,
+ (unsigned long long)cpu_map[0]);
+ ret = -EINVAL;
+ }
+
+ /* Check the last CCS value, or at least the last value in the page. */
+ offset = xe_device_ccs_bytes(gt->xe, bo->size);
+ offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1;
+ if (cpu_map[offset] != get_val) {
+ KUNIT_FAIL(test,
+ "Expected CCS readout 0x%016llx, got 0x%016llx.\n",
+ (unsigned long long)get_val,
+ (unsigned long long)cpu_map[offset]);
+ ret = -EINVAL;
+ }
+
+ cpu_map[0] = assign_val;
+ cpu_map[offset] = assign_val;
+ kunmap_local(cpu_map);
+
+ return ret;
+}
+
+static void ccs_test_run_gt(struct xe_device *xe, struct xe_gt *gt,
+ struct kunit *test)
+{
+ struct xe_bo *bo;
+ u32 vram_bit;
+ int ret;
+
+ /* TODO: Sanity check */
+ vram_bit = XE_BO_CREATE_VRAM0_BIT << gt->info.vram_id;
+ kunit_info(test, "Testing gt id %u vram id %u\n", gt->info.id,
+ gt->info.vram_id);
+
+ bo = xe_bo_create_locked(xe, NULL, NULL, SZ_1M, ttm_bo_type_device,
+ vram_bit);
+ if (IS_ERR(bo)) {
+ KUNIT_FAIL(test, "Failed to create bo.\n");
+ return;
+ }
+
+ kunit_info(test, "Verifying that CCS data is cleared on creation.\n");
+ ret = ccs_test_migrate(gt, bo, false, 0ULL, 0xdeadbeefdeadbeefULL,
+ test);
+ if (ret)
+ goto out_unlock;
+
+ kunit_info(test, "Verifying that CCS data survives migration.\n");
+ ret = ccs_test_migrate(gt, bo, false, 0xdeadbeefdeadbeefULL,
+ 0xdeadbeefdeadbeefULL, test);
+ if (ret)
+ goto out_unlock;
+
+ kunit_info(test, "Verifying that CCS data can be properly cleared.\n");
+ ret = ccs_test_migrate(gt, bo, true, 0ULL, 0ULL, test);
+
+out_unlock:
+ xe_bo_unlock_no_vm(bo);
+ xe_bo_put(bo);
+}
+
+static int ccs_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = xe_cur_kunit();
+ struct xe_gt *gt;
+ int id;
+
+ if (!xe_device_has_flat_ccs(xe)) {
+ kunit_info(test, "Skipping non-flat-ccs device.\n");
+ return 0;
+ }
+
+ for_each_gt(gt, xe, id)
+ ccs_test_run_gt(xe, gt, test);
+
+ return 0;
+}
+
+void xe_ccs_migrate_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(ccs_test_run_device);
+}
+EXPORT_SYMBOL(xe_ccs_migrate_kunit);
+
+static int evict_test_run_gt(struct xe_device *xe, struct xe_gt *gt, struct kunit *test)
+{
+ struct xe_bo *bo, *external;
+ unsigned int bo_flags = XE_BO_CREATE_USER_BIT |
+ XE_BO_CREATE_VRAM_IF_DGFX(gt);
+ struct xe_vm *vm = xe_migrate_get_vm(xe->gt[0].migrate);
+ struct ww_acquire_ctx ww;
+ int err, i;
+
+ kunit_info(test, "Testing device %s gt id %u vram id %u\n",
+ dev_name(xe->drm.dev), gt->info.id, gt->info.vram_id);
+
+ for (i = 0; i < 2; ++i) {
+ xe_vm_lock(vm, &ww, 0, false);
+ bo = xe_bo_create(xe, NULL, vm, 0x10000, ttm_bo_type_device,
+ bo_flags);
+ xe_vm_unlock(vm, &ww);
+ if (IS_ERR(bo)) {
+ KUNIT_FAIL(test, "bo create err=%pe\n", bo);
+ break;
+ }
+
+ external = xe_bo_create(xe, NULL, NULL, 0x10000,
+ ttm_bo_type_device, bo_flags);
+ if (IS_ERR(external)) {
+ KUNIT_FAIL(test, "external bo create err=%pe\n", external);
+ goto cleanup_bo;
+ }
+
+ xe_bo_lock(external, &ww, 0, false);
+ err = xe_bo_pin_external(external);
+ xe_bo_unlock(external, &ww);
+ if (err) {
+ KUNIT_FAIL(test, "external bo pin err=%pe\n",
+ ERR_PTR(err));
+ goto cleanup_external;
+ }
+
+ err = xe_bo_evict_all(xe);
+ if (err) {
+ KUNIT_FAIL(test, "evict err=%pe\n", ERR_PTR(err));
+ goto cleanup_all;
+ }
+
+ err = xe_bo_restore_kernel(xe);
+ if (err) {
+ KUNIT_FAIL(test, "restore kernel err=%pe\n",
+ ERR_PTR(err));
+ goto cleanup_all;
+ }
+
+ err = xe_bo_restore_user(xe);
+ if (err) {
+ KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
+ goto cleanup_all;
+ }
+
+ if (!xe_bo_is_vram(external)) {
+ KUNIT_FAIL(test, "external bo is not vram\n");
+ err = -EPROTO;
+ goto cleanup_all;
+ }
+
+ if (xe_bo_is_vram(bo)) {
+ KUNIT_FAIL(test, "bo is vram\n");
+ err = -EPROTO;
+ goto cleanup_all;
+ }
+
+ if (i) {
+ down_read(&vm->lock);
+ xe_vm_lock(vm, &ww, 0, false);
+ err = xe_bo_validate(bo, bo->vm, false);
+ xe_vm_unlock(vm, &ww);
+ up_read(&vm->lock);
+ if (err) {
+ KUNIT_FAIL(test, "bo valid err=%pe\n",
+ ERR_PTR(err));
+ goto cleanup_all;
+ }
+ xe_bo_lock(external, &ww, 0, false);
+ err = xe_bo_validate(external, NULL, false);
+ xe_bo_unlock(external, &ww);
+ if (err) {
+ KUNIT_FAIL(test, "external bo valid err=%pe\n",
+ ERR_PTR(err));
+ goto cleanup_all;
+ }
+ }
+
+ xe_bo_lock(external, &ww, 0, false);
+ xe_bo_unpin_external(external);
+ xe_bo_unlock(external, &ww);
+
+ xe_bo_put(external);
+ xe_bo_put(bo);
+ continue;
+
+cleanup_all:
+ xe_bo_lock(external, &ww, 0, false);
+ xe_bo_unpin_external(external);
+ xe_bo_unlock(external, &ww);
+cleanup_external:
+ xe_bo_put(external);
+cleanup_bo:
+ xe_bo_put(bo);
+ break;
+ }
+
+ xe_vm_put(vm);
+
+ return 0;
+}
+
+static int evict_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = xe_cur_kunit();
+ struct xe_gt *gt;
+ int id;
+
+ if (!IS_DGFX(xe)) {
+ kunit_info(test, "Skipping non-discrete device %s.\n",
+ dev_name(xe->drm.dev));
+ return 0;
+ }
+
+ for_each_gt(gt, xe, id)
+ evict_test_run_gt(xe, gt, test);
+
+ return 0;
+}
+
+void xe_bo_evict_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(evict_test_run_device);
+}
+EXPORT_SYMBOL(xe_bo_evict_kunit);
diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.c b/drivers/gpu/drm/xe/tests/xe_bo_test.c
new file mode 100644
index 000000000000..c8fa29b0b3b2
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_bo_test.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+void xe_ccs_migrate_kunit(struct kunit *test);
+void xe_bo_evict_kunit(struct kunit *test);
+
+static struct kunit_case xe_bo_tests[] = {
+ KUNIT_CASE(xe_ccs_migrate_kunit),
+ KUNIT_CASE(xe_bo_evict_kunit),
+ {}
+};
+
+static struct kunit_suite xe_bo_test_suite = {
+ .name = "xe_bo",
+ .test_cases = xe_bo_tests,
+};
+
+kunit_test_suite(xe_bo_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
new file mode 100644
index 000000000000..615d22e3f731
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_pci.h"
+
+static bool p2p_enabled(struct dma_buf_test_params *params)
+{
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops &&
+ params->attach_ops->allow_peer2peer;
+}
+
+static bool is_dynamic(struct dma_buf_test_params *params)
+{
+ return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops &&
+ params->attach_ops->move_notify;
+}
+
+static void check_residency(struct kunit *test, struct xe_bo *exported,
+ struct xe_bo *imported, struct dma_buf *dmabuf)
+{
+ struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
+ u32 mem_type;
+ int ret;
+
+ xe_bo_assert_held(exported);
+ xe_bo_assert_held(imported);
+
+ mem_type = XE_PL_VRAM0;
+ if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+ /* No VRAM allowed */
+ mem_type = XE_PL_TT;
+ else if (params->force_different_devices && !p2p_enabled(params))
+ /* No P2P */
+ mem_type = XE_PL_TT;
+ else if (params->force_different_devices && !is_dynamic(params) &&
+ (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+ /* Pin migrated to TT */
+ mem_type = XE_PL_TT;
+
+ if (!xe_bo_is_mem_type(exported, mem_type)) {
+ KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n");
+ return;
+ }
+
+ if (xe_bo_is_pinned(exported))
+ return;
+
+ /*
+ * Evict the exporter. Note that the gem object dma_buf member isn't
+ * set from xe_gem_prime_export(), and it's needed for the move_notify()
+ * functionality, so hack that up here. Evicting the exported bo will
+ * also evict the imported bo through the move_notify() functionality if
+ * the importer is on a different device. If they're on the same device,
+ * the exporter and the importer should be the same bo.
+ */
+ swap(exported->ttm.base.dma_buf, dmabuf);
+ ret = xe_bo_evict(exported, true);
+ swap(exported->ttm.base.dma_buf, dmabuf);
+ if (ret) {
+ if (ret != -EINTR && ret != -ERESTARTSYS)
+ KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
+ ret);
+ return;
+ }
+
+ /* Verify that the importer has also been evicted to SYSTEM */
+ if (!xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) {
+ KUNIT_FAIL(test, "Importer wasn't properly evicted.\n");
+ return;
+ }
+
+ /* Re-validate the importer. This should also move the exporter back in. */
+ ret = xe_bo_validate(imported, NULL, false);
+ if (ret) {
+ if (ret != -EINTR && ret != -ERESTARTSYS)
+ KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
+ ret);
+ return;
+ }
+
+ /*
+ * If on different devices, the exporter is kept in system memory if
+ * possible, saving a migration step as the transfer is likely just as
+ * fast from system memory.
+ */
+ if (params->force_different_devices &&
+ params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
+ else
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+
+ if (params->force_different_devices)
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
+ else
+ KUNIT_EXPECT_TRUE(test, exported == imported);
+}
+
+static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
+{
+ struct kunit *test = xe_cur_kunit();
+ struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
+ struct drm_gem_object *import;
+ struct dma_buf *dmabuf;
+ struct xe_bo *bo;
+
+ /* No VRAM on this device? */
+ if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
+ (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+ return;
+
+ kunit_info(test, "running %s\n", __func__);
+ bo = xe_bo_create(xe, NULL, NULL, PAGE_SIZE, ttm_bo_type_device,
+ XE_BO_CREATE_USER_BIT | params->mem_mask);
+ if (IS_ERR(bo)) {
+ KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
+ PTR_ERR(bo));
+ return;
+ }
+
+ dmabuf = xe_gem_prime_export(&bo->ttm.base, 0);
+ if (IS_ERR(dmabuf)) {
+ KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n",
+ PTR_ERR(dmabuf));
+ goto out;
+ }
+
+ import = xe_gem_prime_import(&xe->drm, dmabuf);
+ if (!IS_ERR(import)) {
+ struct xe_bo *import_bo = gem_to_xe_bo(import);
+
+ /*
+ * Did import succeed when it shouldn't due to lack of p2p support?
+ */
+ if (params->force_different_devices &&
+ !p2p_enabled(params) &&
+ !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+ KUNIT_FAIL(test,
+ "xe_gem_prime_import() succeeded when it shouldn't have\n");
+ } else {
+ int err;
+
+ /* Is everything where we expect it to be? */
+ xe_bo_lock_no_vm(import_bo, NULL);
+ err = xe_bo_validate(import_bo, NULL, false);
+ if (err && err != -EINTR && err != -ERESTARTSYS)
+ KUNIT_FAIL(test,
+ "xe_bo_validate() failed with err=%d\n", err);
+
+ check_residency(test, bo, import_bo, dmabuf);
+ xe_bo_unlock_no_vm(import_bo);
+ }
+ drm_gem_object_put(import);
+ } else if (PTR_ERR(import) != -EOPNOTSUPP) {
+ /* Unexpected error code. */
+ KUNIT_FAIL(test,
+ "xe_gem_prime_import failed with the wrong err=%ld\n",
+ PTR_ERR(import));
+ } else if (!params->force_different_devices ||
+ p2p_enabled(params) ||
+ (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+ /* Shouldn't fail if we can reuse the same bo, use p2p or use system */
+ KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
+ PTR_ERR(import));
+ }
+ dma_buf_put(dmabuf);
+out:
+ drm_gem_object_put(&bo->ttm.base);
+}
+
+static const struct dma_buf_attach_ops nop2p_attach_ops = {
+ .allow_peer2peer = false,
+ .move_notify = xe_dma_buf_move_notify
+};
+
+/*
+ * We test the implementation with bos of different residency and with
+ * importers of different capabilities; some lacking p2p support and some
+ * lacking dynamic capabilities (attach_ops == NULL). We also fake
+ * different devices to avoid the import shortcut that just reuses the same
+ * gem object.
+ */
+static const struct dma_buf_test_params test_params[] = {
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops},
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &nop2p_attach_ops},
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &nop2p_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT},
+ {.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ .attach_ops = &nop2p_attach_ops},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ .attach_ops = &nop2p_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &xe_dma_buf_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &nop2p_attach_ops},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ .attach_ops = &nop2p_attach_ops,
+ .force_different_devices = true},
+
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
+ {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+ .force_different_devices = true},
+
+ {}
+};
+
+static int dma_buf_run_device(struct xe_device *xe)
+{
+ const struct dma_buf_test_params *params;
+ struct kunit *test = xe_cur_kunit();
+
+ for (params = test_params; params->mem_mask; ++params) {
+ struct dma_buf_test_params p = *params;
+
+ p.base.id = XE_TEST_LIVE_DMA_BUF;
+ test->priv = &p;
+ xe_test_dmabuf_import_same_driver(xe);
+ }
+
+ /* A non-zero return would halt iteration over driver devices */
+ return 0;
+}
+
+void xe_dma_buf_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(dma_buf_run_device);
+}
+EXPORT_SYMBOL(xe_dma_buf_kunit);
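Note: struct dma_buf_test_params and the to_dma_buf_test_params() helper used above are not added by this diff; they are assumed to live in the driver's xe_dma_buf.c behind XE_TEST_DECLARE(). A minimal sketch of the assumed shape, for orientation only (field order and naming may differ in the actual driver):

	/* Sketch only -- the real definition lives outside this diff. */
	struct dma_buf_test_params {
		struct xe_test_priv base;	/* embedded base; .id set per test run */
		const struct dma_buf_attach_ops *attach_ops;
		bool force_different_devices;
		u32 mem_mask;
	};

	#define to_dma_buf_test_params(_priv) \
		container_of(_priv, struct dma_buf_test_params, base)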
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
new file mode 100644
index 000000000000..7bb292da1193
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+void xe_dma_buf_kunit(struct kunit *test);
+
+static struct kunit_case xe_dma_buf_tests[] = {
+ KUNIT_CASE(xe_dma_buf_kunit),
+ {}
+};
+
+static struct kunit_suite xe_dma_buf_test_suite = {
+ .name = "xe_dma_buf",
+ .test_cases = xe_dma_buf_tests,
+};
+
+kunit_test_suite(xe_dma_buf_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
new file mode 100644
index 000000000000..0f3b819f0a34
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2022 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+#include "xe_pci.h"
+
+static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence,
+ const char *str, struct kunit *test)
+{
+ long ret;
+
+ if (IS_ERR(fence)) {
+ KUNIT_FAIL(test, "Failed to create fence for %s: %li\n", str,
+ PTR_ERR(fence));
+ return true;
+ }
+ if (!fence)
+ return true;
+
+ ret = dma_fence_wait_timeout(fence, false, 5 * HZ);
+ if (ret <= 0) {
+ KUNIT_FAIL(test, "Fence timed out for %s: %li\n", str, ret);
+ return true;
+ }
+
+ return false;
+}
+
+static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
+ struct xe_bb *bb, u32 second_idx, const char *str,
+ struct kunit *test)
+{
+ struct xe_sched_job *job = xe_bb_create_migration_job(m->eng, bb,
+ m->batch_base_ofs,
+ second_idx);
+ struct dma_fence *fence;
+
+ if (IS_ERR(job)) {
+ KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
+ PTR_ERR(job));
+ return PTR_ERR(job);
+ }
+
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ if (sanity_fence_failed(xe, fence, str, test))
+ return -ETIMEDOUT;
+
+ dma_fence_put(fence);
+ kunit_info(test, "%s: Job completed\n", str);
+ return 0;
+}
+
+static void
+sanity_populate_cb(struct xe_migrate_pt_update *pt_update,
+ struct xe_gt *gt, struct iosys_map *map, void *dst,
+ u32 qword_ofs, u32 num_qwords,
+ const struct xe_vm_pgtable_update *update)
+{
+ int i;
+ u64 *ptr = dst;
+
+ for (i = 0; i < num_qwords; i++)
+ ptr[i] = (qword_ofs + i - update->ofs) * 0x1111111111111111ULL;
+}
+
+static const struct xe_migrate_pt_update_ops sanity_ops = {
+ .populate = sanity_populate_cb,
+};
+
+#define check(_retval, _expected, str, _test) \
+ do { if ((_retval) != (_expected)) { \
+ KUNIT_FAIL(_test, "Sanity check failed: " str \
+ " expected %llx, got %llx\n", \
+ (u64)(_expected), (u64)(_retval)); \
+ } } while (0)
+
+static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
+ struct kunit *test)
+{
+ struct xe_device *xe = gt_to_xe(m->gt);
+ u64 retval, expected = 0xc0c0c0c0c0c0c0c0ULL;
+ bool big = bo->size >= SZ_2M;
+ struct dma_fence *fence;
+ const char *str = big ? "Copying big bo" : "Copying small bo";
+ int err;
+
+ struct xe_bo *sysmem = xe_bo_create_locked(xe, m->gt, NULL,
+ bo->size,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_SYSTEM_BIT);
+ if (IS_ERR(sysmem)) {
+ KUNIT_FAIL(test, "Failed to allocate sysmem bo for %s: %li\n",
+ str, PTR_ERR(sysmem));
+ return;
+ }
+
+ err = xe_bo_validate(sysmem, NULL, false);
+ if (err) {
+ KUNIT_FAIL(test, "Failed to validate system bo for %s: %li\n",
+ str, err);
+ goto out_unlock;
+ }
+
+ err = xe_bo_vmap(sysmem);
+ if (err) {
+ KUNIT_FAIL(test, "Failed to vmap system bo for %s: %li\n",
+ str, err);
+ goto out_unlock;
+ }
+
+ xe_map_memset(xe, &sysmem->vmap, 0, 0xd0, sysmem->size);
+ fence = xe_migrate_clear(m, sysmem, sysmem->ttm.resource, 0xc0c0c0c0);
+ if (!sanity_fence_failed(xe, fence, big ? "Clearing sysmem big bo" :
+ "Clearing sysmem small bo", test)) {
+ retval = xe_map_rd(xe, &sysmem->vmap, 0, u64);
+ check(retval, expected, "sysmem first offset should be cleared",
+ test);
+ retval = xe_map_rd(xe, &sysmem->vmap, sysmem->size - 8, u64);
+ check(retval, expected, "sysmem last offset should be cleared",
+ test);
+ }
+ dma_fence_put(fence);
+
+ /* Try to copy 0xc0 from sysmem to lmem with 2MB or 64KiB/4KiB pages */
+ xe_map_memset(xe, &sysmem->vmap, 0, 0xc0, sysmem->size);
+ xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);
+
+ fence = xe_migrate_copy(m, sysmem, sysmem->ttm.resource,
+ bo->ttm.resource);
+ if (!sanity_fence_failed(xe, fence, big ? "Copying big bo sysmem -> vram" :
+ "Copying small bo sysmem -> vram", test)) {
+ retval = xe_map_rd(xe, &bo->vmap, 0, u64);
+ check(retval, expected,
+ "sysmem -> vram bo first offset should be copied", test);
+ retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64);
+ check(retval, expected,
+ "sysmem -> vram bo last offset should be copied", test);
+ }
+ dma_fence_put(fence);
+
+ /* And other way around.. slightly hacky.. */
+ xe_map_memset(xe, &sysmem->vmap, 0, 0xd0, sysmem->size);
+ xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);
+
+ fence = xe_migrate_copy(m, sysmem, bo->ttm.resource,
+ sysmem->ttm.resource);
+ if (!sanity_fence_failed(xe, fence, big ? "Copying big bo vram -> sysmem" :
+ "Copying small bo vram -> sysmem", test)) {
+ retval = xe_map_rd(xe, &sysmem->vmap, 0, u64);
+ check(retval, expected,
+ "vram -> sysmem bo first offset should be copied", test);
+ retval = xe_map_rd(xe, &sysmem->vmap, bo->size - 8, u64);
+ check(retval, expected,
+ "vram -> sysmem bo last offset should be copied", test);
+ }
+ dma_fence_put(fence);
+
+ xe_bo_vunmap(sysmem);
+out_unlock:
+ xe_bo_unlock_no_vm(sysmem);
+ xe_bo_put(sysmem);
+}
+
+static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
+ struct kunit *test)
+{
+ struct xe_device *xe = gt_to_xe(m->gt);
+ struct dma_fence *fence;
+ u64 retval, expected;
+ int i;
+
+ struct xe_vm_pgtable_update update = {
+ .ofs = 1,
+ .qwords = 0x10,
+ .pt_bo = pt,
+ };
+ struct xe_migrate_pt_update pt_update = {
+ .ops = &sanity_ops,
+ };
+
+ /* Test xe_migrate_update_pgtables() updates the pagetable as expected */
+ expected = 0xf0f0f0f0f0f0f0f0ULL;
+ xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size);
+
+ fence = xe_migrate_update_pgtables(m, NULL, NULL, m->eng, &update, 1,
+ NULL, 0, &pt_update);
+ if (sanity_fence_failed(xe, fence, "Migration pagetable update", test))
+ return;
+
+ dma_fence_put(fence);
+ retval = xe_map_rd(xe, &pt->vmap, 0, u64);
+ check(retval, expected, "PTE[0] must stay untouched", test);
+
+ for (i = 0; i < update.qwords; i++) {
+ retval = xe_map_rd(xe, &pt->vmap, (update.ofs + i) * 8, u64);
+ check(retval, i * 0x1111111111111111ULL, "PTE update", test);
+ }
+
+ retval = xe_map_rd(xe, &pt->vmap, 8 * (update.ofs + update.qwords),
+ u64);
+ check(retval, expected, "PTE[0x11] must stay untouched", test);
+}
+
+static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
+{
+ struct xe_gt *gt = m->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
+ struct xe_res_cursor src_it;
+ struct dma_fence *fence;
+ u64 retval, expected;
+ struct xe_bb *bb;
+ int err;
+ u8 id = gt->info.id;
+
+ err = xe_bo_vmap(bo);
+ if (err) {
+ KUNIT_FAIL(test, "Failed to vmap our pagetables: %li\n",
+ PTR_ERR(bo));
+ return;
+ }
+
+ big = xe_bo_create_pin_map(xe, m->gt, m->eng->vm, SZ_4M,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
+ XE_BO_CREATE_PINNED_BIT);
+ if (IS_ERR(big)) {
+ KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
+ goto vunmap;
+ }
+
+ pt = xe_bo_create_pin_map(xe, m->gt, m->eng->vm, GEN8_PAGE_SIZE,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
+ XE_BO_CREATE_PINNED_BIT);
+ if (IS_ERR(pt)) {
+ KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
+ PTR_ERR(pt));
+ goto free_big;
+ }
+
+ tiny = xe_bo_create_pin_map(xe, m->gt, m->eng->vm,
+ 2 * SZ_4K,
+ ttm_bo_type_kernel,
+ XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
+ XE_BO_CREATE_PINNED_BIT);
+ if (IS_ERR(tiny)) {
+ KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
+ PTR_ERR(pt));
+ goto free_pt;
+ }
+
+ bb = xe_bb_new(m->gt, 32, xe->info.supports_usm);
+ if (IS_ERR(bb)) {
+ KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
+ PTR_ERR(bb));
+ goto free_tiny;
+ }
+
+ kunit_info(test, "Starting tests, top level PT addr: %llx, special pagetable base addr: %llx\n",
+ xe_bo_main_addr(m->eng->vm->pt_root[id]->bo, GEN8_PAGE_SIZE),
+ xe_bo_main_addr(m->pt_bo, GEN8_PAGE_SIZE));
+
+ /* First part of the test: are we updating our pagetable bo with a new entry? */
+ xe_map_wr(xe, &bo->vmap, GEN8_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64, 0xdeaddeadbeefbeef);
+ expected = gen8_pte_encode(NULL, pt, 0, XE_CACHE_WB, 0, 0);
+ if (m->eng->vm->flags & XE_VM_FLAGS_64K)
+ expected |= GEN12_PTE_PS64;
+ xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
+ emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt),
+ &src_it, GEN8_PAGE_SIZE, pt);
+ run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);
+
+ retval = xe_map_rd(xe, &bo->vmap, GEN8_PAGE_SIZE * (NUM_KERNEL_PDE - 1),
+ u64);
+ check(retval, expected, "PTE entry write", test);
+
+ /* Now try to write data to our newly mapped 'pagetable', see if it succeeds */
+ bb->len = 0;
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead);
+ expected = 0x12345678U;
+
+ emit_clear(m->gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4,
+ expected, IS_DGFX(xe));
+ run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable",
+ test);
+
+ retval = xe_map_rd(xe, &pt->vmap, 0, u32);
+ check(retval, expected, "Write to PT after adding PTE", test);
+
+ /* Sanity checks passed, try the full ones! */
+
+ /* Clear a small bo */
+ kunit_info(test, "Clearing small buffer object\n");
+ xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size);
+ expected = 0x224488ff;
+ fence = xe_migrate_clear(m, tiny, tiny->ttm.resource, expected);
+ if (sanity_fence_failed(xe, fence, "Clearing small bo", test))
+ goto out;
+
+ dma_fence_put(fence);
+ retval = xe_map_rd(xe, &tiny->vmap, 0, u32);
+ check(retval, expected, "Command clear small first value", test);
+ retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
+ check(retval, expected, "Command clear small last value", test);
+
+ if (IS_DGFX(xe)) {
+ kunit_info(test, "Copying small buffer object to system\n");
+ test_copy(m, tiny, test);
+ }
+
+ /* Clear a big bo with a fixed value */
+ kunit_info(test, "Clearing big buffer object\n");
+ xe_map_memset(xe, &big->vmap, 0, 0x11, big->size);
+ expected = 0x11223344U;
+ fence = xe_migrate_clear(m, big, big->ttm.resource, expected);
+ if (sanity_fence_failed(xe, fence, "Clearing big bo", test))
+ goto out;
+
+ dma_fence_put(fence);
+ retval = xe_map_rd(xe, &big->vmap, 0, u32);
+ check(retval, expected, "Command clear big first value", test);
+ retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
+ check(retval, expected, "Command clear big last value", test);
+
+ if (IS_DGFX(xe)) {
+ kunit_info(test, "Copying big buffer object to system\n");
+ test_copy(m, big, test);
+ }
+
+ test_pt_update(m, pt, test);
+
+out:
+ xe_bb_free(bb, NULL);
+free_tiny:
+ xe_bo_unpin(tiny);
+ xe_bo_put(tiny);
+free_pt:
+ xe_bo_unpin(pt);
+ xe_bo_put(pt);
+free_big:
+ xe_bo_unpin(big);
+ xe_bo_put(big);
+vunmap:
+ xe_bo_vunmap(m->pt_bo);
+}
+
+static int migrate_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = xe_cur_kunit();
+ struct xe_gt *gt;
+ int id;
+
+ for_each_gt(gt, xe, id) {
+ struct xe_migrate *m = gt->migrate;
+ struct ww_acquire_ctx ww;
+
+ kunit_info(test, "Testing gt id %d.\n", id);
+ xe_vm_lock(m->eng->vm, &ww, 0, true);
+ xe_migrate_sanity_test(m, test);
+ xe_vm_unlock(m->eng->vm, &ww);
+ }
+
+ return 0;
+}
+
+void xe_migrate_sanity_kunit(struct kunit *test)
+{
+ xe_call_for_each_device(migrate_test_run_device);
+}
+EXPORT_SYMBOL(xe_migrate_sanity_kunit);
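The bo, dma-buf and migrate tests above all follow the same dispatch pattern: an exported xe_*_kunit() entry point calls xe_call_for_each_device(), provided via xe_pci.h, which runs a per-device callback on every bound xe device and halts the iteration if the callback returns non-zero. A hypothetical sketch of how a further live test would plug into this scheme (xe_foo_kunit and foo_test_run_device are illustrative names only, not part of this diff):

	/* Sketch only: a hypothetical additional live test using the same pattern. */
	static int foo_test_run_device(struct xe_device *xe)
	{
		struct kunit *test = xe_cur_kunit();

		kunit_info(test, "Testing device %s\n", dev_name(xe->drm.dev));
		/* ... exercise the driver on this device ... */

		return 0;	/* a non-zero return would halt iteration over devices */
	}

	void xe_foo_kunit(struct kunit *test)
	{
		xe_call_for_each_device(foo_test_run_device);
	}
	EXPORT_SYMBOL(xe_foo_kunit);

The matching module stub would then declare xe_foo_kunit() and list it with KUNIT_CASE(), exactly as xe_migrate_test.c does below.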
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.c b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
new file mode 100644
index 000000000000..ad779e2bd071
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_migrate_test.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <kunit/test.h>
+
+void xe_migrate_sanity_kunit(struct kunit *test);
+
+static struct kunit_case xe_migrate_tests[] = {
+ KUNIT_CASE(xe_migrate_sanity_kunit),
+ {}
+};
+
+static struct kunit_suite xe_migrate_test_suite = {
+ .name = "xe_migrate",
+ .test_cases = xe_migrate_tests,
+};
+
+kunit_test_suite(xe_migrate_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/xe/tests/xe_test.h b/drivers/gpu/drm/xe/tests/xe_test.h
new file mode 100644
index 000000000000..1ec502b5acf3
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_test.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __XE_TEST_H__
+#define __XE_TEST_H__
+
+#include <linux/types.h>
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include <linux/sched.h>
+#include <kunit/test.h>
+
+/*
+ * For each test that provides a kunit private test structure, add a test id
+ * here and point kunit->priv at an embedded struct xe_test_priv.
+ */
+enum xe_test_priv_id {
+ XE_TEST_LIVE_DMA_BUF,
+};
+
+/**
+ * struct xe_test_priv - Base class for test private info
+ * @id: enum xe_test_priv_id to identify the subclass.
+ */
+struct xe_test_priv {
+ enum xe_test_priv_id id;
+};
+
+#define XE_TEST_DECLARE(x) x
+#define XE_TEST_ONLY(x) unlikely(x)
+#define XE_TEST_EXPORT
+#define xe_cur_kunit() current->kunit_test
+
+/**
+ * xe_cur_kunit_priv - Obtain the struct xe_test_priv pointed to by the current
+ * kunit test's priv pointer, if it exists and is embedded in the expected subclass.
+ * @id: Id of the expected subclass.
+ *
+ * Return: NULL if the process is not a kunit test, or if the
+ * current kunit->priv pointer is not pointing to an object of the expected
+ * subclass. A pointer to the embedded struct xe_test_priv otherwise.
+ */
+static inline struct xe_test_priv *
+xe_cur_kunit_priv(enum xe_test_priv_id id)
+{
+ struct xe_test_priv *priv;
+
+ if (!xe_cur_kunit())
+ return NULL;
+
+ priv = xe_cur_kunit()->priv;
+ return priv->id == id ? priv : NULL;
+}
+
+#else /* if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) */
+
+#define XE_TEST_DECLARE(x)
+#define XE_TEST_ONLY(x) 0
+#define XE_TEST_EXPORT static
+#define xe_cur_kunit() NULL
+#define xe_cur_kunit_priv(_id) NULL
+
+#endif
+#endif
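The header above is consumed both by the live tests and by driver code built with CONFIG_DRM_XE_KUNIT_TEST: a test publishes a subclassed struct xe_test_priv through kunit->priv, and driver paths query it via xe_cur_kunit_priv() guarded by XE_TEST_ONLY(). A hedged sketch of that pattern, reusing the XE_TEST_LIVE_DMA_BUF id purely for illustration (my_test_priv, my_live_case and should_inject_failure are hypothetical names, not defined by this diff):

	/* Test side: publish a subclassed priv before calling into the driver. */
	struct my_test_priv {
		struct xe_test_priv base;	/* embedded base; .id identifies the subclass */
		bool fail_inject;
	};

	static void my_live_case(struct kunit *test)
	{
		struct my_test_priv priv = { .base.id = XE_TEST_LIVE_DMA_BUF,
					     .fail_inject = true };

		test->priv = &priv.base;
		/* ... call into the driver ... */
	}

	/* Driver side: only ever true when running under the matching test. */
	static bool should_inject_failure(void)
	{
		struct xe_test_priv *base = xe_cur_kunit_priv(XE_TEST_LIVE_DMA_BUF);

		if (!XE_TEST_ONLY(base))
			return false;

		return container_of(base, struct my_test_priv, base)->fail_inject;
	}

With CONFIG_DRM_XE_KUNIT_TEST disabled, XE_TEST_ONLY() expands to 0 and xe_cur_kunit_priv() to NULL, so such hooks effectively compile out of production builds.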