author    David Kershner <david.kershner@intel.com>    2023-10-05 18:00:39 -0400
committer Rodrigo Vivi <rodrigo.vivi@intel.com>        2023-12-21 11:42:56 -0500
commit    d9e85dd5c24d9503391440c65a09fdc69d486d55 (patch)
tree      78fc3b9c3c30bd27c03ac30c5add5381265a1850 /drivers/gpu/drm/xe/xe_migrate.c
parent    dfc83d4293f3f0b26d38952b3e491c1ed5f36b38 (diff)
drm/xe/xe_migrate.c: Use DPA offset for page table entries.
Device Physical Address (DPA) is the starting offset of device memory. Update the xe_migrate identity map base PTE entries to start at dpa_base instead of 0. The VM offset value should be 0-relative instead of DPA-relative.

Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Reviewed-by: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
Signed-off-by: David Kershner <david.kershner@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
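For orientation, here is a minimal standalone sketch of the address math this patch corrects. It mirrors the xe_migrate_vram_ofs() change in the diff below, but takes dpa_base directly instead of reading it from struct xe_device, and it assumes xe_pt_shift(2) == 30 (1 GiB pages), so the identity map begins at VA 256 GiB:

/* Sketch: 0-relative offset into the migrate VM's identity map.
 * Assumption: xe_pt_shift(2) == 30, i.e. 256 PTEs of 1 GiB each,
 * so the identity map occupies VA [256 GiB, 512 GiB).
 */
static u64 vram_ofs_sketch(u64 dpa_base, u64 addr)
{
	u64 identity_base = 256ULL << 30; /* VA where the identity map begins */

	addr -= dpa_base; /* rebase the DPA to a 0-relative VRAM offset */
	return addr + identity_base;
}

For example, with dpa_base = 8 GiB, a BO at device address 8 GiB + 4 KiB now yields offset 256 GiB + 4 KiB; before this patch the subtraction was missing, so on devices with a nonzero dpa_base every lookup landed dpa_base too high in the identity map.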
Diffstat (limited to 'drivers/gpu/drm/xe/xe_migrate.c')
-rw-r--r-- drivers/gpu/drm/xe/xe_migrate.c | 27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 4b7210c793f5..4dc52ac26d52 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -114,8 +114,13 @@ static u64 xe_migrate_vm_addr(u64 slot, u32 level)
return (slot + 1ULL) << xe_pt_shift(level + 1);
}
-static u64 xe_migrate_vram_ofs(u64 addr)
+static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr)
{
+ /*
+ * Remove the DPA to get a correct offset into identity table for the
+ * migrate offset
+ */
+ addr -= xe->mem.vram.dpa_base;
return addr + (256ULL << xe_pt_shift(2));
}
@@ -149,7 +154,7 @@ static int xe_migrate_create_cleared_bo(struct xe_migrate *m, struct xe_vm *vm)
xe_map_memset(xe, &m->cleared_bo->vmap, 0, 0x00, cleared_size);
vram_addr = xe_bo_addr(m->cleared_bo, 0, XE_PAGE_SIZE);
- m->cleared_vram_ofs = xe_migrate_vram_ofs(vram_addr);
+ m->cleared_vram_ofs = xe_migrate_vram_ofs(xe, vram_addr);
return 0;
}
@@ -225,12 +230,12 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
} else {
u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
- m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
+ m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
if (xe->info.supports_usm) {
batch = tile->primary_gt->usm.bb_pool->bo;
batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
- m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
+ m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr);
}
}
@@ -268,7 +273,9 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
* Use 1GB pages, it shouldn't matter the physical amount of
* vram is less, when we don't access it.
*/
- for (pos = 0; pos < xe->mem.vram.actual_physical_size; pos += SZ_1G, ofs += 8)
+ for (pos = xe->mem.vram.dpa_base;
+ pos < xe->mem.vram.actual_physical_size + xe->mem.vram.dpa_base;
+ pos += SZ_1G, ofs += 8)
xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
}
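The hunk above is the producer side of the same fix: the identity map's 1 GiB PTEs now carry physical addresses that start at dpa_base rather than 0. A self-contained sketch of that population loop, under the same assumptions as before (the bare u64 PTE array and the function name are hypothetical; SZ_1G is redefined locally for self-containment):

#define SKETCH_SZ_1G (1ULL << 30)

/* Sketch: write one 1 GiB identity-map PTE per slot, with the
 * physical range starting at dpa_base instead of 0.
 */
static void fill_identity_map_sketch(u64 *pt, u64 dpa_base, u64 vram_size,
				     u64 flags)
{
	u64 pos;
	unsigned int i = 0;

	for (pos = dpa_base; pos < dpa_base + vram_size; pos += SKETCH_SZ_1G)
		pt[i++] = pos | flags;
}

Paired with the rebased lookup in xe_migrate_vram_ofs(), VA 256 GiB + off now resolves through these PTEs to DPA dpa_base + off, so both sides of the identity map agree again.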
@@ -443,8 +450,8 @@ static u32 pte_update_size(struct xe_migrate *m,
cmds += cmd_size;
} else {
/* Offset into identity map. */
- *L0_ofs = xe_migrate_vram_ofs(cur->start +
- vram_region_gpu_offset(res));
+ *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
+ cur->start + vram_region_gpu_offset(res));
cmds += cmd_size;
}
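A note on the hunk above: cur->start is an offset within the VRAM resource, and adding vram_region_gpu_offset(res) is expected to yield a DPA-based address, which the reworked xe_migrate_vram_ofs() then rebases to a 0-relative identity-map offset; passing tile_to_xe(m->tile) simply gives the helper access to xe->mem.vram.dpa_base.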
@@ -1060,10 +1067,10 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
* pages are used. Hence the assert.
*/
xe_tile_assert(tile, update->qwords <= 0x1ff);
- if (!ppgtt_ofs) {
- ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0,
+ if (!ppgtt_ofs)
+ ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
+ xe_bo_addr(update->pt_bo, 0,
XE_PAGE_SIZE));
- }
do {
u64 addr = ppgtt_ofs + ofs * 8;
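Taken together, a worked example (the dpa_base value is illustrative, not from the patch): with xe->mem.vram.dpa_base = 0x200000000 (8 GiB), a page-table BO whose xe_bo_addr() returns 0x200001000 produces ppgtt_ofs = (0x200001000 - 0x200000000) + (256ULL << 30) = 0x4000001000, a 0-relative migrate-VM offset; addr = ppgtt_ofs + ofs * 8 then addresses individual qwords within that page table.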