From 187916e6ed9d0c3b3abc27429f7a5f8c936bd1f0 Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Fri, 5 May 2023 20:14:15 +0800 Subject: drm/amdgpu: install stub fence into potential unused fence pointers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When using cpu to update page tables, vm update fences are unused. Install stub fence into these fence pointers instead of NULL to avoid NULL dereference when calling dma_fence_wait() on them. Suggested-by: Christian König Signed-off-by: Lang Yu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3c0310576b3b..b6bd667df676 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1358,6 +1358,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); bo_va->ref_count = 1; + bo_va->last_pt_update = dma_fence_get_stub(); INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); @@ -2067,7 +2068,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->update_funcs = &amdgpu_vm_cpu_funcs; else vm->update_funcs = &amdgpu_vm_sdma_funcs; - vm->last_update = NULL; + + vm->last_update = dma_fence_get_stub(); vm->last_unlocked = dma_fence_get_stub(); vm->last_tlb_flush = dma_fence_get_stub(); @@ -2192,7 +2194,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) goto unreserve_bo; dma_fence_put(vm->last_update); - vm->last_update = NULL; + vm->last_update = dma_fence_get_stub(); vm->is_compute_context = true; /* Free the shadow bo for compute VM */ -- cgit v1.2.3 From f4caf5842652f08e024741ef6d423cb0c101d863 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 14 Sep 2022 16:35:50 +0800 Subject: drm/amdgpu: introduce vmhub definition for multi-partition cases (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v1: Each partition has its own gfxhub or mmhub. 
adjust the num of MAX_VMHUBS and the GFXHUB/MMHUB layout (Le) v2: re-design the AMDGPU_GFXHUB/AMDGPU_MMHUB layout (Le) v3: apply the gfxhub/mmhub layout to new IPs (Hawking) v4: fix up gmc11 (Alex) v5: rebase (Alex) Signed-off-by: Le Ma Acked-by: Christian König Reviewed-by: Hawking Zhang Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 13 +++-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 6 +-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 8 +-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 +-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 4 +- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 12 ++--- drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 33 ++++++------- drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 26 +++++----- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 78 +++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 4 +- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 2 +- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 12 ++--- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 10 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 4 +- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 8 +-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 2 +- 47 files changed, 204 insertions(+), 204 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index a46285841d17..f0a136d35279 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -736,7 +736,7 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0); } else { - amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0); + amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0), 0); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 87e1a1a9f298..488b3bb6dcb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -315,7 +315,7 @@ 
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.kiq; ring->xcc_id = xcc_id; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); if (xcc_id >= 1) ring->doorbell_index = adev->doorbell_index.xcc1_kiq_start + xcc_id - 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 4e2531758866..0a4e5fcfec6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -670,7 +670,7 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + hub->ctx_distance * i; - tmp = (hub_type == AMDGPU_GFXHUB_0) ? + tmp = (hub_type == AMDGPU_GFXHUB(0)) ? RREG32_SOC15_IP(GC, reg) : RREG32_SOC15_IP(MMHUB, reg); @@ -679,7 +679,7 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, else tmp &= ~hub->vm_cntx_cntl_vm_fault; - (hub_type == AMDGPU_GFXHUB_0) ? + (hub_type == AMDGPU_GFXHUB(0)) ? WREG32_SOC15_IP(GC, reg, tmp) : WREG32_SOC15_IP(MMHUB, reg, tmp); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b6bd667df676..c3964c14f215 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2374,12 +2374,12 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case AMDGPU_VM_OP_RESERVE_VMID: /* We only have requirement to reserve vmid from gfxhub */ r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, - AMDGPU_GFXHUB_0); + AMDGPU_GFXHUB(0)); if (r) return r; break; case AMDGPU_VM_OP_UNRESERVE_VMID: - amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); + amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB(0)); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 6f085f0b4ef3..9f5d32b0fda1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -111,11 +111,14 @@ struct amdgpu_mem_stats; /* Reserve 4MB VRAM for page tables */ #define AMDGPU_VM_RESERVED_VRAM (8ULL << 20) -/* max number of VMHUB */ -#define AMDGPU_MAX_VMHUBS 3 -#define AMDGPU_GFXHUB_0 0 -#define AMDGPU_MMHUB_0 1 -#define AMDGPU_MMHUB_1 2 +/* + * max number of VMHUB + * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1 + */ +#define AMDGPU_MAX_VMHUBS 13 +#define AMDGPU_GFXHUB(x) (x) +#define AMDGPU_MMHUB0(x) (8 + x) +#define AMDGPU_MMHUB1(x) (8 + 4 + x) /* Reserve 2MB at top/bottom of address space for kernel use */ #define AMDGPU_VA_RESERVED_SIZE (2ULL << 20) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 8e86b2c23c0a..7b585141e10e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4461,7 +4461,7 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; else ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; @@ -4490,7 +4490,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * 
GFX10_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -4978,7 +4978,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { nv_grbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index f77779c31043..790df2cc3480 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -906,7 +906,7 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; else ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe; @@ -937,7 +937,7 @@ static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX11_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -1707,7 +1707,7 @@ static void gfx_v11_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { soc21_grbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); @@ -4190,7 +4190,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev) false : true; adev->gfxhub.funcs->set_fault_enable_default(adev, value); - amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); + amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 46577b59cb04..91814dc083c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2005,7 +2005,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP @@ -2105,7 +2105,7 @@ static int gfx_v9_0_sw_init(void *handle) /* disable scheduler on the real ring */ ring->no_scheduler = true; - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -2123,7 +2123,7 @@ static int 
gfx_v9_0_sw_init(void *handle) ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1; ring->is_sw_ring = true; hw_prio = amdgpu_sw_ring_priority(i); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio, NULL); @@ -2393,7 +2393,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { soc15_grbm_select(adev, 0, 0, 0, i, 0); /* CP and shaders */ if (i == 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index d648a29c33e0..ec7c049c5952 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1935,7 +1935,7 @@ static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev) u32 status = 0; struct amdgpu_vmhub *hub; - hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; status = RREG32(hub->vm_l2_pro_fault_status); /* reset page fault status */ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 9d17dcfae130..f5104b982633 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -757,7 +757,7 @@ static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id, (adev->doorbell_index.mec_ring0 + ring_id) << 1; ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring_id * GFX9_MEC_HPD_SIZE); - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "comp_%d.%d.%d.%d", ring->xcc_id, ring->me, ring->pipe, ring->queue); @@ -996,7 +996,7 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) { for (j = 0; j < adev->gfx.num_xcd; j++) { soc15_grbm_select(adev, 0, 0, 0, i, j); /* CP and shaders */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index ab2325f6c7ac..d94cc1ec7242 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -40,7 +40,7 @@ static void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -247,7 +247,7 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -307,7 +307,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct 
amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -338,7 +338,7 @@ static int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev) static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -411,7 +411,7 @@ static void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, static void gfxhub_v1_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 79af32bb078c..9c385ce3a8c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -42,7 +42,7 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; for (i = 0; i < adev->gfx.num_xcd; i++) { @@ -291,7 +291,7 @@ static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned num_level, block_size; uint32_t tmp; int i, j; @@ -357,7 +357,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i, j; for (j = 0; j < adev->gfx.num_xcd; j++) { @@ -406,7 +406,7 @@ static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev) static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i, j; @@ -483,7 +483,7 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev, static void gfxhub_v1_2_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index 9b3a02527318..f173a61c6c15 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -120,7 +120,7 @@ static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -282,7 +282,7 @@ static void gfxhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -331,7 +331,7 @@ static void 
gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -360,7 +360,7 @@ static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev) static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -433,7 +433,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = { static void gfxhub_v2_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index 4aacbbec31e2..d8fc3e8088cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -123,7 +123,7 @@ static u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -291,7 +291,7 @@ static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -340,7 +340,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -381,7 +381,7 @@ static int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev) static void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -462,7 +462,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = { static void gfxhub_v2_1_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, @@ -651,7 +651,7 @@ static void gfxhub_v2_1_restore_regs(struct amdgpu_device *adev) static void gfxhub_v2_1_halt(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; int time = 1000; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c index 13712640fa46..c53147f9c9fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c @@ -119,7 +119,7 @@ static u64 gfxhub_v3_0_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v3_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub 
= &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -290,7 +290,7 @@ static void gfxhub_v3_0_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -339,7 +339,7 @@ static void gfxhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v3_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -380,7 +380,7 @@ static int gfxhub_v3_0_gart_enable(struct amdgpu_device *adev) static void gfxhub_v3_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -463,7 +463,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v3_0_vmhub_funcs = { static void gfxhub_v3_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c index 6e0bd628c889..ae777487d72e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c @@ -122,7 +122,7 @@ static u64 gfxhub_v3_0_3_get_mc_fb_offset(struct amdgpu_device *adev) static void gfxhub_v3_0_3_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; WREG32_SOC15_OFFSET(GC, 0, regGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -295,7 +295,7 @@ static void gfxhub_v3_0_3_disable_identity_aperture(struct amdgpu_device *adev) static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; int i; uint32_t tmp; @@ -344,7 +344,7 @@ static void gfxhub_v3_0_3_setup_vmid_config(struct amdgpu_device *adev) static void gfxhub_v3_0_3_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; unsigned i; for (i = 0 ; i < 18; ++i) { @@ -373,7 +373,7 @@ static int gfxhub_v3_0_3_gart_enable(struct amdgpu_device *adev) static void gfxhub_v3_0_3_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; u32 tmp; u32 i; @@ -451,7 +451,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v3_0_3_vmhub_funcs = { static void gfxhub_v3_0_3_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 5697b66bf0de..ea2a448147e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -76,7 +76,7 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -84,11 +84,11 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. */ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); break; case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -96,7 +96,7 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. */ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true); break; default: break; @@ -149,7 +149,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, * be updated to avoid reading an incorrect value due to * the new fast GRBM interface. */ - if ((entry->vmid_src == AMDGPU_GFXHUB_0) && + if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) RREG32(hub->vm_l2_pro_fault_status); @@ -212,8 +212,7 @@ static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev) static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { - return ((vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) && + return ((vmhub == AMDGPU_MMHUB0(0)) && (!amdgpu_sriov_vf(adev))); } @@ -249,7 +248,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, unsigned int i; unsigned char hub_ip = 0; - hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? + hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); @@ -284,7 +283,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * Issue a dummy read to wait for the ACK register to be cleared * to avoid a false ACK due to the new fast GRBM interface. 
*/ - if ((vmhub == AMDGPU_GFXHUB_0) && + if ((vmhub == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, hub_ip); @@ -361,19 +360,19 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, mutex_lock(&adev->mman.gtt_window_lock); - if (vmhub == AMDGPU_MMHUB_0) { - gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0); + if (vmhub == AMDGPU_MMHUB0(0)) { + gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB0(0), 0); mutex_unlock(&adev->mman.gtt_window_lock); return; } - BUG_ON(vmhub != AMDGPU_GFXHUB_0); + BUG_ON(vmhub != AMDGPU_GFXHUB(0)); if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready || amdgpu_in_reset(adev) || ring->sched.ready == false) { - gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0); + gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB(0), 0); mutex_unlock(&adev->mman.gtt_window_lock); return; } @@ -466,7 +465,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, i, flush_type); } else { gmc_v10_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, flush_type); + AMDGPU_GFXHUB(0), flush_type); } if (!adev->enable_mes) break; @@ -534,7 +533,7 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid if (ring->is_mes_queue) return; - if (ring->vm_hub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; @@ -1075,9 +1074,9 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) if (!adev->in_s0ix) adev->gfxhub.funcs->set_fault_enable_default(adev, value); adev->mmhub.funcs->set_fault_enable_default(adev, value); - gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); + gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); if (!adev->in_s0ix) - gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0); + gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 2f570fb5febe..fb2ac31cbba7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -64,7 +64,7 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -72,11 +72,11 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. */ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false); break; case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true); /* GFX HUB */ /* This works because this interrupt is only * enabled at init/resume and disabled in @@ -84,7 +84,7 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * change over the course of suspend/resume. 
*/ if (!adev->in_s0ix) - amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true); + amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true); break; default: break; @@ -110,7 +110,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev, * be updated to avoid reading an incorrect value due to * the new fast GRBM interface. */ - if (entry->vmid_src == AMDGPU_GFXHUB_0) + if (entry->vmid_src == AMDGPU_GFXHUB(0)) RREG32(hub->vm_l2_pro_fault_status); status = RREG32(hub->vm_l2_pro_fault_status); @@ -170,7 +170,7 @@ static void gmc_v11_0_set_irq_funcs(struct amdgpu_device *adev) static bool gmc_v11_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { - return ((vmhub == AMDGPU_MMHUB_0) && + return ((vmhub == AMDGPU_MMHUB0(0)) && (!amdgpu_sriov_vf(adev))); } @@ -202,7 +202,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, unsigned int i; unsigned char hub_ip = 0; - hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? + hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); @@ -251,7 +251,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, hub->eng_distance * eng, 0, hub_ip); /* Issue additional private vm invalidation to MMHUB */ - if ((vmhub != AMDGPU_GFXHUB_0) && + if ((vmhub != AMDGPU_GFXHUB(0)) && (hub->vm_l2_bank_select_reserved_cid2) && !amdgpu_sriov_vf(adev)) { inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2); @@ -284,7 +284,7 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t vmhub, uint32_t flush_type) { - if ((vmhub == AMDGPU_GFXHUB_0) && !adev->gfx.is_poweron) + if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron) return; /* flush hdp cache */ @@ -369,7 +369,7 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, i, flush_type); } else { gmc_v11_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, flush_type); + AMDGPU_GFXHUB(0), flush_type); } } } @@ -435,7 +435,7 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid if (ring->is_mes_queue) return; - if (ring->vm_hub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid; @@ -886,7 +886,7 @@ static int gmc_v11_0_sw_fini(void *handle) static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32(hub->vm_contexts_disable, 0); return; @@ -921,7 +921,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev) false : true; adev->mmhub.funcs->set_fault_enable_default(adev, value); - gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0); + gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6ae5cee9b64b..193ba4d912a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -491,20 +491,20 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * fini/suspend, so the overall state doesn't * change over the course of suspend/resume. 
*/ - if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0)) + if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) continue; - if (j == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP(GC, reg); - else + if (j >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP(MMHUB, reg); + else + tmp = RREG32_SOC15_IP(GC, reg); tmp &= ~bits; - if (j == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP(GC, reg, tmp); - else + if (j >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP(MMHUB, reg, tmp); + else + WREG32_SOC15_IP(GC, reg, tmp); } } break; @@ -519,20 +519,20 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, * fini/suspend, so the overall state doesn't * change over the course of suspend/resume. */ - if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0)) + if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0))) continue; - if (j == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP(GC, reg); - else + if (j >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP(MMHUB, reg); + else + tmp = RREG32_SOC15_IP(GC, reg); tmp |= bits; - if (j == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP(GC, reg, tmp); - else + if (j >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP(MMHUB, reg, tmp); + else + WREG32_SOC15_IP(GC, reg, tmp); } } break; @@ -605,13 +605,13 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (entry->client_id == SOC15_IH_CLIENTID_VMC) { hub_name = "mmhub0"; - hub = &adev->vmhub[AMDGPU_MMHUB_0]; + hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { hub_name = "mmhub1"; - hub = &adev->vmhub[AMDGPU_MMHUB_1]; + hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; } else { hub_name = "gfxhub0"; - hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; } memset(&task_info, 0, sizeof(struct amdgpu_task_info)); @@ -636,7 +636,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, * be updated to avoid reading an incorrect value due to * the new fast GRBM interface. */ - if ((entry->vmid_src == AMDGPU_GFXHUB_0) && + if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) RREG32(hub->vm_l2_pro_fault_status); @@ -649,7 +649,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) { + if (hub == &adev->vmhub[AMDGPU_GFXHUB(0)]) { dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n", cid >= ARRAY_SIZE(gfxhub_client_ids) ?
"unknown" : gfxhub_client_ids[cid], @@ -759,8 +759,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) return false; - return ((vmhub == AMDGPU_MMHUB_0 || - vmhub == AMDGPU_MMHUB_1) && + return ((vmhub == AMDGPU_MMHUB0(0) || + vmhub == AMDGPU_MMHUB1(0)) && (!amdgpu_sriov_vf(adev)) && (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) && (adev->apu_flags & AMD_APU_IS_PICASSO)))); @@ -849,11 +849,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (j = 0; j < adev->usec_timeout; j++) { /* a read return value of 1 means semaphore acquire */ - if (vmhub == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng); - else + if (vmhub >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng); - + else + tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng); if (tmp & 0x1) break; udelay(1); @@ -864,27 +863,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, do { - if (vmhub == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); - else + if (vmhub >= AMDGPU_MMHUB0(0)) WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + else + WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); /* * Issue a dummy read to wait for the ACK register to * be cleared to avoid a false ACK due to the new fast * GRBM interface. */ - if ((vmhub == AMDGPU_GFXHUB_0) && + if ((vmhub == AMDGPU_GFXHUB(0)) && (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng); for (j = 0; j < adev->usec_timeout; j++) { - if (vmhub == AMDGPU_GFXHUB_0) - tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng); - else + if (vmhub >= AMDGPU_MMHUB0(0)) tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng); - + else + tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng); if (tmp & (1 << vmid)) break; udelay(1); @@ -900,10 +898,10 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * add semaphore release after invalidation, * write with 0 means semaphore release */ - if (vmhub == AMDGPU_GFXHUB_0) - WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); + if (vmhub >= AMDGPU_MMHUB0(0)) + WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); else - WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); + WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0); } spin_unlock(&adev->gmc.invalidate_lock); @@ -994,7 +992,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, i, flush_type); } else { gmc_v9_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, flush_type); + AMDGPU_GFXHUB(0), flush_type); } break; } @@ -1060,10 +1058,10 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, uint32_t reg; /* Do nothing because there's no lut register for mmhub1.
*/ - if (ring->vm_hub == AMDGPU_MMHUB_1) + if (ring->vm_hub == AMDGPU_MMHUB1(0)) return; - if (ring->vm_hub == AMDGPU_GFXHUB_0) + if (ring->vm_hub == AMDGPU_GFXHUB(0)) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; @@ -1947,7 +1945,7 @@ static int gmc_v9_0_hw_init(void *handle) adev->mmhub.funcs->set_fault_enable_default(adev, value); } for (i = 0; i < adev->num_vmhubs; ++i) { - if (adev->in_s0ix && (i == AMDGPU_GFXHUB_0)) + if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0))) continue; gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c index a3076eb8af6a..71fe7f6f9889 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c @@ -485,7 +485,7 @@ int jpeg_v1_0_sw_init(void *handle) return r; ring = &adev->jpeg.inst->ring_dec; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 0eddf7c824a7..3a43e42f4834 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -86,7 +86,7 @@ static int jpeg_v2_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index b040f51d9aa9..259b7ba6a842 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -128,9 +128,9 @@ static int jpeg_v2_5_sw_init(void *handle) ring = &adev->jpeg.inst[i].ring_dec; ring->use_doorbell = true; if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i; sprintf(ring->name, "jpeg_dec_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 1c2292cc5f2c..c55386c22311 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -101,7 +101,7 @@ static int jpeg_v3_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 77e1e64aa1d1..d7d5ffc29393 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -108,7 +108,7 @@ static int jpeg_v4_0_sw_init(void *handle) ring = &adev->jpeg.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = amdgpu_sriov_vf(adev) ? 
(((adev->doorbell_index.vcn.vcn_ring0_1) << 1) + 4) : ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1); - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "jpeg_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 4560476c7c31..f1a6abdad21b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -149,7 +149,7 @@ static int mes_v10_1_add_hw_queue(struct amdgpu_mes *mes, { struct amdgpu_device *adev = mes->adev; union MESAPI__ADD_QUEUE mes_add_queue_pkt; - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; uint32_t vm_cntx_cntl = hub->vm_cntx_cntl; memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt)); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 3adb450eec07..9791f3581786 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -164,7 +164,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes, { struct amdgpu_device *adev = mes->adev; union MESAPI__ADD_QUEUE mes_add_queue_pkt; - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; uint32_t vm_cntx_cntl = hub->vm_cntx_cntl; memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt)); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 15e7cbeae75b..fb91b31056ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -54,7 +54,7 @@ static u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -229,7 +229,7 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -285,7 +285,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -338,7 +338,7 @@ static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) static void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -415,7 +415,7 @@ static void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v1_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index 73afbf2facc9..9086f2fdfaf4 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -54,7 +54,7 @@ static u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_7_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); @@ -261,7 +261,7 @@ static void mmhub_v1_7_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -319,7 +319,7 @@ static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_7_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -348,7 +348,7 @@ static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev) static void mmhub_v1_7_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -425,7 +425,7 @@ static void mmhub_v1_7_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v1_7_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 342d1702104c..9ec06f9db761 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -53,7 +53,7 @@ static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev) static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); @@ -253,7 +253,7 @@ static void mmhub_v1_8_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned num_level, block_size; uint32_t tmp; int i; @@ -311,7 +311,7 @@ static void mmhub_v1_8_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v1_8_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -352,7 +352,7 @@ static int mmhub_v1_8_gart_enable(struct amdgpu_device *adev) static void mmhub_v1_8_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -426,7 +426,7 @@ static void mmhub_v1_8_set_fault_enable_default(struct amdgpu_device *adev, bool static 
void mmhub_v1_8_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 278e32db878d..8f76c6ecf50a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -187,7 +187,7 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev, static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -362,7 +362,7 @@ static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -412,7 +412,7 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -441,7 +441,7 @@ static int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -520,7 +520,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v2_0_vmhub_funcs = { static void mmhub_v2_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index fcf2813e70db..8bd0fc8d9d25 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -121,7 +121,7 @@ static void mmhub_v2_3_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); @@ -280,7 +280,7 @@ static void mmhub_v2_3_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -330,7 +330,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v2_3_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -373,7 +373,7 @@ static int mmhub_v2_3_gart_enable(struct amdgpu_device *adev) static void mmhub_v2_3_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = 
&adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -446,7 +446,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v2_3_vmhub_funcs = { static void mmhub_v2_3_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c index 17a792616979..441379e91cfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c @@ -136,7 +136,7 @@ mmhub_v3_0_print_l2_protection_fault_status(struct amdgpu_device *adev, static void mmhub_v3_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -319,7 +319,7 @@ static void mmhub_v3_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -369,7 +369,7 @@ static void mmhub_v3_0_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v3_0_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -398,7 +398,7 @@ static int mmhub_v3_0_gart_enable(struct amdgpu_device *adev) static void mmhub_v3_0_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -477,7 +477,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_vmhub_funcs = { static void mmhub_v3_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c index 26509b6b8c24..12c7f4b46ea9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c @@ -138,7 +138,7 @@ static void mmhub_v3_0_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -306,7 +306,7 @@ static void mmhub_v3_0_1_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v3_0_1_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -356,7 +356,7 @@ static void mmhub_v3_0_1_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v3_0_1_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -385,7 
+385,7 @@ static int mmhub_v3_0_1_gart_enable(struct amdgpu_device *adev) static void mmhub_v3_0_1_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -459,7 +459,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_1_vmhub_funcs = { static void mmhub_v3_0_1_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c index 26abbc6a47ab..5dadc85abf7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c @@ -129,7 +129,7 @@ mmhub_v3_0_2_print_l2_protection_fault_status(struct amdgpu_device *adev, static void mmhub_v3_0_2_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, @@ -311,7 +311,7 @@ static void mmhub_v3_0_2_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v3_0_2_setup_vmid_config(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; int i; uint32_t tmp; @@ -361,7 +361,7 @@ static void mmhub_v3_0_2_setup_vmid_config(struct amdgpu_device *adev) static void mmhub_v3_0_2_program_invalidation(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -390,7 +390,7 @@ static int mmhub_v3_0_2_gart_enable(struct amdgpu_device *adev) static void mmhub_v3_0_2_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i; @@ -469,7 +469,7 @@ static const struct amdgpu_vmhub_funcs mmhub_v3_0_2_vmhub_funcs = { static void mmhub_v3_0_2_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 72083e96222f..e790f890aec6 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -57,7 +57,7 @@ static u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) static void mmhub_v9_4_setup_hubid_vm_pt_regs(struct amdgpu_device *adev, int hubid, uint32_t vmid, uint64_t value) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, @@ -294,7 +294,7 @@ static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev, static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned int num_level, block_size; uint32_t tmp; int i; @@ -363,7 +363,7 @@ static void 
mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev, int hubid) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; unsigned i; for (i = 0; i < 18; ++i) { @@ -404,7 +404,7 @@ static int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; u32 tmp; u32 i, j; @@ -507,8 +507,8 @@ static void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool static void mmhub_v9_4_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = - {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]}; + struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = { + &adev->vmhub[AMDGPU_MMHUB0(0)], &adev->vmhub[AMDGPU_MMHUB1(0)]}; int i; for (i = 0; i < MMHUB_NUM_INSTANCES; i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 9295ac7edd56..50b6eb9bcfda 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1825,12 +1825,12 @@ static int sdma_v4_0_sw_init(void *handle) /* * On Arcturus, SDMA instance 5~7 has a different vmhub - * type(AMDGPU_MMHUB_1). + * type(AMDGPU_MMHUB1). */ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, @@ -1851,9 +1851,9 @@ static int sdma_v4_0_sw_init(void *handle) ring->doorbell_index += 0x400; if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 64dcaa2670dd..7efe7c43fffb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1309,7 +1309,7 @@ static int sdma_v4_4_2_sw_init(void *handle) /* doorbell size is 2 dwords, get DWORD offset */ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, @@ -1328,7 +1328,7 @@ static int sdma_v4_4_2_sw_init(void *handle) */ ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; ring->doorbell_index += 0x400; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 92e1299be021..a0077cf41295 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1389,7 +1389,7 @@ static int sdma_v5_0_sw_init(void *handle) (adev->doorbell_index.sdma_engine[0] << 1) //get DWORD offset : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "sdma%d", i); r = 
amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 : diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index ca7e8757d78e..efa2c84ee78e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -1253,7 +1253,7 @@ static int sdma_v5_2_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, AMDGPU_SDMA_IRQ_INSTANCE0 + i, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 3d9a80511a45..79d09792d2ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1298,7 +1298,7 @@ static int sdma_v6_0_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset - ring->vm_hub = AMDGPU_GFXHUB_0; + ring->vm_hub = AMDGPU_GFXHUB(0); sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index e32b656b3dab..abaa4463e906 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -444,7 +444,7 @@ static int uvd_v7_0_sw_init(void *handle) continue; if (!amdgpu_sriov_vf(adev)) { ring = &adev->uvd.inst[j].ring; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "uvd_%d", ring->me); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0, @@ -455,7 +455,7 @@ static int uvd_v7_0_sw_init(void *handle) for (i = 0; i < adev->uvd.num_enc_rings; ++i) { ring = &adev->uvd.inst[j].ring_enc[i]; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i); if (amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 57b85bb6a1e4..e0b70cd3b697 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -466,7 +466,7 @@ static int vce_v4_0_sw_init(void *handle) enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); ring = &adev->vce.ring[i]; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vce%d", i); if (amdgpu_sriov_vf(adev)) { /* DOORBELL only works under SRIOV */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 761c28fa6ec1..f877c39c7cdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -120,7 +120,7 @@ static int vcn_v1_0_sw_init(void *handle) return r; ring = &adev->vcn.inst->ring_dec; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, AMDGPU_RING_PRIO_DEFAULT, NULL); @@ -142,7 +142,7 @@ static int vcn_v1_0_sw_init(void *handle) enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); ring = &adev->vcn.inst->ring_enc[i]; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, hw_prio, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 7c2b3aa48083..c975aed2f6c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -129,7 +129,7 @@ static int vcn_v2_0_sw_init(void *handle) ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, @@ -160,7 +160,7 @@ static int vcn_v2_0_sw_init(void *handle) ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); if (!amdgpu_sriov_vf(adev)) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; else diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index ab0b45d0ead1..7044bd7c9f62 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -188,9 +188,9 @@ static int vcn_v2_5_sw_init(void *handle) (amdgpu_sriov_vf(adev) ? 2*j : 8*j); if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec_%d", j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, @@ -208,9 +208,9 @@ static int vcn_v2_5_sw_init(void *handle) (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j)); if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) - ring->vm_hub = AMDGPU_MMHUB_1; + ring->vm_hub = AMDGPU_MMHUB1(0); else - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_enc_%d.%d", j, i); r = amdgpu_ring_init(adev, ring, 512, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3eab186261aa..70fefbf26c48 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -189,7 +189,7 @@ static int vcn_v3_0_sw_init(void *handle) } else { ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i; } - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_dec_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, AMDGPU_RING_PRIO_DEFAULT, @@ -213,7 +213,7 @@ static int vcn_v3_0_sw_init(void *handle) } else { ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i; } - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_enc_%d.%d", i, j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, hw_prio, &adev->vcn.inst[i].sched_score); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index bf0674039598..81446e6996df 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -149,7 +149,7 @@ static int vcn_v4_0_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1; else ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i; - ring->vm_hub = AMDGPU_MMHUB_0; + ring->vm_hub = AMDGPU_MMHUB0(0); sprintf(ring->name, "vcn_unified_%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, -- cgit v1.2.3 From 5fb34bd9cf9e248d7e84e431a4a6b731334ab564 Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Tue, 24 May 2022 10:22:12 -0500 Subject: 
drm/amdkfd: pass kfd_node ref to svm migration api This work is required for GC 9.4.3, previous to support memory partitions per node at SVM. When multiple partition is configured, every BO should be allocated inside one specific partition which corresponds to the current amdgpu_device and kfd_node. v2: squash in compilation fix (Alex) v3: squash in fix for pre-gfx 9.4.3 (Alex) v4: squash in best_loc fix (Alex) Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 33 ++++--- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 45 ++++----- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 30 +++++- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 152 +++++++++++++++---------------- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 21 +++-- 9 files changed, 166 insertions(+), 133 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c3964c14f215..c390b2856cc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2441,7 +2441,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * shouldn't be reported any more. */ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - uint64_t addr, bool write_fault) + u32 client_id, u32 node_id, uint64_t addr, + bool write_fault) { bool is_compute_context = false; struct amdgpu_bo *root; @@ -2465,8 +2466,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, addr /= AMDGPU_GPU_PAGE_SIZE; - if (is_compute_context && - !svm_range_restore_pages(adev, pasid, addr, write_fault)) { + if (is_compute_context && !svm_range_restore_pages(adev, pasid, client_id, + node_id, addr, write_fault)) { amdgpu_bo_unref(&root); return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 9f5d32b0fda1..dbab31647186 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -455,7 +455,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, struct amdgpu_task_info *task_info); bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - uint64_t addr, bool write_fault); + u32 client_id, u32 node_id, uint64_t addr, + bool write_fault); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index d76f5c8d4977..01bd45651382 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -139,7 +139,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, /* Try to handle the recoverable page faults by filling page * tables */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) + if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr, write_fault)) return 1; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2c322a25bf1c..c5752a349f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -557,11 +557,24 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, u64 addr; uint32_t cam_index = 0; int ret; - uint32_t node_id; + uint32_t node_id = 
0; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; + if (entry->client_id == SOC15_IH_CLIENTID_VMC) { + hub_name = "mmhub0"; + hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; + } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { + hub_name = "mmhub1"; + hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; + } else { + hub_name = "gfxhub0"; + node_id = (adev->ip_versions[GC_HWIP][0] == + IP_VERSION(9, 4, 3)) ? entry->node_id : 0; + hub = &adev->vmhub[node_id/2]; + } + if (retry_fault) { if (adev->irq.retry_cam_enabled) { /* Delegate it to a different ring if the hardware hasn't @@ -574,7 +587,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, cam_index = entry->src_data[2] & 0x3ff; - ret = amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault); + ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + addr, write_fault); WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); if (ret) return 1; @@ -596,7 +610,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, /* Try to handle the recoverable page faults by filling page * tables */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault)) + if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + addr, write_fault)) return 1; } } @@ -604,18 +619,6 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, if (!printk_ratelimit()) return 0; - if (entry->client_id == SOC15_IH_CLIENTID_VMC) { - hub_name = "mmhub0"; - hub = &adev->vmhub[AMDGPU_MMHUB0(0)]; - } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { - hub_name = "mmhub1"; - hub = &adev->vmhub[AMDGPU_MMHUB1(0)]; - } else { - hub_name = "gfxhub0"; - node_id = (adev->ip_versions[GC_HWIP][0] == - IP_VERSION(9, 4, 3)) ? 
entry->node_id : 0; - hub = &adev->vmhub[node_id/2]; - } memset(&task_info, 0, sizeof(struct amdgpu_task_info)); amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 5f4dc2a45bd0..e7e5abc32c84 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -287,11 +287,12 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) } static int -svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, +svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, dma_addr_t *scratch, uint64_t ttm_res_offset) { - uint64_t npages = migrate->npages; + uint64_t npages = migrate->cpages; + struct amdgpu_device *adev = node->adev; struct device *dev = adev->dev; struct amdgpu_res_cursor cursor; dma_addr_t *src; @@ -321,7 +322,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, DMA_TO_DEVICE); r = dma_mapping_error(dev, src[i]); if (r) { - dev_err(adev->dev, "%s: fail %d dma_map_page\n", + dev_err(dev, "%s: fail %d dma_map_page\n", __func__, r); goto out_free_vram_pages; } @@ -390,12 +391,13 @@ out_free_vram_pages: } static long -svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, +svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end, uint32_t trigger, uint64_t ttm_res_offset) { struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); uint64_t npages = (end - start) >> PAGE_SHIFT; + struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate = { 0 }; @@ -445,7 +447,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, else pr_debug("0x%lx pages migrated\n", cpages); - r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset); + r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset); migrate_vma_pages(&migrate); pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", @@ -465,7 +467,7 @@ out_free: kvfree(buf); out: if (!r && cpages) { - pdd = svm_range_get_pdd_by_adev(prange, adev); + pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) WRITE_ONCE(pdd->page_in, pdd->page_in + cpages); @@ -492,8 +494,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, { unsigned long addr, start, end; struct vm_area_struct *vma; - struct amdgpu_device *adev; uint64_t ttm_res_offset; + struct kfd_node *node; unsigned long cpages = 0; long r = 0; @@ -503,9 +505,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, return 0; } - adev = svm_range_get_adev_by_id(prange, best_loc); - if (!adev) { - pr_debug("failed to get device by id 0x%x\n", best_loc); + node = svm_range_get_node_by_id(prange, best_loc); + if (!node) { + pr_debug("failed to get kfd node by id 0x%x\n", best_loc); return -ENODEV; } @@ -515,9 +517,9 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, start = prange->start << PAGE_SHIFT; end = (prange->last + 1) << PAGE_SHIFT; - r = svm_range_vram_node_new(adev, prange, true); + r = svm_range_vram_node_new(node, prange, true); if (r) { - dev_dbg(adev->dev, "fail %ld to alloc vram\n", r); + dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r); return r; } ttm_res_offset = 
prange->offset << PAGE_SHIFT; @@ -530,7 +532,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, break; next = min(vma->vm_end, end); - r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset); + r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset); if (r < 0) { pr_debug("failed %ld to migrate\n", r); break; @@ -663,7 +665,7 @@ out_oom: * positive values - partial migration, number of pages not migrated */ static long -svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, +svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end, uint32_t trigger, struct page *fault_page) { @@ -671,6 +673,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, uint64_t npages = (end - start) >> PAGE_SHIFT; unsigned long upages = npages; unsigned long cpages = 0; + struct amdgpu_device *adev = node->adev; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate = { 0 }; @@ -745,7 +748,7 @@ out_free: kvfree(buf); out: if (!r && cpages) { - pdd = svm_range_get_pdd_by_adev(prange, adev); + pdd = svm_range_get_pdd_by_node(prange, node); if (pdd) WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); } @@ -766,7 +769,7 @@ out: int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, uint32_t trigger, struct page *fault_page) { - struct amdgpu_device *adev; + struct kfd_node *node; struct vm_area_struct *vma; unsigned long addr; unsigned long start; @@ -780,13 +783,11 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, return 0; } - adev = svm_range_get_adev_by_id(prange, prange->actual_loc); - if (!adev) { - pr_debug("failed to get device by id 0x%x\n", - prange->actual_loc); + node = svm_range_get_node_by_id(prange, prange->actual_loc); + if (!node) { + pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc); return -ENODEV; } - pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n", prange->svms, prange, prange->start, prange->last, prange->actual_loc); @@ -805,7 +806,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, } next = min(vma->vm_end, end); - r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger, + r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger, fault_page); if (r < 0) { pr_debug("failed %ld to migrate prange %p\n", r, prange); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 400b4dcbdf05..df372de6b056 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -289,6 +289,7 @@ struct kfd_node { * from the HW ring into a SW ring. 
*/ bool interrupts_active; + uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */ /* QCM Device instance */ struct device_queue_manager *dqm; @@ -971,9 +972,8 @@ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid); struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm); int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id); -int kfd_process_gpuid_from_adev(struct kfd_process *p, - struct amdgpu_device *adev, uint32_t *gpuid, - uint32_t *gpuidx); +int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node, + uint32_t *gpuid, uint32_t *gpuidx); static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p, uint32_t gpuidx, uint32_t *gpuid) { return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL; @@ -1073,6 +1073,30 @@ struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev); struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev); +static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t client_id, + uint32_t node_id) +{ + if ((node->interrupt_bitmap & (0x1U << node_id)) || + ((node_id % 4) == 0 && + (node->interrupt_bitmap >> 16) & (0x1U << client_id))) + return true; + + return false; +} +static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev, + uint32_t client_id, uint32_t node_id) { + struct kfd_dev *dev = adev->kfd.dev; + uint32_t i; + + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) + return dev->nodes[0]; + + for (i = 0; i < dev->num_nodes; i++) + if (kfd_irq_is_from_node(dev->nodes[i], client_id, node_id)) + return dev->nodes[i]; + + return NULL; +} int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index c3d43e6e5236..666815b227a8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1891,13 +1891,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id) } int -kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev, - uint32_t *gpuid, uint32_t *gpuidx) +kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node, + uint32_t *gpuid, uint32_t *gpuidx) { int i; for (i = 0; i < p->n_pdds; i++) - if (p->pdds[i] && p->pdds[i]->dev->adev == adev) { + if (p->pdds[i] && p->pdds[i]->dev == node) { *gpuid = p->pdds[i]->user_gpu_id; *gpuidx = i; return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 4b4f3bf8b823..639831fbb6ca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -170,8 +170,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, page = hmm_pfn_to_page(hmm_pfns[i]); if (is_zone_device_page(page)) { - struct amdgpu_device *bo_adev = - amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); + struct amdgpu_device *bo_adev = prange->svm_bo->node->adev; addr[i] = (hmm_pfns[i] << PAGE_SHIFT) + bo_adev->vm_manager.vram_base_offset - @@ -424,10 +423,8 @@ static void svm_range_bo_unref(struct svm_range_bo *svm_bo) } static bool -svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange) +svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range 
*prange) { - struct amdgpu_device *bo_adev; - mutex_lock(&prange->lock); if (!prange->svm_bo) { mutex_unlock(&prange->lock); @@ -440,12 +437,11 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange) } if (svm_bo_ref_unless_zero(prange->svm_bo)) { /* - * Migrate from GPU to GPU, remove range from source bo_adev - * svm_bo range list, and return false to allocate svm_bo from - * destination adev. + * Migrate from GPU to GPU, remove range from source svm_bo->node + * range list, and return false to allocate svm_bo from destination + * node. */ - bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); - if (bo_adev != adev) { + if (prange->svm_bo->node != node) { mutex_unlock(&prange->lock); spin_lock(&prange->svm_bo->list_lock); @@ -513,7 +509,7 @@ static struct svm_range_bo *svm_range_bo_new(void) } int -svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, +svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bool clear) { struct amdgpu_bo_param bp; @@ -528,7 +524,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms, prange->start, prange->last); - if (svm_range_validate_svm_bo(adev, prange)) + if (svm_range_validate_svm_bo(node, prange)) return 0; svm_bo = svm_range_bo_new(); @@ -542,6 +538,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, kfree(svm_bo); return -ESRCH; } + svm_bo->node = node; svm_bo->eviction_fence = amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), mm, @@ -559,7 +556,10 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange, bp.type = ttm_bo_type_device; bp.resv = NULL; - r = amdgpu_bo_create_user(adev, &bp, &ubo); + /* TODO: Allocate memory from the right memory partition. 
We can sort + * out the details later, once basic memory partitioning is working + */ + r = amdgpu_bo_create_user(node->adev, &bp, &ubo); if (r) { pr_debug("failed %d to create bo\n", r); goto create_bo_failed; @@ -617,45 +617,30 @@ void svm_range_vram_node_free(struct svm_range *prange) prange->ttm_res = NULL; } -struct amdgpu_device * -svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id) +struct kfd_node * +svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id) { - struct kfd_process_device *pdd; struct kfd_process *p; - int32_t gpu_idx; + struct kfd_process_device *pdd; p = container_of(prange->svms, struct kfd_process, svms); - - gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id); - if (gpu_idx < 0) { - pr_debug("failed to get device by id 0x%x\n", gpu_id); - return NULL; - } - pdd = kfd_process_device_from_gpuidx(p, gpu_idx); + pdd = kfd_process_device_data_by_id(p, gpu_id); if (!pdd) { - pr_debug("failed to get device by idx 0x%x\n", gpu_idx); + pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id); return NULL; } - return pdd->dev->adev; + return pdd->dev; } struct kfd_process_device * -svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev) +svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node) { struct kfd_process *p; - int32_t gpu_idx, gpuid; - int r; p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx); - if (r) { - pr_debug("failed to get device id by adev %p\n", adev); - return NULL; - } - - return kfd_process_device_from_gpuidx(p, gpu_idx); + return kfd_get_process_device_data(node, p); } static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo) @@ -1148,12 +1133,18 @@ svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, } return 0; } +static bool +svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b) +{ + return (node_a->adev == node_b->adev || + amdgpu_xgmi_same_hive(node_a->adev, node_b->adev)); +} static uint64_t -svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, - int domain) +svm_range_get_pte_flags(struct kfd_node *node, + struct svm_range *prange, int domain) { - struct amdgpu_device *bo_adev; + struct kfd_node *bo_node; uint32_t flags = prange->flags; uint32_t mapping_flags = 0; uint64_t pte_flags; @@ -1162,18 +1153,18 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, bool uncached = flags & KFD_IOCTL_SVM_FLAG_UNCACHED; if (domain == SVM_RANGE_VRAM_DOMAIN) - bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); + bo_node = prange->svm_bo->node; - switch (KFD_GC_VERSION(adev->kfd.dev)) { + switch (node->adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(9, 4, 1): if (domain == SVM_RANGE_VRAM_DOMAIN) { - if (bo_adev == adev) { + if (bo_node == node) { mapping_flags |= coherent ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; } else { mapping_flags |= coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; - if (amdgpu_xgmi_same_hive(adev, bo_adev)) + if (svm_nodes_in_same_hive(node, bo_node)) snoop = true; } } else { @@ -1183,15 +1174,15 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, break; case IP_VERSION(9, 4, 2): if (domain == SVM_RANGE_VRAM_DOMAIN) { - if (bo_adev == adev) { + if (bo_node == node) { mapping_flags |= coherent ? 
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; - if (adev->gmc.xgmi.connected_to_cpu) + if (node->adev->gmc.xgmi.connected_to_cpu) snoop = true; } else { mapping_flags |= coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; - if (amdgpu_xgmi_same_hive(adev, bo_adev)) + if (svm_nodes_in_same_hive(node, bo_node)) snoop = true; } } else { @@ -1207,7 +1198,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, if (uncached) mapping_flags |= AMDGPU_VM_MTYPE_UC; /* local HBM region close to partition*/ - else if (bo_adev == adev) + else if (bo_node == node) mapping_flags |= AMDGPU_VM_MTYPE_RW; /* local HBM region far from partition or remote XGMI GPU or * system memory @@ -1231,7 +1222,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM; pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0; - pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags); + pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags); return pte_flags; } @@ -1338,7 +1329,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n", last_start, prange->start + i, last_domain ? "GPU" : "CPU"); - pte_flags = svm_range_get_pte_flags(adev, prange, last_domain); + pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain); if (readonly) pte_flags &= ~AMDGPU_PTE_WRITEABLE; @@ -1347,6 +1338,9 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0, pte_flags); + /* TODO: we still need to determine the vm_manager.vram_base_offset based on + * the memory partition. + */ r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, last_start, prange->start + i, pte_flags, @@ -1384,16 +1378,14 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, unsigned long *bitmap, bool wait, bool flush_tlb) { struct kfd_process_device *pdd; - struct amdgpu_device *bo_adev; + struct amdgpu_device *bo_adev = NULL; struct kfd_process *p; struct dma_fence *fence = NULL; uint32_t gpuidx; int r = 0; if (prange->svm_bo && prange->ttm_res) - bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); - else - bo_adev = NULL; + bo_adev = prange->svm_bo->node->adev; p = container_of(prange->svms, struct kfd_process, svms); for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { @@ -2526,17 +2518,17 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, */ static int32_t svm_range_best_restore_location(struct svm_range *prange, - struct amdgpu_device *adev, + struct kfd_node *node, int32_t *gpuidx) { - struct amdgpu_device *bo_adev, *preferred_adev; + struct kfd_node *bo_node, *preferred_node; struct kfd_process *p; uint32_t gpuid; int r; p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx); + r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx); if (r < 0) { pr_debug("failed to get gpuid from kgd\n"); return -1; @@ -2546,9 +2538,8 @@ svm_range_best_restore_location(struct svm_range *prange, prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { return prange->preferred_loc; } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) { - preferred_adev = svm_range_get_adev_by_id(prange, - prange->preferred_loc); - if (amdgpu_xgmi_same_hive(adev, preferred_adev)) + preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc); + if (preferred_node && 
svm_nodes_in_same_hive(node, preferred_node)) return prange->preferred_loc; /* fall through */ } @@ -2560,8 +2551,8 @@ svm_range_best_restore_location(struct svm_range *prange, if (!prange->actual_loc) return 0; - bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc); - if (amdgpu_xgmi_same_hive(adev, bo_adev)) + bo_node = svm_range_get_node_by_id(prange, prange->actual_loc); + if (bo_node && svm_nodes_in_same_hive(node, bo_node)) return prange->actual_loc; else return 0; @@ -2678,7 +2669,7 @@ svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last, } static struct -svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, +svm_range *svm_range_create_unregistered_range(struct kfd_node *node, struct kfd_process *p, struct mm_struct *mm, int64_t addr) @@ -2713,7 +2704,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, pr_debug("Failed to create prange in address [0x%llx]\n", addr); return NULL; } - if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) { + if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) { pr_debug("failed to get gpuid from kgd\n"); svm_range_free(prange, true); return NULL; @@ -2767,7 +2758,7 @@ static bool svm_range_skip_recover(struct svm_range *prange) } static void -svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, +svm_range_count_fault(struct kfd_node *node, struct kfd_process *p, int32_t gpuidx) { struct kfd_process_device *pdd; @@ -2780,7 +2771,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, uint32_t gpuid; int r; - r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx); + r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx); if (r < 0) return; } @@ -2808,6 +2799,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, + uint32_t client_id, uint32_t node_id, uint64_t addr, bool write_fault) { struct mm_struct *mm = NULL; @@ -2815,6 +2807,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, struct svm_range *prange; struct kfd_process *p; ktime_t timestamp = ktime_get_boottime(); + struct kfd_node *node; int32_t best_loc; int32_t gpuidx = MAX_GPU_INSTANCE; bool write_locked = false; @@ -2858,6 +2851,13 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, goto out; } + node = kfd_node_by_irq_ids(adev, node_id, client_id); + if (!node) { + pr_debug("kfd node does not exist node_id: %d, client_id: %d\n", node_id, + client_id); + r = -EFAULT; + goto out; + } mmap_read_lock(mm); retry_write_locked: mutex_lock(&svms->lock); @@ -2876,7 +2876,7 @@ retry_write_locked: write_locked = true; goto retry_write_locked; } - prange = svm_range_create_unregistered_range(adev, p, mm, addr); + prange = svm_range_create_unregistered_range(node, p, mm, addr); if (!prange) { pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n", svms, addr); @@ -2891,7 +2891,7 @@ retry_write_locked: mutex_lock(&prange->migrate_mutex); if (svm_range_skip_recover(prange)) { - amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid); r = 0; goto out_unlock_range; } @@ -2922,7 +2922,7 @@ retry_write_locked: goto out_unlock_range; } - best_loc = svm_range_best_restore_location(prange, adev, &gpuidx); + best_loc = svm_range_best_restore_location(prange, node, &gpuidx); if (best_loc == -1) { pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n", 
svms, prange->start, prange->last); @@ -2981,7 +2981,7 @@ out_unlock_svms: mutex_unlock(&svms->lock); mmap_read_unlock(mm); - svm_range_count_fault(adev, p, gpuidx); + svm_range_count_fault(node, p, gpuidx); mmput(mm); out: @@ -2989,7 +2989,7 @@ out: if (r == -EAGAIN) { pr_debug("recover vm fault later\n"); - amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid); r = 0; } return r; @@ -3231,7 +3231,7 @@ svm_range_best_prefetch_location(struct svm_range *prange) DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); uint32_t best_loc = prange->prefetch_loc; struct kfd_process_device *pdd; - struct amdgpu_device *bo_adev; + struct kfd_node *bo_node; struct kfd_process *p; uint32_t gpuidx; @@ -3240,9 +3240,9 @@ svm_range_best_prefetch_location(struct svm_range *prange) if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) goto out; - bo_adev = svm_range_get_adev_by_id(prange, best_loc); - if (!bo_adev) { - WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc); + bo_node = svm_range_get_node_by_id(prange, best_loc); + if (!bo_node) { + WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc); best_loc = 0; goto out; } @@ -3260,10 +3260,10 @@ svm_range_best_prefetch_location(struct svm_range *prange) continue; } - if (pdd->dev->adev == bo_adev) + if (pdd->dev->adev == bo_node->adev) continue; - if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { + if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) { best_loc = 0; break; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 7a33b93f9df6..a165c73b40b2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -48,6 +48,7 @@ struct svm_range_bo { struct work_struct eviction_work; uint32_t evicting; struct work_struct release_work; + struct kfd_node *node; }; enum svm_work_list_ops { @@ -163,16 +164,17 @@ int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start, struct svm_range *svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, struct svm_range **parent); -struct amdgpu_device *svm_range_get_adev_by_id(struct svm_range *prange, - uint32_t id); -int svm_range_vram_node_new(struct amdgpu_device *adev, - struct svm_range *prange, bool clear); +struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange, + uint32_t gpu_id); +int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, + bool clear); void svm_range_vram_node_free(struct svm_range *prange); int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, unsigned long addr, struct svm_range *parent, struct svm_range *prange); -int svm_range_restore_pages(struct amdgpu_device *adev, - unsigned int pasid, uint64_t addr, bool write_fault); +int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, + uint32_t client_id, uint32_t node_id, uint64_t addr, + bool write_fault); int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence); void svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, struct mm_struct *mm, @@ -192,7 +194,7 @@ int kfd_criu_restore_svm(struct kfd_process *p, uint64_t max_priv_data_size); int kfd_criu_resume_svm(struct kfd_process *p); struct kfd_process_device * -svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev); +svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node); void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct 
mm_struct *mm); /* SVM API and HMM page migration work together, device memory type @@ -219,8 +221,9 @@ static inline void svm_range_list_fini(struct kfd_process *p) } static inline int svm_range_restore_pages(struct amdgpu_device *adev, - unsigned int pasid, uint64_t addr, - bool write_fault) + unsigned int pasid, + uint32_t client_id, uint32_t node_id, + uint64_t addr, bool write_fault) { return -EFAULT; } -- cgit v1.2.3 From f5fe7edfd6ce62cd23fbd707e7f9fe0f56a45e94 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 30 Sep 2022 09:16:21 -0400 Subject: drm/amdkfd: Update interrupt handling for GFX9.4.3 Update interrupt handling in CPX mode for GFX9.4.3 by using the VMID space instead of SDMA client id to determine if an interrupt should be processed by a KFD node. This is especially needed for handling retry faults from MMHUB. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 16 ++++++---------- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 ++++---- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 2 +- 6 files changed, 19 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c390b2856cc9..22a900f298f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2434,6 +2434,9 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * amdgpu_vm_handle_fault - graceful handling of VM faults. * @adev: amdgpu device pointer * @pasid: PASID of the VM + * @vmid: VMID, only used for GFX 9.4.3. + * @node_id: Node_id received in IH cookie. Only applicable for + * GFX 9.4.3. * @addr: Address of the fault * @write_fault: true is write fault, false is read fault * @@ -2441,7 +2444,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) * shouldn't be reported any more. 
*/ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - u32 client_id, u32 node_id, uint64_t addr, + u32 vmid, u32 node_id, uint64_t addr, bool write_fault) { bool is_compute_context = false; @@ -2466,7 +2469,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, addr /= AMDGPU_GPU_PAGE_SIZE; - if (is_compute_context && !svm_range_restore_pages(adev, pasid, client_id, + if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid, node_id, addr, write_fault)) { amdgpu_bo_unref(&root); return true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index dbab31647186..8add5f5eb92a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -455,7 +455,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, struct amdgpu_task_info *task_info); bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, - u32 client_id, u32 node_id, uint64_t addr, + u32 vmid, u32 node_id, uint64_t addr, bool write_fault); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c5752a349f3d..f2814270da40 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -587,7 +587,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, cam_index = entry->src_data[2] & 0x3ff; - ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, addr, write_fault); WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index); if (ret) @@ -610,7 +610,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, /* Try to handle the recoverable page faults by filling page * tables */ - if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->client_id, node_id, + if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id, addr, write_fault)) return 1; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index df372de6b056..fb3cf2c51da8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1073,18 +1073,14 @@ struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev); struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev); -static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t client_id, - uint32_t node_id) +static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id, + uint32_t vmid) { - if ((node->interrupt_bitmap & (0x1U << node_id)) || - ((node_id % 4) == 0 && - (node->interrupt_bitmap >> 16) & (0x1U << client_id))) - return true; - - return false; + return (node->interrupt_bitmap & (1 << node_id)) != 0 && + (node->compute_vmid_bitmap & (1 << vmid)) != 0; } static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev, - uint32_t client_id, uint32_t node_id) { + uint32_t node_id, uint32_t vmid) { struct kfd_dev *dev = adev->kfd.dev; uint32_t i; @@ -1092,7 +1088,7 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev, return dev->nodes[0]; for (i = 0; i < dev->num_nodes; i++) - if (kfd_irq_is_from_node(dev->nodes[i], client_id, node_id)) + if 
(kfd_irq_is_from_node(dev->nodes[i], node_id, vmid)) return dev->nodes[i]; return NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 0dafbbe954ca..5d6e02559d8e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2799,7 +2799,7 @@ svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, - uint32_t client_id, uint32_t node_id, + uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault) { struct mm_struct *mm = NULL; @@ -2851,10 +2851,10 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, goto out; } - node = kfd_node_by_irq_ids(adev, node_id, client_id); + node = kfd_node_by_irq_ids(adev, node_id, vmid); if (!node) { - pr_debug("kfd node does not exist node_id: %d, client_id: %d\n", node_id, - client_id); + pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id, + vmid); r = -EFAULT; goto out; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index a165c73b40b2..5116786718b6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -173,7 +173,7 @@ int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, unsigned long addr, struct svm_range *parent, struct svm_range *prange); int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, - uint32_t client_id, uint32_t node_id, uint64_t addr, + uint32_t vmid, uint32_t node_id, uint64_t addr, bool write_fault); int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence); void svm_range_add_list_work(struct svm_range_list *svms, -- cgit v1.2.3 From a2b308044dcaca8d3e580959a4f867a1d5c37fac Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Sat, 13 May 2023 14:51:00 +0200 Subject: drm/amdgpu: Validate VM ioctl flags. None have been defined yet, so reject anybody setting any. Mesa sets it to 0 anyway. Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 22a900f298f2..22f9a65ca0fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2370,6 +2370,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) struct amdgpu_fpriv *fpriv = filp->driver_priv; int r; + /* No valid flags defined yet */ + if (args->in.flags) + return -EINVAL; + switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: /* We only have requirement to reserve vmid from gfxhub */ -- cgit v1.2.3 From 9f0bcf49e9895cb005d78b33a5eebfa11711b425 Mon Sep 17 00:00:00 2001 From: Chia-I Wu Date: Thu, 1 Jun 2023 15:44:12 -0700 Subject: amdgpu: validate offset_in_bo of drm_amdgpu_gem_va MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is motivated by OOB access in amdgpu_vm_update_range when offset_in_bo+map_size overflows. 
v2: keep the validations in amdgpu_vm_bo_map v3: add the validations to amdgpu_vm_bo_map/amdgpu_vm_bo_replace_map rather than to amdgpu_gem_va_ioctl Fixes: 9f7eb5367d00 ("drm/amdgpu: actually use the VM map parameters") Reviewed-by: Christian König Signed-off-by: Chia-I Wu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 22f9a65ca0fc..76d57bc7ac62 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1434,14 +1434,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t eaddr; /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || - size == 0 || size & ~PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) + return -EINVAL; + if (saddr + size <= saddr || offset + size <= offset) return -EINVAL; /* make sure object fit at this offset */ eaddr = saddr + size - 1; - if (saddr >= eaddr || - (bo && offset + size > amdgpu_bo_size(bo)) || + if ((bo && offset + size > amdgpu_bo_size(bo)) || (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; @@ -1500,14 +1500,14 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, int r; /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || - size == 0 || size & ~PAGE_MASK) + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) + return -EINVAL; + if (saddr + size <= saddr || offset + size <= offset) return -EINVAL; /* make sure object fit at this offset */ eaddr = saddr + size - 1; - if (saddr >= eaddr || - (bo && offset + size > amdgpu_bo_size(bo)) || + if ((bo && offset + size > amdgpu_bo_size(bo)) || (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; -- cgit v1.2.3 From 80e709ee6ecc9eba8bd8d188218472822e1b38bd Mon Sep 17 00:00:00 2001 From: Chong Li Date: Wed, 7 Jun 2023 15:56:12 +0800 Subject: drm/amdgpu: add option params to enforce process isolation between graphics and compute enforce process isolation between graphics and compute via using the same reserved vmid. v2: remove params "struct amdgpu_vm *vm" from amdgpu_vmid_alloc_reserved and amdgpu_vmid_free_reserved. 
Signed-off-by: Chong Li Reviewed-by: Christian Koenig Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 10 +++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 17 +++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h | 6 ++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 25 +++++++++++++++++-------- 5 files changed, 36 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 74104d46a7be..a84bd4a0c421 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -214,6 +214,7 @@ extern int amdgpu_force_asic_type; extern int amdgpu_smartshift_bias; extern int amdgpu_use_xgmi_p2p; extern int amdgpu_mtype_local; +extern bool enforce_isolation; #ifdef CONFIG_HSA_AMD extern int sched_policy; extern bool debug_evictions; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 8e58d187b173..999d008b6b48 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -153,7 +153,7 @@ uint amdgpu_pg_mask = 0xffffffff; uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu; char *amdgpu_virtual_display; - +bool enforce_isolation; /* * OverDrive(bit 14) disabled by default * GFX DCS(bit 19) disabled by default @@ -973,6 +973,14 @@ MODULE_PARM_DESC( 4 = AMDGPU_CPX_PARTITION_MODE)"); module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444); + +/** + * DOC: enforce_isolation (bool) + * enforce process isolation between graphics and compute via using the same reserved vmid. + */ +module_param(enforce_isolation, bool, 0444); +MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on"); + /* These devices are not supported by amdgpu. 
* They are supported by the mach64, r128, radeon drivers */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index c991ca0b7a1c..ff1ea99292fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -409,7 +409,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r || !idle) goto error; - if (vm->reserved_vmid[vmhub]) { + if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) { r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence); if (r || !id) goto error; @@ -460,14 +460,11 @@ error: } int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, unsigned vmhub) { struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; mutex_lock(&id_mgr->lock); - if (vm->reserved_vmid[vmhub]) - goto unlock; ++id_mgr->reserved_use_count; if (!id_mgr->reserved) { @@ -479,27 +476,23 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, list_del_init(&id->list); id_mgr->reserved = id; } - vm->reserved_vmid[vmhub] = true; -unlock: mutex_unlock(&id_mgr->lock); return 0; } void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, unsigned vmhub) { struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; mutex_lock(&id_mgr->lock); - if (vm->reserved_vmid[vmhub] && - !--id_mgr->reserved_use_count) { + if (!--id_mgr->reserved_use_count) { /* give the reserved ID back to normal round robin */ list_add(&id_mgr->reserved->list, &id_mgr->ids_lru); id_mgr->reserved = NULL; } - vm->reserved_vmid[vmhub] = false; + mutex_unlock(&id_mgr->lock); } @@ -578,6 +571,10 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev) list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru); } } + /* alloc a default reserved vmid to enforce isolation */ + if (enforce_isolation) + amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0)); + } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h index d1cc09b45da4..fa8c42c83d5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h @@ -79,11 +79,9 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, struct amdgpu_vmid *id); int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - unsigned vmhub); + unsigned vmhub); void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - unsigned vmhub); + unsigned vmhub); int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job, struct dma_fence **fence); void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 76d57bc7ac62..dc80c9c8fd14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2284,8 +2284,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) } dma_fence_put(vm->last_update); - for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) - amdgpu_vmid_free_reserved(adev, vm, i); + + for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) { + if (vm->reserved_vmid[i]) { + amdgpu_vmid_free_reserved(adev, i); + vm->reserved_vmid[i] = false; + } + } + } /** @@ -2368,7 +2374,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) union drm_amdgpu_vm *args = data; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv 
*fpriv = filp->driver_priv; - int r; /* No valid flags defined yet */ if (args->in.flags) @@ -2377,13 +2382,17 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: /* We only have requirement to reserve vmid from gfxhub */ - r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, - AMDGPU_GFXHUB(0)); - if (r) - return r; + if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { + amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0)); + fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true; + } + break; case AMDGPU_VM_OP_UNRESERVE_VMID: - amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB(0)); + if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { + amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0)); + fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false; + } break; default: return -EINVAL; -- cgit v1.2.3
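A note on the validation change in the first patch above: the new tests "saddr + size <= saddr" and "offset + size <= offset" reject both unsigned wraparound and a zero size (saddr + 0 <= saddr is always true), which is why the explicit size == 0 check could be dropped. The following is a minimal, self-contained userspace sketch of that validation order, not driver code; GPU_PAGE_MASK, map_args_valid() and the sample values are made-up stand-ins for the driver's PAGE_MASK, amdgpu_bo_size() and vm_manager.max_pfn checks.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative 4 KiB page: these low bits must be clear for an aligned value.
 * The driver checks "addr & ~PAGE_MASK" instead; same idea, inverted mask. */
#define GPU_PAGE_MASK 0xfffULL

/* Sketch of the patched ordering in amdgpu_vm_bo_map()/amdgpu_vm_bo_replace_map():
 * alignment first, then wraparound/zero size, and only then the range check on
 * eaddr = saddr + size - 1.  The BO size comparison is omitted for brevity. */
static bool map_args_valid(uint64_t saddr, uint64_t offset, uint64_t size,
                           uint64_t max_addr)
{
        if ((saddr & GPU_PAGE_MASK) || (offset & GPU_PAGE_MASK) ||
            (size & GPU_PAGE_MASK))
                return false;

        /* Unsigned wraparound and size == 0 both make saddr + size <= saddr. */
        if (saddr + size <= saddr || offset + size <= offset)
                return false;

        return saddr + size - 1 < max_addr;
}

int main(void)
{
        /* saddr + size wraps around 2^64, so the request is rejected (prints 0). */
        printf("%d\n", map_args_valid(0xfffffffffffff000ULL, 0, 0x2000, UINT64_MAX));

        /* Aligned 1 MiB mapping well inside the address space (prints 1). */
        printf("%d\n", map_args_valid(0x100000, 0, 0x100000, 1ULL << 40));
        return 0;
}

Because the wraparound test runs before eaddr is derived and before any size comparison, an offset + size that wraps can no longer slip past the later amdgpu_bo_size() comparison in the real functions.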