author	Alex Deucher <alexander.deucher@amd.com>	2023-10-25 13:19:28 -0400
committer	Luben Tuikov <ltuikov89@gmail.com>	2023-10-26 16:04:24 -0400
commit	b70438004a14f4d0f9890b3297cd66248728546c (patch)
tree	e581bb3b83c617dd1a9a515bb69cc7e8fc08de4b /drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
parent	c07bf1636f0005f9eb7956404490672286ea59d3 (diff)
download	linux-b70438004a14f4d0f9890b3297cd66248728546c.tar.gz
	linux-b70438004a14f4d0f9890b3297cd66248728546c.tar.bz2
	linux-b70438004a14f4d0f9890b3297cd66248728546c.zip
drm/amdgpu: move buffer funcs setting up a level
Rather than doing this in the IP code for the SDMA paging engine, move
it up to the core device level init level. This should fix the
scheduler init ordering.

v2: drop extra parens
v3: drop SDMA helpers
v4: Added a Fixes tag because amdgpu dereferences an uninitialized
    scheduler without this patch, and this patch fixes this. (Luben)

Tested-by: Luben Tuikov <luben.tuikov@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://lore.kernel.org/r/20231025171928.3318505-1-alexander.deucher@amd.com
Acked-by: Christian König <christian.koenig@amd.com>
Fixes: 56e449603f0ac5 ("drm/sched: Convert the GPU scheduler to variable number of run-queues")
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
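For illustration only, the device-level counterpart of this change might look roughly like the sketch below. The helper names amdgpu_device_buffer_funcs_enable()/_disable() and their placement are assumptions made up for this sketch; only amdgpu_ttm_set_buffer_funcs_status() and adev->mman.buffer_funcs_ring are taken from the hunks in this patch.

/*
 * Sketch, not the literal amdgpu_device.c change: the idea is to attach
 * the TTM buffer funcs once all IP blocks (and thus the SDMA paging ring
 * and its scheduler) are initialized, and to detach them before the rings
 * are torn down, instead of doing both inside each SDMA version's code.
 */
#include "amdgpu.h"

/* hypothetical helper, called after IP init completes */
static void amdgpu_device_buffer_funcs_enable(struct amdgpu_device *adev)
{
	/* Safe only now: the paging ring and its scheduler exist. */
	if (adev->mman.buffer_funcs_ring &&
	    adev->mman.buffer_funcs_ring->sched.ready)
		amdgpu_ttm_set_buffer_funcs_status(adev, true);
}

/* hypothetical helper, called before hw_fini/suspend of the IP blocks */
static void amdgpu_device_buffer_funcs_disable(struct amdgpu_device *adev)
{
	/* Detach so nothing submits buffer moves to a ring being torn down. */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
}

Keeping the enable/disable at the device level puts the ordering relative to scheduler init/teardown in one place, which is why the hunks below can drop the per-instance handling from the SDMA v5.0 code.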
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c	10
1 file changed, 1 insertion(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 1cc34efb455b..7d7b86048e68 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -559,8 +559,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
 	u32 rb_cntl, ib_cntl;
 	int i;
 
-	amdgpu_sdma_unset_buffer_funcs_helper(adev);
-
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
@@ -825,9 +823,6 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 		r = amdgpu_ring_test_helper(ring);
 		if (r)
 			return r;
-
-		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_buffer_funcs_status(adev, true);
 	}
 
 	return 0;
@@ -1427,11 +1422,8 @@ static int sdma_v5_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (amdgpu_sriov_vf(adev)) {
-		/* disable the scheduler for SDMA */
-		amdgpu_sdma_unset_buffer_funcs_helper(adev);
+	if (amdgpu_sriov_vf(adev))
 		return 0;
-	}
 
 	sdma_v5_0_ctx_switch_enable(adev, false);
 	sdma_v5_0_enable(adev, false);