author | Ingo Molnar <mingo@kernel.org> | 2018-02-06 21:12:31 +0100 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2018-02-06 21:12:31 +0100 |
commit | 82845079160817cc6ac64e5321bbd935e0a47b3a (patch) | |
tree | 0886d1d52428e9db14536cae4b37db896e7c360a /drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |
parent | 32e839dda3ba576943365f0f5817ce5c843137dc (diff) | |
parent | 68c5735eaa5e680e701c9a2d1e3c7880bdf5ab66 (diff) | |
Merge branch 'linus' into sched/urgent, to resolve conflicts
Conflicts:
arch/arm64/kernel/entry.S
arch/x86/Kconfig
include/linux/sched/mm.h
kernel/fork.c
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 221 |
1 file changed, 105 insertions(+), 116 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 9ecdf621a74a..4e694ae9f308 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -679,55 +679,55 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
     switch (adev->asic_type) {
     case CHIP_TOPAZ:
-        amdgpu_program_register_sequence(adev,
-                iceland_mgcg_cgcg_init,
-                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
-        amdgpu_program_register_sequence(adev,
-                golden_settings_iceland_a11,
-                (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
-        amdgpu_program_register_sequence(adev,
-                iceland_golden_common_all,
-                (const u32)ARRAY_SIZE(iceland_golden_common_all));
+        amdgpu_device_program_register_sequence(adev,
+                iceland_mgcg_cgcg_init,
+                ARRAY_SIZE(iceland_mgcg_cgcg_init));
+        amdgpu_device_program_register_sequence(adev,
+                golden_settings_iceland_a11,
+                ARRAY_SIZE(golden_settings_iceland_a11));
+        amdgpu_device_program_register_sequence(adev,
+                iceland_golden_common_all,
+                ARRAY_SIZE(iceland_golden_common_all));
         break;
     case CHIP_FIJI:
-        amdgpu_program_register_sequence(adev,
-                fiji_mgcg_cgcg_init,
-                (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
-        amdgpu_program_register_sequence(adev,
-                golden_settings_fiji_a10,
-                (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
-        amdgpu_program_register_sequence(adev,
-                fiji_golden_common_all,
-                (const u32)ARRAY_SIZE(fiji_golden_common_all));
+        amdgpu_device_program_register_sequence(adev,
+                fiji_mgcg_cgcg_init,
+                ARRAY_SIZE(fiji_mgcg_cgcg_init));
+        amdgpu_device_program_register_sequence(adev,
+                golden_settings_fiji_a10,
+                ARRAY_SIZE(golden_settings_fiji_a10));
+        amdgpu_device_program_register_sequence(adev,
+                fiji_golden_common_all,
+                ARRAY_SIZE(fiji_golden_common_all));
         break;
     case CHIP_TONGA:
-        amdgpu_program_register_sequence(adev,
-                tonga_mgcg_cgcg_init,
-                (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
-        amdgpu_program_register_sequence(adev,
-                golden_settings_tonga_a11,
-                (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
-        amdgpu_program_register_sequence(adev,
-                tonga_golden_common_all,
-                (const u32)ARRAY_SIZE(tonga_golden_common_all));
+        amdgpu_device_program_register_sequence(adev,
+                tonga_mgcg_cgcg_init,
+                ARRAY_SIZE(tonga_mgcg_cgcg_init));
+        amdgpu_device_program_register_sequence(adev,
+                golden_settings_tonga_a11,
+                ARRAY_SIZE(golden_settings_tonga_a11));
+        amdgpu_device_program_register_sequence(adev,
+                tonga_golden_common_all,
+                ARRAY_SIZE(tonga_golden_common_all));
         break;
     case CHIP_POLARIS11:
     case CHIP_POLARIS12:
-        amdgpu_program_register_sequence(adev,
-                golden_settings_polaris11_a11,
-                (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
-        amdgpu_program_register_sequence(adev,
-                polaris11_golden_common_all,
-                (const u32)ARRAY_SIZE(polaris11_golden_common_all));
+        amdgpu_device_program_register_sequence(adev,
+                golden_settings_polaris11_a11,
+                ARRAY_SIZE(golden_settings_polaris11_a11));
+        amdgpu_device_program_register_sequence(adev,
+                polaris11_golden_common_all,
+                ARRAY_SIZE(polaris11_golden_common_all));
         break;
     case CHIP_POLARIS10:
-        amdgpu_program_register_sequence(adev,
-                golden_settings_polaris10_a11,
-                (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
-        amdgpu_program_register_sequence(adev,
-                polaris10_golden_common_all,
-                (const u32)ARRAY_SIZE(polaris10_golden_common_all));
+        amdgpu_device_program_register_sequence(adev,
+                golden_settings_polaris10_a11,
+                ARRAY_SIZE(golden_settings_polaris10_a11));
+        amdgpu_device_program_register_sequence(adev,
+                polaris10_golden_common_all,
+                ARRAY_SIZE(polaris10_golden_common_all));
         WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
         if (adev->pdev->revision == 0xc7 &&
             ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
@@ -738,26 +738,26 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
         }
         break;
     case CHIP_CARRIZO:
-        amdgpu_program_register_sequence(adev,
-                cz_mgcg_cgcg_init,
-                (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
-        amdgpu_program_register_sequence(adev,
-                cz_golden_settings_a11,
-                (const u32)ARRAY_SIZE(cz_golden_settings_a11));
-        amdgpu_program_register_sequence(adev,
-                cz_golden_common_all,
-                (const u32)ARRAY_SIZE(cz_golden_common_all));
+        amdgpu_device_program_register_sequence(adev,
+                cz_mgcg_cgcg_init,
+                ARRAY_SIZE(cz_mgcg_cgcg_init));
+        amdgpu_device_program_register_sequence(adev,
+                cz_golden_settings_a11,
+                ARRAY_SIZE(cz_golden_settings_a11));
+        amdgpu_device_program_register_sequence(adev,
+                cz_golden_common_all,
+                ARRAY_SIZE(cz_golden_common_all));
         break;
     case CHIP_STONEY:
-        amdgpu_program_register_sequence(adev,
-                stoney_mgcg_cgcg_init,
-                (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
-        amdgpu_program_register_sequence(adev,
-                stoney_golden_settings_a11,
-                (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
-        amdgpu_program_register_sequence(adev,
-                stoney_golden_common_all,
-                (const u32)ARRAY_SIZE(stoney_golden_common_all));
+        amdgpu_device_program_register_sequence(adev,
+                stoney_mgcg_cgcg_init,
+                ARRAY_SIZE(stoney_mgcg_cgcg_init));
+        amdgpu_device_program_register_sequence(adev,
+                stoney_golden_settings_a11,
+                ARRAY_SIZE(stoney_golden_settings_a11));
+        amdgpu_device_program_register_sequence(adev,
+                stoney_golden_common_all,
+                ARRAY_SIZE(stoney_golden_common_all));
         break;
     default:
         break;
@@ -804,7 +804,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
         DRM_UDELAY(1);
     }
     if (i < adev->usec_timeout) {
-        DRM_INFO("ring test on %d succeeded in %d usecs\n",
+        DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
              ring->idx, i);
     } else {
         DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
@@ -856,7 +856,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
     }
     tmp = RREG32(scratch);
     if (tmp == 0xDEADBEEF) {
-        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+        DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
         r = 0;
     } else {
         DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
@@ -2114,7 +2114,6 @@ static int gfx_v8_0_sw_fini(void *handle)
     amdgpu_gfx_compute_mqd_sw_fini(adev);
     amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
     amdgpu_gfx_kiq_fini(adev);
-    amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
 
     gfx_v8_0_mec_fini(adev);
     gfx_v8_0_rlc_fini(adev);
@@ -3851,6 +3850,14 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
                     break;
                 udelay(1);
             }
+            if (k == adev->usec_timeout) {
+                gfx_v8_0_select_se_sh(adev, 0xffffffff,
+                              0xffffffff, 0xffffffff);
+                mutex_unlock(&adev->grbm_idx_mutex);
+                DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
+                     i, j);
+                return;
+            }
         }
     }
     gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
@@ -4305,37 +4312,8 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
     amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
     amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
-    switch (adev->asic_type) {
-    case CHIP_TONGA:
-    case CHIP_POLARIS10:
-        amdgpu_ring_write(ring, 0x16000012);
-        amdgpu_ring_write(ring, 0x0000002A);
-        break;
-    case CHIP_POLARIS11:
-    case CHIP_POLARIS12:
-        amdgpu_ring_write(ring, 0x16000012);
-        amdgpu_ring_write(ring, 0x00000000);
-        break;
-    case CHIP_FIJI:
-        amdgpu_ring_write(ring, 0x3a00161a);
-        amdgpu_ring_write(ring, 0x0000002e);
-        break;
-    case CHIP_CARRIZO:
-        amdgpu_ring_write(ring, 0x00000002);
-        amdgpu_ring_write(ring, 0x00000000);
-        break;
-    case CHIP_TOPAZ:
-        amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
-                0x00000000 : 0x00000002);
-        amdgpu_ring_write(ring, 0x00000000);
-        break;
-    case CHIP_STONEY:
-        amdgpu_ring_write(ring, 0x00000000);
-        amdgpu_ring_write(ring, 0x00000000);
-        break;
-    default:
-        BUG();
-    }
+    amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
+    amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
 
     amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
     amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -4816,7 +4794,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 
     gfx_v8_0_kiq_setting(ring);
 
-    if (adev->in_sriov_reset) { /* for GPU_RESET case */
+    if (adev->in_gpu_reset) { /* for GPU_RESET case */
         /* reset MQD to a clean status */
         if (adev->gfx.mec.mqd_backup[mqd_idx])
             memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4853,7 +4831,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
     struct vi_mqd *mqd = ring->mqd_ptr;
     int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
-    if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+    if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
         memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
         ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
         ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4865,13 +4843,10 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 
         if (adev->gfx.mec.mqd_backup[mqd_idx])
             memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
-    } else if (adev->in_sriov_reset) { /* for GPU_RESET case */
+    } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
         /* reset MQD to a clean status */
         if (adev->gfx.mec.mqd_backup[mqd_idx])
             memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
-        /* reset ring buffer */
-        ring->wptr = 0;
-        amdgpu_ring_clear_ring(ring);
     } else {
         amdgpu_ring_clear_ring(ring);
     }
@@ -4946,6 +4921,13 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
     /* Test KCQs */
     for (i = 0; i < adev->gfx.num_compute_rings; i++) {
         ring = &adev->gfx.compute_ring[i];
+        if (adev->in_gpu_reset) {
+            /* move reset ring buffer to here to workaround
+             * compute ring test failed
+             */
+            ring->wptr = 0;
+            amdgpu_ring_clear_ring(ring);
+        }
         ring->ready = true;
         r = amdgpu_ring_test_ring(ring);
         if (r)
@@ -5080,8 +5062,9 @@ static int gfx_v8_0_hw_fini(void *handle)
     gfx_v8_0_cp_enable(adev, false);
     gfx_v8_0_rlc_stop(adev);
 
-    amdgpu_set_powergating_state(adev,
-            AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE);
+    amdgpu_device_ip_set_powergating_state(adev,
+                           AMD_IP_BLOCK_TYPE_GFX,
+                           AMD_PG_STATE_UNGATE);
 
     return 0;
 }
@@ -5498,8 +5481,9 @@ static int gfx_v8_0_late_init(void *handle)
     if (r)
         return r;
 
-    amdgpu_set_powergating_state(adev,
-            AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE);
+    amdgpu_device_ip_set_powergating_state(adev,
+                           AMD_IP_BLOCK_TYPE_GFX,
+                           AMD_PG_STATE_GATE);
 
     return 0;
 }
@@ -5510,10 +5494,10 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
     if ((adev->asic_type == CHIP_POLARIS11) ||
         (adev->asic_type == CHIP_POLARIS12))
         /* Send msg to SMU via Powerplay */
-        amdgpu_set_powergating_state(adev,
-                         AMD_IP_BLOCK_TYPE_SMC,
-                         enable ?
-                         AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+        amdgpu_device_ip_set_powergating_state(adev,
+                               AMD_IP_BLOCK_TYPE_SMC,
+                               enable ?
+                               AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
 
     WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
 }
@@ -6261,7 +6245,7 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                       struct amdgpu_ib *ib,
-                      unsigned vm_id, bool ctx_switch)
+                      unsigned vmid, bool ctx_switch)
 {
     u32 header, control = 0;
 
@@ -6270,7 +6254,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
     else
         header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
 
-    control |= ib->length_dw | (vm_id << 24);
+    control |= ib->length_dw | (vmid << 24);
 
     if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
         control |= INDIRECT_BUFFER_PRE_ENB(1);
@@ -6291,9 +6275,9 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 
 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                       struct amdgpu_ib *ib,
-                      unsigned vm_id, bool ctx_switch)
+                      unsigned vmid, bool ctx_switch)
 {
-    u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+    u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
 
     amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
     amdgpu_ring_write(ring,
@@ -6344,7 +6328,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 }
 
 static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                    unsigned vm_id, uint64_t pd_addr)
+                    unsigned vmid, uint64_t pd_addr)
 {
     int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
 
@@ -6352,12 +6336,12 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
     amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
                  WRITE_DATA_DST_SEL(0)) |
                  WR_CONFIRM);
-    if (vm_id < 8) {
+    if (vmid < 8) {
         amdgpu_ring_write(ring,
-                  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+                  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
     } else {
         amdgpu_ring_write(ring,
-                  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+                  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
     }
     amdgpu_ring_write(ring, 0);
     amdgpu_ring_write(ring, pd_addr >> 12);
@@ -6369,7 +6353,7 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                  WRITE_DATA_DST_SEL(0)));
     amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
     amdgpu_ring_write(ring, 0);
-    amdgpu_ring_write(ring, 1 << vm_id);
+    amdgpu_ring_write(ring, 1 << vmid);
 
     /* wait for the invalidate to complete */
     amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -6490,10 +6474,10 @@ static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
     mutex_unlock(&adev->srbm_mutex);
 }
 static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
-                           enum amd_sched_priority priority)
+                           enum drm_sched_priority priority)
 {
     struct amdgpu_device *adev = ring->adev;
-    bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
+    bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
 
     if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
         return;
@@ -7132,6 +7116,11 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
 
     cu_info->number = active_cu_number;
     cu_info->ao_cu_mask = ao_cu_mask;
+    cu_info->simd_per_cu = NUM_SIMD_PER_CU;
+    cu_info->max_waves_per_simd = 10;
+    cu_info->max_scratch_slots_per_cu = 32;
+    cu_info->wave_front_size = 64;
+    cu_info->lds_size = 64;
 }
 
 const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
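
The one change above that is more than a rename is in gfx_v8_0_cp_gfx_start(): the per-ASIC switch of hard-coded PA_SC_RASTER_CONFIG values is dropped in favour of the raster_config/raster_config_1 pair cached in adev->gfx.config.rb_config[0][0]. The following standalone sketch only illustrates that pattern; the struct layout and helpers are simplified stand-ins, not the real amdgpu definitions.

```c
/*
 * Standalone sketch (not kernel code): mimics the gfx_v8_0_cp_gfx_start()
 * change above, where hard-coded per-ASIC PA_SC_RASTER_CONFIG values are
 * replaced by values precomputed at init time and cached per SE/SH.
 * The types below are hypothetical stand-ins for the amdgpu structures.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct rb_config {
	uint32_t raster_config;
	uint32_t raster_config_1;
};

struct fake_adev {
	struct rb_config rb_config[4][4];	/* [se][sh], sized arbitrarily */
};

/* Stand-in for amdgpu_ring_write(): just prints the dword being emitted. */
static void ring_write(uint32_t dword)
{
	printf("emit 0x%08" PRIx32 "\n", dword);
}

/*
 * New style: instead of a switch over asic_type emitting magic numbers,
 * emit whatever the init code computed and cached for SE0/SH0.
 */
static void emit_raster_config(const struct fake_adev *adev)
{
	ring_write(adev->rb_config[0][0].raster_config);
	ring_write(adev->rb_config[0][0].raster_config_1);
}

int main(void)
{
	struct fake_adev adev = { 0 };

	/* Values an init routine might have derived for a Tonga-like part. */
	adev.rb_config[0][0].raster_config = 0x16000012;
	adev.rb_config[0][0].raster_config_1 = 0x0000002A;

	emit_raster_config(&adev);
	return 0;
}
```

Reading the cached values keeps the command-stream path free of per-ASIC magic numbers and consistent with whatever the render-backend setup code derived at init time.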