Diffstat (limited to 'drivers/gpu/drm/amd')
95 files changed, 1118 insertions, 536 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 751557af09bb..a15a4787c7ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -297,7 +297,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd); void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm); -void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); +void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo); void amdgpu_amdkfd_reserve_system_mem(uint64_t size); #else static inline @@ -312,7 +312,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, } static inline -void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) +void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) { } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 0e9cfe99ae9e..6348559608ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -207,7 +207,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev, spin_unlock(&kfd_mem_limit.mem_limit_lock); } -void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) +void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); u32 domain = bo->preferred_domains; @@ -219,6 +219,8 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) } unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg); + + kfree(bo->kfd_bo); } @@ -644,12 +646,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, if (IS_ERR(gobj)) return PTR_ERR(gobj); - /* Import takes an extra reference on the dmabuf. Drop it now to - * avoid leaking it. We only need the one reference in - * kgd_mem->dmabuf. - */ - dma_buf_put(mem->dmabuf); - *bo = gem_to_amdgpu_bo(gobj); (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; (*bo)->parent = amdgpu_bo_ref(mem->bo); @@ -734,14 +730,19 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, } /* Add BO to VM internal data structures */ + ret = amdgpu_bo_reserve(bo[i], false); + if (ret) { + pr_debug("Unable to reserve BO during memory attach"); + goto unwind; + } attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); + amdgpu_bo_unreserve(bo[i]); if (unlikely(!attachment[i]->bo_va)) { ret = -ENOMEM; pr_err("Failed to add BO object to VM. 
ret == %d\n", ret); goto unwind; } - attachment[i]->va = va; attachment[i]->pte_flags = get_pte_flags(adev, mem); attachment[i]->adev = adev; @@ -757,7 +758,9 @@ unwind: if (!attachment[i]) continue; if (attachment[i]->bo_va) { + amdgpu_bo_reserve(bo[i], true); amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va); + amdgpu_bo_unreserve(bo[i]); list_del(&attachment[i]->list); } if (bo[i]) @@ -1393,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct sg_table *sg = NULL; uint64_t user_addr = 0; struct amdgpu_bo *bo; - struct drm_gem_object *gobj; + struct drm_gem_object *gobj = NULL; u32 domain, alloc_domain; u64 alloc_flags; int ret; @@ -1503,14 +1506,16 @@ allocate_init_user_pages_failed: remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); drm_vma_node_revoke(&gobj->vma_node, drm_priv); err_node_allow: - drm_gem_object_put(gobj); /* Don't unreserve system mem limit twice */ goto err_reserve_limit; err_bo_create: unreserve_mem_limit(adev, size, alloc_domain, !!sg); err_reserve_limit: mutex_destroy(&(*mem)->lock); - kfree(*mem); + if (gobj) + drm_gem_object_put(gobj); + else + kfree(*mem); err: if (sg) { sg_free_table(sg); @@ -1568,12 +1573,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue)); - ret = unreserve_bo_and_vms(&ctx, false, false); - /* Remove from VM internal data structures */ list_for_each_entry_safe(entry, tmp, &mem->attachments, list) kfd_mem_detach(entry); + ret = unreserve_bo_and_vms(&ctx, false, false); + /* Free the sync object */ amdgpu_sync_free(&mem->sync); @@ -1600,9 +1605,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); if (mem->dmabuf) dma_buf_put(mem->dmabuf); - drm_gem_object_put(&mem->bo->tbo.base); mutex_destroy(&mem->lock); - kfree(mem); + + /* If this releases the last reference, it will end up calling + * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why + * this needs to be the last call here. 
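 *
 * A condensed sketch of the expected call chain (intermediate TTM
 * release steps assumed, not spelled out in this diff):
 *
 *   drm_gem_object_put(&mem->bo->tbo.base)
 *     -> last reference drops, TTM releases the BO
 *       -> amdgpu_bo_release_notify()          (see amdgpu_object.c)
 *         -> amdgpu_amdkfd_release_notify(abo)
 *              unreserve_mem_limit(...);
 *              kfree(bo->kfd_bo);              (frees *mem)
 *
 * Touching *mem after the put below would therefore be a
 * use-after-free.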
+ */ + drm_gem_object_put(&mem->bo->tbo.base); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 96b7bb13a2dd..12a6b1c99c93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, WREG32(adev->bios_scratch_reg_offset + 3, tmp); } +void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev, + u32 backlight_level) +{ + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2); + + tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; + tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) & + ATOM_S2_CURRENT_BL_LEVEL_MASK; + + WREG32(adev->bios_scratch_reg_offset + 2, tmp); +} + bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) { u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 8cc0222dba19..27e74b1fc260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung); +void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev, + u32 backlight_level); bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev); void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index b9c11c2b2885..0de66f59adb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -827,6 +827,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) amdgpu_connector_get_edid(connector); ret = amdgpu_connector_ddc_get_modes(connector); + amdgpu_get_native_mode(connector); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6e40cc1bc6dc..1e651b959141 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2398,10 +2398,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (!adev->gmc.xgmi.pending_reset) amdgpu_amdkfd_device_init(adev); - r = amdgpu_amdkfd_resume_iommu(adev); - if (r) - goto init_failed; - amdgpu_fru_get_product_info(adev); init_failed: @@ -3171,11 +3167,21 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) { switch (asic_type) { #if defined(CONFIG_DRM_AMD_DC) -#if defined(CONFIG_DRM_AMD_DC_SI) case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: + /* + * We have systems in the wild with these ASICs that require + * LVDS and VGA support which is not supported with DC. + * + * Fallback to the non-DC driver here by default so as not to + * cause regressions. 
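 *
 * (amdgpu_dc is the amdgpu.dc= module parameter, -1/"auto" by
 * default; "amdgpu_dc > 0" below is therefore only true when the
 * user explicitly opted in with amdgpu.dc=1.)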
+ */ +#if defined(CONFIG_DRM_AMD_DC_SI) + return amdgpu_dc > 0; +#else + return false; #endif case CHIP_BONAIRE: case CHIP_KAVERI: @@ -3503,6 +3509,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->rmmio_size = pci_resource_len(adev->pdev, 2); } + for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) + atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); + adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); if (adev->rmmio == NULL) { return -ENOMEM; @@ -3824,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ - if (!amdgpu_device_has_dc_support(adev)) + if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) drm_helper_force_disable_all(adev_to_drm(adev)); else drm_atomic_helper_shutdown(adev_to_drm(adev)); @@ -4280,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, { int r; + amdgpu_amdkfd_pre_reset(adev); + if (from_hypervisor) r = amdgpu_virt_request_full_gpu(adev, true); else @@ -4287,8 +4298,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) return r; - amdgpu_amdkfd_pre_reset(adev); - /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); if (r) @@ -4850,6 +4859,9 @@ static void amdgpu_device_recheck_guilty_jobs( /* clear job's guilty and depend the folowing step to decide the real one */ drm_sched_reset_karma(s_job); + /* for the real bad job, it will be resubmitted twice, adding a dma_fence_get + * to make sure fence is balanced */ + dma_fence_get(s_job->s_fence->parent); drm_sched_resubmit_jobs_ext(&ring->sched, 1); ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout); @@ -4885,6 +4897,7 @@ retry: /* got the hw fence, signal finished fence */ atomic_dec(ring->sched.score); + dma_fence_put(s_job->s_fence->parent); dma_fence_get(&s_job->s_fence->finished); dma_fence_signal(&s_job->s_fence->finished); dma_fence_put(&s_job->s_fence->finished); @@ -5079,7 +5092,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter)); /* Actual ASIC resets if needed.*/ - /* TODO Implement XGMI hive reset logic for SRIOV */ + /* Host driver will handle XGMI hive reset for SRIOV */ if (amdgpu_sriov_vf(adev)) { r = amdgpu_device_reset_sriov(adev, job ? 
false : true); if (r) @@ -5120,7 +5133,7 @@ skip_hw_reset: drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); } - if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { + if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); } @@ -5141,7 +5154,7 @@ skip_sched_resume: list_for_each_entry(tmp_adev, device_list_handle, reset_list) { /* unlock kfd: SRIOV would do it separately */ if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) - amdgpu_amdkfd_post_reset(tmp_adev); + amdgpu_amdkfd_post_reset(tmp_adev); /* kfd_post_reset will do nothing if kfd device is not initialized, * need to bring up kfd here if it's not be initialized before diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index d7c8d9e3c203..ea00090b3fb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = { [HDP_HWIP] = HDP_HWID, [SDMA0_HWIP] = SDMA0_HWID, [SDMA1_HWIP] = SDMA1_HWID, + [SDMA2_HWIP] = SDMA2_HWID, + [SDMA3_HWIP] = SDMA3_HWID, [MMHUB_HWIP] = MMHUB_HWID, [ATHUB_HWIP] = ATHUB_HWID, [NBIO_HWIP] = NBIF_HWID, @@ -248,8 +250,8 @@ get_from_vram: offset = offsetof(struct binary_header, binary_checksum) + sizeof(bhdr->binary_checksum); - size = bhdr->binary_size - offset; - checksum = bhdr->binary_checksum; + size = le16_to_cpu(bhdr->binary_size) - offset; + checksum = le16_to_cpu(bhdr->binary_checksum); if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, size, checksum)) { @@ -270,7 +272,7 @@ get_from_vram: } if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - ihdr->size, checksum)) { + le16_to_cpu(ihdr->size), checksum)) { DRM_ERROR("invalid ip discovery data table checksum\n"); r = -EINVAL; goto out; @@ -282,7 +284,7 @@ get_from_vram: ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset); if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - ghdr->size, checksum)) { + le32_to_cpu(ghdr->size), checksum)) { DRM_ERROR("invalid gc data table checksum\n"); r = -EINVAL; goto out; @@ -489,10 +491,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset)); for (i = 0; i < 32; i++) { - if (le32_to_cpu(harvest_info->list[i].hw_id) == 0) + if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) break; - switch (le32_to_cpu(harvest_info->list[i].hw_id)) { + switch (le16_to_cpu(harvest_info->list[i].hw_id)) { case VCN_HWID: vcn_harvest_count++; if (harvest_info->list[i].number_instance == 0) @@ -587,6 +589,9 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &nv_common_ip_block); break; default: + dev_err(adev->dev, + "Failed to add common ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -619,6 +624,9 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add gmc ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -648,6 +656,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); break; default: + dev_err(adev->dev, + "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", + 
adev->ip_versions[OSSSYS_HWIP][0]); return -EINVAL; } return 0; @@ -688,6 +699,9 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add psp ip block(MP0_HWIP:0x%x)\n", + adev->ip_versions[MP0_HWIP][0]); return -EINVAL; } return 0; @@ -726,6 +740,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add smu ip block(MP1_HWIP:0x%x)\n", + adev->ip_versions[MP1_HWIP][0]); return -EINVAL; } return 0; @@ -753,6 +770,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); break; default: + dev_err(adev->dev, + "Failed to add dm ip block(DCE_HWIP:0x%x)\n", + adev->ip_versions[DCE_HWIP][0]); return -EINVAL; } } else if (adev->ip_versions[DCI_HWIP][0]) { @@ -763,6 +783,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); break; default: + dev_err(adev->dev, + "Failed to add dm ip block(DCI_HWIP:0x%x)\n", + adev->ip_versions[DCI_HWIP][0]); return -EINVAL; } #endif @@ -796,6 +819,9 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add gfx ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -829,6 +855,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); break; default: + dev_err(adev->dev, + "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", + adev->ip_versions[SDMA0_HWIP][0]); return -EINVAL; } return 0; @@ -845,6 +874,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", + adev->ip_versions[UVD_HWIP][0]); return -EINVAL; } switch (adev->ip_versions[VCE_HWIP][0]) { @@ -855,6 +887,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", + adev->ip_versions[VCE_HWIP][0]); return -EINVAL; } } else { @@ -867,7 +902,8 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(2, 0, 2): case IP_VERSION(2, 2, 0): amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; case IP_VERSION(2, 0, 3): break; @@ -881,8 +917,10 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 16): + case IP_VERSION(3, 0, 64): case IP_VERSION(3, 1, 1): case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 192): amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); @@ -891,6 +929,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", + 
adev->ip_versions[UVD_HWIP][0]); return -EINVAL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index a573424a6e0b..a1e63ba4c54a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -60,9 +60,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf) goto unlock; } - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, - TTM_BO_VM_NUM_PREFAULT, 1); - drm_dev_exit(idx); + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT); + + drm_dev_exit(idx); } else { ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index f3d62e196901..0c7963dfacad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, */ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - unsigned int count = AMDGPU_IH_MAX_NUM_IVS; + unsigned int count; u32 wptr; if (!ih->enabled || adev->shutdown) @@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) wptr = amdgpu_ih_get_wptr(adev, ih); restart_ih: + count = AMDGPU_IH_MAX_NUM_IVS; DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index dfe667ea8b05..651c7abfde03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1423,6 +1423,8 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) struct drm_amdgpu_info_firmware fw_info; struct drm_amdgpu_query_fw query_fw; struct atom_context *ctx = adev->mode_info.atom_context; + uint8_t smu_minor, smu_debug; + uint16_t smu_major; int ret, i; static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = { @@ -1568,8 +1570,11 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); if (ret) return ret; - seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n", - fw_info.feature, fw_info.ver); + smu_major = (fw_info.ver >> 16) & 0xffff; + smu_minor = (fw_info.ver >> 8) & 0xff; + smu_debug = (fw_info.ver >> 0) & 0xff; + seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x (%d.%d.%d)\n", + fw_info.feature, fw_info.ver, smu_major, smu_minor, smu_debug); /* SDMA */ query_fw.fw_type = AMDGPU_INFO_FW_SDMA; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index aeb92e5677ac..4fcfc2313b8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1274,7 +1274,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) abo = ttm_to_amdgpu_bo(bo); if (abo->kfd_bo) - amdgpu_amdkfd_unreserve_memory_limit(abo); + amdgpu_amdkfd_release_notify(abo); /* We only remove the fence if the resv has individualized. 
*/ WARN_ON_ONCE(bo->type == ttm_bo_type_kernel diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 2658414c503d..585961c2f5f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -134,6 +134,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) adev->vcn.indirect_sram = true; break; case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) fw_name = FIRMWARE_SIENNA_CICHLID; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index ce982afeff91..ac9a8cd21c4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle) int i = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) - if (adev->mode_info.crtcs[i]) - hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); + if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function) + hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer); kfree(adev->mode_info.bios_hardcoded_edid); kfree(adev->amdgpu_vkms_output); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 978ac927ac11..567df2db23ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -386,6 +386,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) "%s", "xgmi_hive_info"); if (ret) { dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n"); + kobject_put(&hive->kobj); kfree(hive); hive = NULL; goto pro_end; @@ -806,9 +807,9 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) pcs_clear_status(adev, xgmi23_pcs_err_status_reg_aldebaran[i]); - for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) + for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) pcs_clear_status(adev, - xgmi23_pcs_err_status_reg_aldebaran[i]); + xgmi3x16_pcs_err_status_reg_aldebaran[i]); for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) pcs_clear_status(adev, walf_pcs_err_status_reg_aldebaran[i]); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 90a834dc4008..dbe7442fb25c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -7707,8 +7707,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 3): - clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) | - ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL); + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. 
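 *
 * The read sequence below is the usual lock-free way to sample a
 * 64-bit counter split across two 32-bit registers; as a generic
 * sketch (loop form, read_upper()/read_lower() are placeholder
 * helpers):
 *
 *   do {
 *       hi = read_upper();
 *       lo = read_lower();
 *   } while (read_upper() != hi);  // carry hit between the reads
 *
 * A single retry, as coded here, suffices given the ~42 s carry
 * period.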
+ */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); break; default: preempt_disable(); @@ -8249,6 +8260,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 reg, data; + + amdgpu_gfx_off_ctrl(adev, false); + /* not for *_SOC15 */ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) @@ -8263,6 +8277,8 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); else WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); + + amdgpu_gfx_off_ctrl(adev, true); } static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev, @@ -8316,11 +8332,8 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable) if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(10, 3, 1): - data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; - WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); - break; case IP_VERSION(10, 3, 3): - data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; + data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 37b4a3db6360..d17a6f399347 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3575,12 +3575,16 @@ static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 data; + amdgpu_gfx_off_ctrl(adev, false); + data = RREG32(mmRLC_SPM_VMID); data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; WREG32(mmRLC_SPM_VMID, data); + + amdgpu_gfx_off_ctrl(adev, true); } static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index e0302c23e9a7..5f112efda634 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5624,6 +5624,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 data; + amdgpu_gfx_off_ctrl(adev, false); + if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(mmRLC_SPM_VMID); else @@ -5636,6 +5638,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_NO_KIQ(mmRLC_SPM_VMID, data); else WREG32(mmRLC_SPM_VMID, data); + + amdgpu_gfx_off_ctrl(adev, true); } static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 7f944bb11298..edb3e3b08eed 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin"); #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 +#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025 +#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026 +#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1 + enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -2462,7 +2467,9 @@ static int 
gfx_v9_0_sw_fini(void *handle) amdgpu_gfx_kiq_fini(adev); gfx_v9_0_mec_fini(adev); - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (adev->flags & AMD_IS_APU) { amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, &adev->gfx.rlc.cp_table_gpu_addr, @@ -3063,8 +3070,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GDS | AMD_PG_SUPPORT_RLC_SMU_HS)) { - WREG32(mmRLC_JUMP_TABLE_RESTORE, - adev->gfx.rlc.cp_table_gpu_addr >> 8); + WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE, + adev->gfx.rlc.cp_table_gpu_addr >> 8); gfx_v9_0_init_gfx_power_gating(adev); } } @@ -4053,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle) gfx_v9_0_cp_enable(adev, false); - /* Skip suspend with A+A reset */ - if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { - dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n"); + /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */ + if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) || + (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) { + dev_dbg(adev->dev, "Skipping RLC halt\n"); return 0; } @@ -4236,19 +4244,38 @@ failed_kiq_read: static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { - uint64_t clock; + uint64_t clock, clock_lo, clock_hi, hi_check; - amdgpu_gfx_off_ctrl(adev, false); - mutex_lock(&adev->gfx.gpu_clock_mutex); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { - clock = gfx_v9_0_kiq_read_clock(adev); - } else { - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 3, 0): + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. 
+ */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); + break; + default: + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { + clock = gfx_v9_0_kiq_read_clock(adev); + } else { + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + } + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); + break; } - mutex_unlock(&adev->gfx.gpu_clock_mutex); - amdgpu_gfx_off_ctrl(adev, true); return clock; } @@ -5102,6 +5129,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 reg, data; + amdgpu_gfx_off_ctrl(adev, false); + reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(reg); @@ -5115,6 +5144,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); else WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); + + amdgpu_gfx_off_ctrl(adev, true); } static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index bda1542ef1dd..ec4d5e15b766 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -162,7 +162,6 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); @@ -348,6 +347,10 @@ static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev) WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL, i * hub->ctx_distance, 0); + if (amdgpu_sriov_vf(adev)) + /* Avoid write to GMC registers */ + return; + /* Setup TLB control */ tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c index 497b86c376c6..90f0aefbdb39 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c @@ -54,15 +54,17 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) seg_size = REG_GET_FIELD( RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE_ALDE), MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + max_region = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE, PF_MAX_REGION); } else { xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL); seg_size = REG_GET_FIELD( RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE), MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + max_region = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); } - max_region = - REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); switch (adev->asic_type) { @@ -89,9 +91,15 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes) return -EINVAL; - adev->gmc.xgmi.physical_node_id = - REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, - PF_LFB_REGION); + if (adev->asic_type == CHIP_ALDEBARAN) { + adev->gmc.xgmi.physical_node_id = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE, + PF_LFB_REGION); + } else { + adev->gmc.xgmi.physical_node_id = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, + PF_LFB_REGION); + } if (adev->gmc.xgmi.physical_node_id > max_physical_node_id) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index 14c1c1a297dd..6e0ace2fbfab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -196,7 +196,6 @@ static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index e80d1dc43079..b4eddf6e98a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -197,7 +197,6 @@ static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index cb82404df534..d84523cf5f75 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1808,6 +1808,14 @@ static int gmc_v9_0_hw_fini(void *handle) return 0; } + /* + * Pair the operations did in gmc_v9_0_hw_init and thus maintain + * a correct cached state for GMC. 
Otherwise, the "gate" again + * operation on S3 resuming will fail due to wrong cached state. + */ + if (adev->mmhub.funcs->update_power_gating) + adev->mmhub.funcs->update_power_gating(adev, false); + amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index a99953833820..1da2ec692057 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -145,7 +145,6 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. */ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); @@ -302,10 +301,10 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return; - if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) { - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true); - - } + if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB) + amdgpu_dpm_set_powergating_by_smu(adev, + AMD_IP_BLOCK_TYPE_GMC, + enable); } static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index f80a14a1b82d..f5f7181f9af5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -165,7 +165,6 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. 
*/ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 25f8e93e5ec3..3718ff610ab2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -267,7 +267,6 @@ static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index a11d60ec6321..9e16da28505a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -194,7 +194,6 @@ static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev) ENABLE_ADVANCED_DRIVER_MODEL, 1); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); - tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0); tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC); /* UC, uncached */ diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index c4ef822bbe8c..ff49eeaf7882 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -190,8 +190,6 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid) tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, - ECO_BITS, 0); - tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, MTYPE, MTYPE_UC);/* XXX for emulation. */ tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1); diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 1d8414c3fadb..38241cf0e1f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -160,6 +160,7 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = RREG32(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 
1 : 0)); @@ -275,10 +276,8 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev, tmp = navi10_ih_rb_cntl(ih, tmp); if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); - if (ih == &adev->irq.ih1) { - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); + if (ih == &adev->irq.ih1) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); - } if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { @@ -319,7 +318,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) { struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; u32 ih_chicken; - u32 tmp; int ret; int i; @@ -363,15 +361,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell, ih[0]->doorbell_index); - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); - tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, - CLIENT18_IS_STORM_CLIENT, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp); - - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL); - tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp); - pci_set_master(adev->pdev); /* enable interrupts */ @@ -420,12 +409,19 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, u32 wptr, tmp; struct amdgpu_ih_regs *ih_regs; - wptr = le32_to_cpu(*ih->wptr_cpu); - ih_regs = &ih->ih_regs; + if (ih == &adev->irq.ih) { + /* Only ring0 supports writeback. On other rings fall back + * to register-based code with overflow checking below. + */ + wptr = le32_to_cpu(*ih->wptr_cpu); - if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) - goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + } + ih_regs = &ih->ih_regs; + + /* Double check that the overflow wasn't already cleared. 
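 * Ring 0 is the only IH ring with a wptr writeback slot, so for
 * ih1/ih2 this register read is the sole source of truth.  In the
 * unchanged tail of this function (outside the hunk) the overflow
 * is then consumed along the lines of:
 *
 *   wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
 *   ih->rptr = (wptr + 32) & ih->ptr_mask;  // skip clobbered IVs
 *
 * (sketch of the existing driver code, shown here only for context)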
*/ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -513,15 +509,11 @@ static int navi10_ih_self_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t wptr = cpu_to_le32(entry->src_data[0]); - switch (entry->ring_id) { case 1: - *adev->irq.ih1.wptr_cpu = wptr; schedule_work(&adev->irq.ih1_work); break; case 2: - *adev->irq.ih2.wptr_cpu = wptr; schedule_work(&adev->irq.ih2_work); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index 4ecd2b5808ce..ee7cab37dfd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -359,6 +359,10 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } #define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1 diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 0d2d629e2d6a..4bbacf1be25a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -276,6 +276,10 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CI_CNTL, data); + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index 3c00666a13e1..37a4039fdfc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -273,7 +273,9 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = { static void nbio_v7_0_init_registers(struct amdgpu_device *adev) { - + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = + SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c index 8f2a315e7c73..3444332ea110 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -371,6 +371,10 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data); } + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } const struct amdgpu_nbio_funcs nbio_v7_2_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index b8bd03d16dba..dc5e93756fea 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -362,7 +362,9 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = { static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { - + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev) @@ -692,6 +694,9 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) { 
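/* NBIO 7.4.4 is Aldebaran; the early return added below skips ASPM
 * programming on that part.  For reference, the IP_VERSION() macro
 * used in the comparison packs major/minor/rev as (definition quoted
 * from outside this diff):
 *
 *   #define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
 */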
uint32_t def, data; + if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4)) + return; + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index febc903adf58..2ec1ffb36b1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -182,6 +182,8 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, { switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): if (amdgpu_sriov_vf(adev)) { if (encode) *codecs = &sriov_sc_video_codecs_encode; @@ -730,8 +732,10 @@ static int nv_common_early_init(void *handle) #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; - adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + if (!amdgpu_sriov_vf(adev)) { + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + } adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &nv_pcie_rreg; @@ -1031,7 +1035,7 @@ static int nv_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ nv_enable_doorbell_aperture(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0c316a2d42ed..de9b55383e9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -971,8 +971,10 @@ static int soc15_common_early_init(void *handle) #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; - adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + if (!amdgpu_sriov_vf(adev)) { + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + } adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &soc15_pcie_rreg; @@ -1285,7 +1287,7 @@ static int soc15_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index d5d023a24269..2d558c2f417d 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -534,6 +534,19 @@ static int uvd_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (RREG32(mmUVD_STATUS) != 0) + uvd_v6_0_stop(adev); + + return 0; +} + +static int uvd_v6_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -558,17 +571,6 @@ static int uvd_v6_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (RREG32(mmUVD_STATUS) != 0) - uvd_v6_0_stop(adev); 
- - return 0; -} - -static int uvd_v6_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v6_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0fffaf859c59..3b119db16003 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -406,7 +406,7 @@ static const struct kfd_device_info aldebaran_device_info = { static const struct kfd_device_info renoir_device_info = { .asic_family = CHIP_RENOIR, .asic_name = "renoir", - .gfx_target_version = 90002, + .gfx_target_version = 90012, .max_pasid_bits = 16, .max_no_of_hqd = 24, .doorbell_size = 8, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 533b27b35fc9..93e33dd84dd4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1226,6 +1226,11 @@ static int stop_cpsch(struct device_queue_manager *dqm) bool hanging; dqm_lock(dqm); + if (!dqm->sched_running) { + dqm_unlock(dqm); + return 0; + } + if (!dqm->is_hws_hang) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); hanging = dqm->is_hws_hang || dqm->is_resetting; @@ -1430,7 +1435,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, if (!dqm->sched_running) return 0; - if (dqm->is_hws_hang) + if (dqm->is_hws_hang || dqm->is_resetting) return -EIO; if (!dqm->active_runlist) return retval; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 2e86692def19..d1388896f9c1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -308,7 +308,7 @@ * 16MB are reserved for kernel use (CWSR trap handler and kernel IB * for now). 
*/ -#define SVM_USER_BASE 0x1000000ull +#define SVM_USER_BASE (u64)(KFD_CWSR_TBA_TMA_SIZE + 2*PAGE_SIZE) #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE) #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 6d8634e40b3b..9b9c2b9bf2ef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -281,6 +281,19 @@ static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate) return cpages; } +static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) +{ + unsigned long upages = 0; + unsigned long i; + + for (i = 0; i < migrate->npages; i++) { + if (migrate->src[i] & MIGRATE_PFN_VALID && + !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) + upages++; + } + return upages; +} + static int svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, @@ -317,7 +330,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]); svm_migrate_get_vram_page(prange, migrate->dst[i]); migrate->dst[i] = migrate_pfn(migrate->dst[i]); - migrate->dst[i] |= MIGRATE_PFN_LOCKED; src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_TO_DEVICE); r = dma_mapping_error(dev, src[i]); @@ -610,7 +622,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, dst[i] >> PAGE_SHIFT, page_to_pfn(dpage)); migrate->dst[i] = migrate_pfn(page_to_pfn(dpage)); - migrate->dst[i] |= MIGRATE_PFN_LOCKED; j++; } @@ -634,10 +645,11 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end) { uint64_t npages = (end - start) >> PAGE_SHIFT; + unsigned long upages = npages; + unsigned long cpages = 0; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate; - unsigned long cpages = 0; dma_addr_t *scratch; size_t size; void *buf; @@ -671,6 +683,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, if (!cpages) { pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n", prange->start, prange->last); + upages = svm_migrate_unsuccessful_pages(&migrate); goto out_free; } if (cpages != npages) @@ -683,8 +696,9 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, scratch, npages); migrate_vma_pages(&migrate); - pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", - svm_migrate_successful_pages(&migrate), cpages, migrate.npages); + upages = svm_migrate_unsuccessful_pages(&migrate); + pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n", + upages, cpages, migrate.npages); svm_migrate_copy_done(adev, mfence); migrate_vma_finalize(&migrate); @@ -698,9 +712,9 @@ out: if (pdd) WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); - return cpages; + return upages; } - return r; + return r ? 
r : upages; } /** @@ -720,7 +734,7 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm) unsigned long addr; unsigned long start; unsigned long end; - unsigned long cpages = 0; + unsigned long upages = 0; long r = 0; if (!prange->actual_loc) { @@ -756,12 +770,12 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm) pr_debug("failed %ld to migrate\n", r); break; } else { - cpages += r; + upages += r; } addr = next; } - if (cpages) { + if (!upages) { svm_range_vram_node_free(prange); prange->actual_loc = 0; } @@ -784,7 +798,7 @@ static int svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, struct mm_struct *mm) { - int r; + int r, retries = 3; /* * TODO: for both devices with PCIe large bar or on same xgmi hive, skip @@ -793,9 +807,14 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); - r = svm_migrate_vram_to_ram(prange, mm); - if (r) - return r; + do { + r = svm_migrate_vram_to_ram(prange, mm); + if (r) + return r; + } while (prange->actual_loc && --retries); + + if (prange->actual_loc) + return -EDEADLK; return svm_migrate_ram_to_vram(prange, best_loc, mm); } @@ -840,6 +859,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) pr_debug("failed find process at fault address 0x%lx\n", addr); return VM_FAULT_SIGBUS; } + if (READ_ONCE(p->svms.faulting_task) == current) { + pr_debug("skipping ram migration\n"); + kfd_unref_process(p); + return 0; + } addr >>= PAGE_SHIFT; pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 4104b167e721..8fd48d0ed240 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -766,8 +766,10 @@ struct svm_range_list { struct list_head deferred_range_list; spinlock_t deferred_list_lock; atomic_t evicted_ranges; + atomic_t drain_pagefaults; struct delayed_work restore_work; DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE); + struct task_struct *faulting_task; }; /* Process data */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 457863861d6f..b993011cfa64 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1715,7 +1715,11 @@ int kfd_process_evict_queues(struct kfd_process *p) r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, &pdd->qpd); - if (r) { + /* evict return -EIO if HWS is hang or asic is resetting, in this case + * we would like to set all the queues to be in evicted state to prevent + * them been add back since they actually not be saved right now. 
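 * (On -EIO the queues stay flagged as evicted and their restore is
 * left to the reset path, rather than bailing out through the error
 * path below.)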
+ */ + if (r && r != -EIO) { pr_err("Failed to evict process queues\n"); goto fail; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index b691c8495d66..3cb4681c5f53 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1496,9 +1496,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm, next = min(vma->vm_end, end); npages = (next - addr) >> PAGE_SHIFT; + WRITE_ONCE(p->svms.faulting_task, current); r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, addr, npages, &hmm_range, readonly, true, owner); + WRITE_ONCE(p->svms.faulting_task, NULL); if (r) { pr_debug("failed %d to get svm range pages\n", r); goto unreserve_out; @@ -1572,7 +1574,6 @@ retry_flush_work: static void svm_range_restore_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); - struct amdkfd_process_info *process_info; struct svm_range_list *svms; struct svm_range *prange; struct kfd_process *p; @@ -1592,12 +1593,10 @@ static void svm_range_restore_work(struct work_struct *work) * the lifetime of this thread, kfd_process and mm will be valid. */ p = container_of(svms, struct kfd_process, svms); - process_info = p->kgd_process_info; mm = p->mm; if (!mm) return; - mutex_lock(&process_info->lock); svm_range_list_lock_and_flush_work(svms, mm); mutex_lock(&svms->lock); @@ -1650,7 +1649,6 @@ static void svm_range_restore_work(struct work_struct *work) out_reschedule: mutex_unlock(&svms->lock); mmap_write_unlock(mm); - mutex_unlock(&process_info->lock); /* If validation failed, reschedule another attempt */ if (evicted_ranges) { @@ -1966,10 +1964,16 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms) struct kfd_process_device *pdd; struct amdgpu_device *adev; struct kfd_process *p; + int drain; uint32_t i; p = container_of(svms, struct kfd_process, svms); +restart: + drain = atomic_read(&svms->drain_pagefaults); + if (!drain) + return; + for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { pdd = p->pdds[i]; if (!pdd) @@ -1981,6 +1985,8 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms) amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1); pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); } + if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain) + goto restart; } static void svm_range_deferred_list_work(struct work_struct *work) @@ -1988,35 +1994,41 @@ static void svm_range_deferred_list_work(struct work_struct *work) struct svm_range_list *svms; struct svm_range *prange; struct mm_struct *mm; + struct kfd_process *p; svms = container_of(work, struct svm_range_list, deferred_list_work); pr_debug("enter svms 0x%p\n", svms); + p = container_of(svms, struct kfd_process, svms); + /* Avoid mm is gone when inserting mmu notifier */ + mm = get_task_mm(p->lead_thread); + if (!mm) { + pr_debug("svms 0x%p process mm gone\n", svms); + return; + } +retry: + mmap_write_lock(mm); + + /* Checking for the need to drain retry faults must be inside + * mmap write lock to serialize with munmap notifiers. 
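 *
 * The drain_pagefaults atomic acts as a generation counter; a
 * condensed sketch of the protocol, pieced together from the hunks
 * in this file:
 *
 *   unmap notifier:  atomic_inc(&svms->drain_pagefaults);
 *   drain worker:    drain = atomic_read(&svms->drain_pagefaults);
 *                    ...wait on the IH checkpoint for each GPU...
 *                    if (atomic_cmpxchg(&svms->drain_pagefaults,
 *                                       drain, 0) != drain)
 *                            goto restart;  // new unmaps raced in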
+ */ + if (unlikely(atomic_read(&svms->drain_pagefaults))) { + mmap_write_unlock(mm); + svm_range_drain_retry_fault(svms); + goto retry; + } + spin_lock(&svms->deferred_list_lock); while (!list_empty(&svms->deferred_range_list)) { prange = list_first_entry(&svms->deferred_range_list, struct svm_range, deferred_list); + list_del_init(&prange->deferred_list); spin_unlock(&svms->deferred_list_lock); + pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, prange->start, prange->last, prange->work_item.op); - /* Make sure no stale retry fault coming after range is freed */ - if (prange->work_item.op == SVM_OP_UNMAP_RANGE) - svm_range_drain_retry_fault(prange->svms); - - mm = prange->work_item.mm; - mmap_write_lock(mm); mutex_lock(&svms->lock); - - /* Remove from deferred_list must be inside mmap write lock, - * otherwise, svm_range_list_lock_and_flush_work may hold mmap - * write lock, and continue because deferred_list is empty, then - * deferred_list handle is blocked by mmap write lock. - */ - spin_lock(&svms->deferred_list_lock); - list_del_init(&prange->deferred_list); - spin_unlock(&svms->deferred_list_lock); - mutex_lock(&prange->migrate_mutex); while (!list_empty(&prange->child_list)) { struct svm_range *pchild; @@ -2032,12 +2044,13 @@ static void svm_range_deferred_list_work(struct work_struct *work) svm_range_handle_list_op(svms, prange); mutex_unlock(&svms->lock); - mmap_write_unlock(mm); spin_lock(&svms->deferred_list_lock); } spin_unlock(&svms->deferred_list_lock); + mmap_write_unlock(mm); + mmput(mm); pr_debug("exit svms 0x%p\n", svms); } @@ -2124,6 +2137,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, prange, prange->start, prange->last, start, last); + /* Make sure pending page faults are drained in the deferred worker + * before the range is freed to avoid straggler interrupts on + * unmapped memory causing "phantom faults". + */ + atomic_inc(&svms->drain_pagefaults); + unmap_parent = start <= prange->start && last >= prange->last; list_for_each_entry(pchild, &prange->child_list, child_list) { @@ -2261,7 +2280,7 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, * migration if actual loc is not best location, then update GPU page table * mapping to the best location. * - * If vm fault gpu is range preferred loc, the best_loc is preferred loc. + * If the preferred loc is accessible by faulting GPU, use preferred loc. 
* If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then * if range actual loc is cpu, best_loc is cpu @@ -2278,7 +2297,7 @@ svm_range_best_restore_location(struct svm_range *prange, struct amdgpu_device *adev, int32_t *gpuidx) { - struct amdgpu_device *bo_adev; + struct amdgpu_device *bo_adev, *preferred_adev; struct kfd_process *p; uint32_t gpuid; int r; @@ -2291,8 +2310,16 @@ svm_range_best_restore_location(struct svm_range *prange, return -1; } - if (prange->preferred_loc == gpuid) + if (prange->preferred_loc == gpuid || + prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { return prange->preferred_loc; + } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) { + preferred_adev = svm_range_get_adev_by_id(prange, + prange->preferred_loc); + if (amdgpu_xgmi_same_hive(adev, preferred_adev)) + return prange->preferred_loc; + /* fall through */ + } if (test_bit(*gpuidx, prange->bitmap_access)) return gpuid; @@ -2313,7 +2340,8 @@ svm_range_best_restore_location(struct svm_range *prange, static int svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, - unsigned long *start, unsigned long *last) + unsigned long *start, unsigned long *last, + bool *is_heap_stack) { struct vm_area_struct *vma; struct interval_tree_node *node; @@ -2324,6 +2352,12 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, pr_debug("VMA does not exist in address [0x%llx]\n", addr); return -EFAULT; } + + *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk && + vma->vm_end >= vma->vm_mm->start_brk) || + (vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack); + start_limit = max(vma->vm_start >> PAGE_SHIFT, (unsigned long)ALIGN_DOWN(addr, 2UL << 8)); end_limit = min(vma->vm_end >> PAGE_SHIFT, @@ -2353,9 +2387,9 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, *start = start_limit; *last = end_limit - 1; - pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 0x%lx\n", - vma->vm_start >> PAGE_SHIFT, *start, - vma->vm_end >> PAGE_SHIFT, *last); + pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n", + vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT, + *start, *last, *is_heap_stack); return 0; } @@ -2420,11 +2454,13 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, struct svm_range *prange = NULL; unsigned long start, last; uint32_t gpuid, gpuidx; + bool is_heap_stack; uint64_t bo_s = 0; uint64_t bo_l = 0; int r; - if (svm_range_get_range_boundaries(p, addr, &start, &last)) + if (svm_range_get_range_boundaries(p, addr, &start, &last, + &is_heap_stack)) return NULL; r = svm_range_check_vm(p, start, last, &bo_s, &bo_l); @@ -2451,6 +2487,9 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, return NULL; } + if (is_heap_stack) + prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM; + svm_range_add_to_svms(prange); svm_range_add_notifier_locked(mm, prange); @@ -2523,20 +2562,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, } static bool -svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault) +svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) { unsigned long requested = VM_READ; - struct vm_area_struct *vma; if (write_fault) requested |= VM_WRITE; - vma = find_vma(mm, addr << PAGE_SHIFT); - if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { - pr_debug("address 0x%llx VMA is removed\n", addr); - 
return true; - } - pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested, vma->vm_flags); return (vma->vm_flags & requested) == requested; @@ -2554,6 +2586,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, int32_t best_loc; int32_t gpuidx = MAX_GPU_INSTANCE; bool write_locked = false; + struct vm_area_struct *vma; int r = 0; if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) { @@ -2564,7 +2597,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, p = kfd_lookup_process_by_pasid(pasid); if (!p) { pr_debug("kfd process not founded pasid 0x%x\n", pasid); - return -ESRCH; + return 0; } if (!p->xnack_enabled) { pr_debug("XNACK not enabled for pasid 0x%x\n", pasid); @@ -2575,10 +2608,19 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); + if (atomic_read(&svms->drain_pagefaults)) { + pr_debug("draining retry fault, drop fault 0x%llx\n", addr); + r = 0; + goto out; + } + + /* p->lead_thread is available as kfd_process_wq_release flush the work + * before releasing task ref. + */ mm = get_task_mm(p->lead_thread); if (!mm) { pr_debug("svms 0x%p failed to get mm\n", svms); - r = -ESRCH; + r = 0; goto out; } @@ -2616,6 +2658,7 @@ retry_write_locked: if (svm_range_skip_recover(prange)) { amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + r = 0; goto out_unlock_range; } @@ -2624,10 +2667,21 @@ retry_write_locked: if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) { pr_debug("svms 0x%p [0x%lx %lx] already restored\n", svms, prange->start, prange->last); + r = 0; + goto out_unlock_range; + } + + /* __do_munmap removed VMA, return success as we are handling stale + * retry fault. + */ + vma = find_vma(mm, addr << PAGE_SHIFT); + if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { + pr_debug("address 0x%llx VMA is removed\n", addr); + r = 0; goto out_unlock_range; } - if (!svm_fault_allowed(mm, addr, write_fault)) { + if (!svm_fault_allowed(vma, write_fault)) { pr_debug("fault addr 0x%llx no %s permission\n", addr, write_fault ? "write" : "read"); r = -EPERM; @@ -2705,6 +2759,14 @@ void svm_range_list_fini(struct kfd_process *p) /* Ensure list work is finished before process is destroyed */ flush_work(&p->svms.deferred_list_work); + /* + * Ensure no retry fault comes in afterwards, as page fault handler will + * not find kfd process and take mm lock to recover fault. 
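The ordering the comment above describes is what svm_range_list_fini() enforces: flush deferred work, drain outstanding faults, and only then free the ranges. A condensed sketch, with illustrative helpers standing in for the driver's primitives:

```c
struct svms;

/* Illustrative helpers, standing in for the driver's primitives. */
extern void flush_deferred_work(struct svms *svms);
extern void request_drain(struct svms *svms);	/* atomic_inc(drain_pagefaults) */
extern void drain_retry_faults(struct svms *svms);
extern void free_all_ranges(struct svms *svms);

void svm_list_teardown(struct svms *svms)
{
	/* 1. No deferred list work may run past this point. */
	flush_deferred_work(svms);

	/* 2. Drain outstanding retry faults; anything arriving later cannot
	 *    be recovered because the process is being destroyed.
	 */
	request_drain(svms);
	drain_retry_faults(svms);

	/* 3. Only now is it safe to unlink and free every range. */
	free_all_ranges(svms);
}
```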
+ */ + atomic_inc(&p->svms.drain_pagefaults); + svm_range_drain_retry_fault(&p->svms); + + list_for_each_entry_safe(prange, next, &p->svms.list, list) { svm_range_unlink(prange); svm_range_remove_notifier(prange); @@ -2725,6 +2787,7 @@ int svm_range_list_init(struct kfd_process *p) mutex_init(&svms->lock); INIT_LIST_HEAD(&svms->list); atomic_set(&svms->evicted_ranges, 0); + atomic_set(&svms->drain_pagefaults, 0); INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work); INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); INIT_LIST_HEAD(&svms->deferred_range_list); @@ -3076,6 +3139,8 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) struct svm_range *prange = list_first_entry(&svm_bo->range_list, struct svm_range, svm_bo_list); + int retries = 3; + list_del_init(&prange->svm_bo_list); spin_unlock(&svm_bo->list_lock); @@ -3083,7 +3148,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) prange->start, prange->last); mutex_lock(&prange->migrate_mutex); - svm_migrate_vram_to_ram(prange, svm_bo->eviction_fence->mm); + do { + svm_migrate_vram_to_ram(prange, + svm_bo->eviction_fence->mm); + } while (prange->actual_loc && --retries); + WARN(prange->actual_loc, "Migration failed during eviction"); mutex_lock(&prange->lock); prange->svm_bo = NULL; @@ -3108,7 +3177,6 @@ static int svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) { - struct amdkfd_process_info *process_info = p->kgd_process_info; struct mm_struct *mm = current->mm; struct list_head update_list; struct list_head insert_list; @@ -3127,8 +3195,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, svms = &p->svms; - mutex_lock(&process_info->lock); - svm_range_list_lock_and_flush_work(svms, mm); r = svm_range_is_valid(p, start, size); @@ -3204,8 +3270,6 @@ out_unlock_range: mutex_unlock(&svms->lock); mmap_read_unlock(mm); out: - mutex_unlock(&process_info->lock); - pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid, &p->svms, start, start + size - 1, r); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 43e983e42c0f..e727f1dd2a9a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -51,6 +51,7 @@ #include <drm/drm_hdcp.h> #endif #include "amdgpu_pm.h" +#include "amdgpu_atombios.h" #include "amd_shared.h" #include "amdgpu_dm_irq.h" @@ -217,6 +218,7 @@ static const struct drm_format_info * amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); +static void handle_hpd_rx_irq(void *param); static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, @@ -619,7 +621,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); } -#endif +#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ /** * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command. 
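Returning to svm_range_evict_svm_bo_worker() above: eviction now retries the migration a bounded number of times instead of assuming a single call succeeds, and a range still resident in VRAM afterwards triggers a WARN. The shape of that loop as a self-contained sketch; struct range and migrate_to_ram() are stand-ins:

```c
#include <stdio.h>

struct range {
	int actual_loc;	/* non-zero while the range still lives in VRAM */
};

/* Stand-in for svm_migrate_vram_to_ram(); may make partial progress. */
extern void migrate_to_ram(struct range *r);

int evict_range(struct range *r)
{
	int retries = 3;

	do {
		migrate_to_ram(r);
	} while (r->actual_loc && --retries);

	if (r->actual_loc) {
		/* Mirrors the WARN() above: this should never happen. */
		fprintf(stderr, "Migration failed during eviction\n");
		return -1;
	}
	return 0;
}
```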
@@ -669,10 +671,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not return; } - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - link_index = notify->link_index; - link = adev->dm.dc->links[link_index]; drm_connector_list_iter_begin(dev, &iter); @@ -685,10 +684,13 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not } } drm_connector_list_iter_end(&iter); - drm_modeset_unlock(&dev->mode_config.connection_mutex); - if (hpd_aconnector) - handle_hpd_irq_helper(hpd_aconnector); + if (hpd_aconnector) { + if (notify->type == DMUB_NOTIFICATION_HPD) + handle_hpd_irq_helper(hpd_aconnector); + else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) + handle_hpd_rx_irq(hpd_aconnector); + } } /** @@ -764,6 +766,10 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) DRM_ERROR("DM: notify type %d invalid!", notify.type); continue; } + if (!dm->dmub_callback[notify.type]) { + DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); + continue; + } if (dm->dmub_thread_offload[notify.type] == true) { dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); if (!dmub_hpd_wrk) { @@ -813,7 +819,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) if (count > DMUB_TRACE_MAX_READ) DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); } -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) @@ -1045,6 +1051,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) return 0; } + /* Reset DMCUB if it was previously running - before we overwrite its memory. */ + status = dmub_srv_hw_reset(dmub_srv); + if (status != DMUB_STATUS_OK) + DRM_WARN("Error resetting DMUB HW: %d\n", status); + hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; fw_inst_const = dmub_fw->data + @@ -1410,7 +1421,15 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(2, 1, 0): init_data.flags.gpu_vm_support = true; - init_data.flags.disable_dmcu = true; + switch (adev->dm.dmcub_fw_version) { + case 0: /* development */ + case 0x1: /* linux-firmware.git hash 6d9f399 */ + case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ + init_data.flags.disable_dmcu = false; + break; + default: + init_data.flags.disable_dmcu = true; + } break; case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): @@ -1556,7 +1575,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu: fail to register dmub hpd callback"); goto error; } -#endif + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + goto error; + } +#endif /* CONFIG_DRM_AMD_DC_DCN */ } if (amdgpu_dm_initialize_drm_device(adev)) { @@ -2544,6 +2567,23 @@ static int dm_resume(void *handle) if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; + /* + * The dc->current_state is backed up into dm->cached_dc_state + * before we commit 0 streams. + * + * DC will clear link encoder assignments on the real state + * but the changes won't propagate over to the copy we made + * before the 0 streams commit. + * + * DC expects that link encoder assignments are *not* valid + * when committing a state, so as a workaround it needs to be + * cleared here. 
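Also in this hunk series, dm_dmub_outbox1_low_irq() gains a guard so notification types without a registered handler are skipped instead of being dispatched through a NULL pointer. The dispatch logic, sketched with simplified names and types:

```c
#include <stdbool.h>
#include <stdio.h>

enum { NOTIFICATION_MAX = 8 };	/* stands in for the DMUB notification count */

struct notification { int type; };

struct display_manager {
	void *adev;
	void (*callback[NOTIFICATION_MAX])(void *adev, struct notification *n);
	bool thread_offload[NOTIFICATION_MAX];
};

/* Illustrative: copy the notification and handle it in process context. */
extern void queue_offload_work(struct display_manager *dm, struct notification *n);

void dispatch_notification(struct display_manager *dm, struct notification *n)
{
	if (n->type < 0 || n->type >= NOTIFICATION_MAX) {
		fprintf(stderr, "notify type %d invalid!\n", n->type);
		return;
	}
	/* New guard: a type nobody registered for is skipped, not called. */
	if (!dm->callback[n->type]) {
		fprintf(stderr, "notification skipped, no handler: type=%d\n", n->type);
		return;
	}
	if (dm->thread_offload[n->type])
		queue_offload_work(dm, n);
	else
		dm->callback[n->type](dm->adev, n);
}
```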
+ */ + link_enc_cfg_init(dm->dc, dc_state); + + if (dc_enable_dmub_notifications(adev->dm.dc)) + amdgpu_dm_outbox_init(adev); + r = dm_dmub_hw_init(adev); if (r) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -2555,8 +2595,8 @@ static int dm_resume(void *handle) for (i = 0; i < dc_state->stream_count; i++) { dc_state->streams[i]->mode_changed = true; - for (j = 0; j < dc_state->stream_status->plane_count; j++) { - dc_state->stream_status->plane_states[j]->update_flags.raw + for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { + dc_state->stream_status[i].plane_states[j]->update_flags.raw = 0xffffffff; } } @@ -2591,6 +2631,10 @@ static int dm_resume(void *handle) /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ dc_resource_state_construct(dm->dc, dm_state->context); + /* Re-enable outbox interrupts for DPIA. */ + if (dc_enable_dmub_notifications(adev->dm.dc)) + amdgpu_dm_outbox_init(adev); + /* Before powering on DC we need to re-initialize DMUB. */ r = dm_dmub_hw_init(adev); if (r) @@ -3892,6 +3936,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, caps = dm->backlight_caps[bl_idx]; dm->brightness[bl_idx] = user_brightness; + /* update scratch register */ + if (bl_idx == 0) + amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; @@ -4225,7 +4272,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); - + if (dm->num_of_edps) + update_connector_ext_caps(aconnector); if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); } @@ -4565,7 +4613,8 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev, } -static int fill_dc_scaling_info(const struct drm_plane_state *state, +static int fill_dc_scaling_info(struct amdgpu_device *adev, + const struct drm_plane_state *state, struct dc_scaling_info *scaling_info) { int scale_w, scale_h, min_downscale, max_upscale; @@ -4579,7 +4628,8 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, /* * For reasons we don't (yet) fully understand a non-zero * src_y coordinate into an NV12 buffer can cause a - * system hang. To avoid hangs (and maybe be overly cautious) + * system hang on DCN1x. + * To avoid hangs (and maybe be overly cautious) * let's reject both non-zero src_x and src_y. * * We currently know of only one use-case to reproduce a @@ -4587,10 +4637,10 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, * is to gesture the YouTube Android app into full screen * on ChromeOS. 
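The rejection that follows is now gated on DCN 1.0.x by IP version instead of applying to every ASIC. Factored out as a predicate it would read roughly as below; this is a sketch against the driver's types, the patch itself open-codes the condition in fill_dc_scaling_info():

```c
/* Sketch only: the driver open-codes this test in fill_dc_scaling_info(). */
static bool nv12_nonzero_src_offset_rejected(const struct amdgpu_device *adev,
					     const struct drm_plane_state *state,
					     const struct dc_scaling_info *si)
{
	bool dcn10x = adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0) ||
		      adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1);

	return dcn10x && state->fb &&
	       state->fb->format->format == DRM_FORMAT_NV12 &&
	       (si->src_rect.x != 0 || si->src_rect.y != 0);
}
```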
*/ - if (state->fb && - state->fb->format->format == DRM_FORMAT_NV12 && - (scaling_info->src_rect.x != 0 || - scaling_info->src_rect.y != 0)) + if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || + (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && + (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && + (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) return -EINVAL; scaling_info->src_rect.width = state->src_w >> 16; @@ -5496,7 +5546,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, int ret; bool force_disable_dcc = false; - ret = fill_dc_scaling_info(plane_state, &scaling_info); + ret = fill_dc_scaling_info(adev, plane_state, &scaling_info); if (ret) return ret; @@ -6070,7 +6120,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; } -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ /** * DOC: FreeSync Video @@ -7241,8 +7291,8 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, struct drm_connector_state *new_con_state; struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; - int i, j, clock; - int vcpi, pbn_div, pbn = 0; + int i, j; + int vcpi, pbn_div, pbn, slot_num = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { @@ -7270,17 +7320,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, if (!stream) continue; - if (stream->timing.flags.DSC != 1) { - drm_dp_mst_atomic_enable_dsc(state, - aconnector->port, - dm_conn_state->pbn, - 0, - false); - continue; - } - pbn_div = dm_mst_get_pbn_divider(stream->link); - clock = stream->timing.pix_clk_100hz / 10; /* pbn is calculated by compute_mst_dsc_configs_for_state*/ for (j = 0; j < dc_state->stream_count; j++) { if (vars[j].aconnector == aconnector) { @@ -7289,6 +7329,23 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, } } + if (j == dc_state->stream_count) + continue; + + slot_num = DIV_ROUND_UP(pbn, pbn_div); + + if (stream->timing.flags.DSC != 1) { + dm_conn_state->pbn = pbn; + dm_conn_state->vcpi_slots = slot_num; + + drm_dp_mst_atomic_enable_dsc(state, + aconnector->port, + dm_conn_state->pbn, + 0, + false); + continue; + } + vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, pbn_div, @@ -7552,7 +7609,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - ret = fill_dc_scaling_info(new_plane_state, &scaling_info); + ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info); if (ret) return ret; @@ -9000,7 +9057,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; } - fill_dc_scaling_info(new_plane_state, + fill_dc_scaling_info(dm->adev, new_plane_state, &bundle->scaling_infos[planes_count]); bundle->surface_updates[planes_count].scaling_info = @@ -10787,7 +10844,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) - return ret; + goto fail; ret = drm_atomic_add_affected_planes(state, crtc); if (ret) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index cce062adc439..8a441a22c46e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) ret = -EINVAL; goto cleanup; } + + if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) && + (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) { + DRM_DEBUG_DRIVER("No DP connector available for CRC source\n"); + ret = -EINVAL; + goto cleanup; + } + } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 3655663e079b..9d43ecb1f692 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -78,12 +78,10 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size, wr_buf_ptr = wr_buf; - r = copy_from_user(wr_buf_ptr, buf, wr_buf_size); - - /* r is bytes not be copied */ - if (r >= wr_buf_size) { - DRM_DEBUG_DRIVER("user data not be read\n"); - return -EINVAL; + /* r is bytes not be copied */ + if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) { + DRM_DEBUG_DRIVER("user data could not be read successfully\n"); + return -EFAULT; } /* check number of parameters. isspace could not differ space and \n */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 874a49b605c7..cc34a35d0bcb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -36,6 +36,8 @@ #include "dm_helpers.h" #include "dc_link_ddc.h" +#include "ddc_service_types.h" +#include "dpcd_defs.h" #include "i2caux_interface.h" #include "dmub_cmd.h" @@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { }; #if defined(CONFIG_DRM_AMD_DC_DCN) +static bool needs_dsc_aux_workaround(struct dc_link *link) +{ + if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && + link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2) + return true; + + return false; +} + static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) { struct dc_sink *dc_sink = aconnector->dc_sink; @@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto u8 *dsc_branch_dec_caps = NULL; aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); -#if defined(CONFIG_HP_HOOK_WORKAROUND) + /* * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs * because it only check the dsc/fec caps of the "port variable" and not the dock @@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux * */ - - if (!aconnector->dsc_aux && !port->parent->port_parent) + if (!aconnector->dsc_aux && !port->parent->port_parent && + needs_dsc_aux_workaround(aconnector->dc_link)) aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; -#endif + if (!aconnector->dsc_aux) return false; @@ -534,13 +546,14 @@ static int kbps_to_peak_pbn(int kbps) static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; for (i = 0; i < count; i++) { memset(¶ms[i].timing->dsc_cfg, 0, 
sizeof(params[i].timing->dsc_cfg)); - if (vars[i].dsc_enabled && dc_dsc_compute_config( + if (vars[i + k].dsc_enabled && dc_dsc_compute_config( params[i].sink->ctx->dc->res_pool->dscs[0], ¶ms[i].sink->dsc_caps.dsc_dec_caps, params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, @@ -553,7 +566,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p if (params[i].bpp_overwrite) params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; else - params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16; + params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16; if (params[i].num_slices_h) params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h; @@ -586,7 +599,8 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; bool bpp_increased[MAX_PIPES]; @@ -601,8 +615,9 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link); for (i = 0; i < count; i++) { - if (vars[i].dsc_enabled) { - initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn; + if (vars[i + k].dsc_enabled) { + initial_slack[i] = + kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -629,7 +644,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, link_timeslots_used = 0; for (i = 0; i < count; i++) - link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot); + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot); fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot; @@ -682,7 +697,8 @@ static void try_disable_dsc(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; bool tried[MAX_PIPES]; @@ -692,8 +708,8 @@ static void try_disable_dsc(struct drm_atomic_state *state, int remaining_to_try = 0; for (i = 0; i < count; i++) { - if (vars[i].dsc_enabled - && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16 + if (vars[i + k].dsc_enabled + && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16 && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; tried[i] = false; @@ -748,9 +764,10 @@ static void try_disable_dsc(struct drm_atomic_state *state, static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, struct dc_link *dc_link, - struct dsc_mst_fairness_vars *vars) + struct dsc_mst_fairness_vars *vars, + int *link_vars_start_index) { - int i; + int i, k; struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; @@ -768,11 +785,17 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (stream->link != dc_link) continue; + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + if (!aconnector) + continue; + + if (!aconnector->port) + continue; + stream->timing.flags.DSC = 0; params[count].timing = &stream->timing; params[count].sink = stream->sink; - aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; params[count].aconnector = aconnector; params[count].port = aconnector->port; params[count].clock_force_enable = 
aconnector->dsc_settings.dsc_force_enable; @@ -794,44 +817,55 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, count++; } + + if (count == 0) { + ASSERT(0); + return true; + } + + /* k is start index of vars for current phy link used by mst hub */ + k = *link_vars_start_index; + /* set vars start index for next mst hub phy link */ + *link_vars_start_index += count; + /* Try no compression */ for (i = 0; i < count; i++) { - vars[i].aconnector = params[i].aconnector; - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); - vars[i].dsc_enabled = false; - vars[i].bpp_x16 = 0; + vars[i + k].aconnector = params[i].aconnector; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { - set_dsc_configs_from_fairness_vars(params, vars, count); + set_dsc_configs_from_fairness_vars(params, vars, count, k); return true; } /* Try max compression */ for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); - vars[i].dsc_enabled = true; - vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); + vars[i + k].dsc_enabled = true; + vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } else { - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); - vars[i].dsc_enabled = false; - vars[i].bpp_x16 = 0; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } @@ -840,15 +874,76 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return false; /* Optimize degree of compression */ - increase_dsc_bpp(state, dc_link, params, vars, count); + increase_dsc_bpp(state, dc_link, params, vars, count, k); - try_disable_dsc(state, dc_link, params, vars, count); + try_disable_dsc(state, dc_link, params, vars, count, k); - set_dsc_configs_from_fairness_vars(params, vars, count); + set_dsc_configs_from_fairness_vars(params, vars, count, k); return true; } +static bool is_dsc_need_re_compute( + struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dc_link *dc_link) +{ + int i; + bool is_dsc_need_re_compute = false; + + /* only check phy used by mst branch */ + if (dc_link->type != dc_connection_mst_branch) + return false; + + /* check if there is mode change in new request */ + for (i = 0; i < dc_state->stream_count; i++) { + struct amdgpu_dm_connector *aconnector; + struct dc_stream_state *stream; + struct drm_crtc_state *new_crtc_state; + struct drm_connector_state *new_conn_state; + + stream = dc_state->streams[i]; + + if (!stream) + continue; + + /* check if stream using the same link for mst */ + if (stream->link != dc_link) + continue; + + aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; + if (!aconnector) + 
continue; + + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + + if (!new_conn_state) + continue; + + if (IS_ERR(new_conn_state)) + continue; + + if (!new_conn_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + + if (!new_crtc_state) + continue; + + if (IS_ERR(new_crtc_state)) + continue; + + if (new_crtc_state->enable && new_crtc_state->active) { + if (new_crtc_state->mode_changed || new_crtc_state->active_changed || + new_crtc_state->connectors_changed) + is_dsc_need_re_compute = true; + } + } + + return is_dsc_need_re_compute; +} + bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_state *dc_state, struct dsc_mst_fairness_vars *vars) @@ -857,6 +952,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_stream_state *stream; bool computed_streams[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; + int link_vars_start_index = 0; for (i = 0; i < dc_state->stream_count; i++) computed_streams[i] = false; @@ -881,8 +977,12 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) return false; + if (!is_dsc_need_re_compute(state, dc_state, stream->link)) + continue; + mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) { + if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, + vars, &link_vars_start_index)) { mutex_unlock(&aconnector->mst_mgr.lock); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 12e5470fa567..0ded4decee05 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1085,6 +1085,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) struct dc_stream_state *old_stream = dc->current_state->res_ctx.pipe_ctx[i].stream; bool should_disable = true; + bool pipe_split_change = + context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe; for (j = 0; j < context->stream_count; j++) { if (old_stream == context->streams[j]) { @@ -1092,6 +1094,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) break; } } + if (!should_disable && pipe_split_change) + should_disable = true; + if (should_disable && old_stream) { dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); @@ -1887,6 +1892,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) return false; } +#ifdef CONFIG_DRM_AMD_DC_DCN /* Perform updates here which need to be deferred until next vupdate * * i.e. 
blnd lut, 3dlut, and shaper lut bypass regs are double buffered @@ -1896,7 +1902,6 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) */ static void process_deferred_updates(struct dc *dc) { -#ifdef CONFIG_DRM_AMD_DC_DCN int i = 0; if (dc->debug.enable_mem_low_power.bits.cm) { @@ -1905,8 +1910,8 @@ static void process_deferred_updates(struct dc *dc) if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); } -#endif } +#endif /* CONFIG_DRM_AMD_DC_DCN */ void dc_post_update_surfaces_to_stream(struct dc *dc) { @@ -1933,7 +1938,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc) dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); } +#ifdef CONFIG_DRM_AMD_DC_DCN process_deferred_updates(dc); +#endif dc->hwss.optimize_bandwidth(dc, context); @@ -3603,7 +3610,8 @@ bool dc_enable_dmub_notifications(struct dc *dc) #if defined(CONFIG_DRM_AMD_DC_DCN) /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */ if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && - dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) + dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && + !dc->debug.dpia_debug.bits.disable_dpia) return true; #endif /* dmub aux needs dmub notifications to be enabled */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 2796bdd17de1..c8457babfdea 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link, dal_ddc_service_set_transaction_type(link->ddc, sink_caps->transaction_type); +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock + * reports DSC support. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) + link->wa_flags.dpia_mst_dsc_always_on = true; +#endif + #if defined(CONFIG_DRM_AMD_DC_HDCP) /* In case of fallback to SST when topology discovery below fails * HDCP caps will be querried again later by the upper layer (caller @@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link, LINK_INFO("link=%d, mst branch is now Disconnected\n", link->link_index); + /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. 
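All of the conditions for arming the workaround appear in the detect_dp() hunk above; collected into a single predicate for readability (a sketch, the driver open-codes the test):

```c
/* Sketch: condition under which DSC is forced to stay on for the dock. */
static bool needs_dsc_always_on(const struct dc_link *link)
{
	return link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
	       link->type == dc_connection_mst_branch &&
	       link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
	       link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
	       !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around;
}
```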
*/ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->wa_flags.dpia_mst_dsc_always_on = false; + dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); link->mst_stream_alloc_table.stream_count = 0; @@ -4279,6 +4295,8 @@ void core_link_enable_stream( */ if (status != DC_FAIL_DP_LINK_TRAINING || pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (false == stream->link->link_status.link_active) + disable_link(stream->link, pipe_ctx->stream->signal); BREAK_TO_DEBUGGER(); return; } @@ -4768,7 +4786,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing( timing->dsc_cfg.bits_per_pixel, timing->dsc_cfg.num_slices_h, timing->dsc_cfg.is_dp); -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ switch (timing->display_color_depth) { case COLOR_DEPTH_666: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index cc25ba0ec7db..13bc69d6b679 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training( } for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++) - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0; + lt_settings->dpcd_lane_settings[lane].raw = 0; } if (status == LINK_TRAINING_SUCCESS) { @@ -5329,6 +5329,14 @@ bool dc_link_dp_set_test_pattern( return false; if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) + core_link_write_dpcd(link, + DP_LINK_SQUARE_PATTERN, + p_custom_pattern, + 1); + +#endif /* tell receiver that we are sending qualification * pattern DP 1.2 or later - DP receiver's link quality * pattern is set using DPCD LINK_QUAL_LANEx_SET diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 72b0f8594b4a..25e48a8cbb78 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -236,6 +236,23 @@ static struct link_encoder *get_link_enc_used_by_link( return link_enc; } +/* Clear all link encoder assignments. */ +static void clear_enc_assignments(struct dc_state *state) +{ + int i; + enum engine_id eng_id; + struct dc_stream_state *stream; + + for (i = 0; i < MAX_PIPES; i++) { + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; + eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id; + stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream; + if (eng_id != ENGINE_ID_UNKNOWN) + state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id; + if (stream) + stream->link_enc = NULL; + } +} void link_enc_cfg_init( struct dc *dc, @@ -250,6 +267,8 @@ void link_enc_cfg_init( state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN; } + clear_enc_assignments(state); + state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } @@ -265,6 +284,9 @@ void link_enc_cfg_link_encs_assign( ASSERT(state->stream_count == stream_count); + if (stream_count == 0) + clear_enc_assignments(state); + /* Release DIG link encoder resources before running assignment algorithm. 
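clear_enc_assignments() is now reached from two paths: at state construction, and again whenever a commit carries zero streams, so a display-off commit cannot leave stale encoder-to-stream bindings behind. An outline of the call sites, simplified from the hunks above; the elided bodies are only hinted at in comments:

```c
/* Outline of the call sites, simplified from the hunks above. */
void link_enc_cfg_init(struct dc *dc, struct dc_state *state)
{
	/* ... mark all DIG link encoders as available ... */
	clear_enc_assignments(state);		/* start from a clean table */
	/* ... enter LINK_ENC_CFG_STEADY mode ... */
}

void link_enc_cfg_link_encs_assign(struct dc *dc, struct dc_state *state,
				   struct dc_stream_state *streams[],
				   uint8_t stream_count)
{
	if (stream_count == 0)
		clear_enc_assignments(state);	/* display-off commit */
	/* ... release per-stream encoders, then run the assignment algorithm ... */
}
```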
*/ for (i = 0; i < stream_count; i++) dc->res_pool->funcs->link_enc_unassign(state, streams[i]); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c32fdccd4d92..e2d9a46d0e1a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged( if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; + // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks + if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) + return false; + return true; } @@ -2252,16 +2256,6 @@ enum dc_status dc_validate_global_state( if (!new_ctx) return DC_ERROR_UNEXPECTED; -#if defined(CONFIG_DRM_AMD_DC_DCN) - - /* - * Update link encoder to stream assignment. - * TODO: Split out reason allocation from validation. - */ - if (dc->res_pool->funcs->link_encs_assign && fast_validate == false) - dc->res_pool->funcs->link_encs_assign( - dc, new_ctx, new_ctx->streams, new_ctx->stream_count); -#endif if (dc->res_pool->funcs->validate_global) { result = dc->res_pool->funcs->validate_global(dc, new_ctx); @@ -2313,6 +2307,16 @@ enum dc_status dc_validate_global_state( if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) result = DC_FAIL_BANDWIDTH_VALIDATE; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* + * Only update link encoder to stream assignment after bandwidth validation passed. + * TODO: Split out assignment and validation. + */ + if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) + dc->res_pool->funcs->link_encs_assign( + dc, new_ctx, new_ctx->streams, new_ctx->stream_count); +#endif + return result; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a5339796902a..618e7989176f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.159" +#define DC_VER "3.2.160" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -508,7 +508,8 @@ union dpia_debug_options { uint32_t disable_dpia:1; uint32_t force_non_lttpr:1; uint32_t extend_aux_rd_interval:1; - uint32_t reserved:29; + uint32_t disable_mst_dsc_work_around:1; + uint32_t reserved:28; } bits; uint32_t raw; }; @@ -675,6 +676,7 @@ struct dc_debug_options { #endif union mem_low_power_enable_options enable_mem_low_power; union root_clock_optimization_options root_clock_optimization; + bool hpo_optimization; bool force_vblank_alignment; /* Enable dmub aux for legacy ddc */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index bc87ea0adf94..e68e9a86a4d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -898,6 +898,9 @@ struct dpcd_usb4_dp_tunneling_info { #ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT #define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3 #endif +#ifndef DP_LINK_SQUARE_PATTERN +#define DP_LINK_SQUARE_PATTERN 0x10F +#endif #ifndef DP_DSC_CONFIGURATION #define DP_DSC_CONFIGURATION 0x161 #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 180ecd860296..fad3d883ed89 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -191,6 +191,8 @@ struct dc_link { 
bool dp_skip_DID2; bool dp_skip_reset_segment; bool dp_mot_reset_segment; + /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ + bool dpia_mst_dsc_always_on; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -224,6 +226,8 @@ static inline void get_edp_links(const struct dc *dc, *edp_num = 0; for (i = 0; i < dc->link_count; i++) { // report any eDP links, even unconnected DDI's + if (!dc->links[i]) + continue; if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) { edp_links[*edp_num] = dc->links[i]; if (++(*edp_num) == MAX_NUM_EDP) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 989f5b6907e2..a3fee929cd12 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -671,6 +671,7 @@ struct dce_hwseq_registers { uint32_t MC_VM_FB_LOCATION_BASE; uint32_t MC_VM_FB_LOCATION_TOP; uint32_t MC_VM_FB_OFFSET; + uint32_t HPO_TOP_HW_CONTROL; }; /* set field name */ #define HWS_SF(blk_name, reg_name, field_name, post_fix)\ @@ -1152,7 +1153,8 @@ struct dce_hwseq_registers { type DOMAIN_PGFSM_PWR_STATUS;\ type HPO_HDMISTREAMCLK_G_GATE_DIS;\ type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;\ - type I2C_LIGHT_SLEEP_FORCE; + type I2C_LIGHT_SLEEP_FORCE;\ + type HPO_IO_EN; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index af3e68d3e747..24e47df526f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1244,6 +1244,12 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) #endif if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dc->hwseq->funcs.setup_hpo_hw_control && is_dp_128b_132b_signal(pipe_ctx)) + dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, false); +#endif + } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a25732d07222..04d7bddc915b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -231,7 +231,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) if (!s->blank_en) DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" - "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" + " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start, dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler, @@ -1637,7 +1637,7 @@ void dcn10_reset_hw_ctx_wrap( dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index cfee456c6c9a..e6af99ae3d9f 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2270,7 +2270,7 @@ void dcn20_reset_hw_ctx_wrap( dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -2397,6 +2397,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) * BY this, it is logic clean to separate stream and link */ if (is_dp_128b_132b_signal(pipe_ctx)) { + if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control) + pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control( + pipe_ctx->stream->ctx->dc->hwseq, true); setup_dp_hpo_stream(pipe_ctx, true); pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream( pipe_ctx->stream_res.hpo_dp_stream_enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index a82319f4d081..95149734378b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -1381,13 +1381,11 @@ int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) } -static void mpc3_mpc_init(struct mpc *mpc) +static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc) { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); int mpcc_id; - mpc1_mpc_init(mpc); - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { if (mpc30->mpc_mask->MPC_RMU0_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPC_RMU1_MEM_LOW_PWR_MODE) { REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, 3); @@ -1405,7 +1403,7 @@ const struct mpc_funcs dcn30_mpc_funcs = { .read_mpcc_state = mpc1_read_mpcc_state, .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, - .mpc_init = mpc3_mpc_init, + .mpc_init = mpc1_mpc_init, .mpc_init_single_inst = mpc1_mpc_init_single_inst, .update_blending = mpc2_update_blending, .cursor_lock = mpc1_cursor_lock, @@ -1432,6 +1430,7 @@ const struct mpc_funcs dcn30_mpc_funcs = { .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, .set_bg_color = mpc1_set_bg_color, + .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode, }; void dcn30_mpc_construct(struct dcn30_mpc *mpc30, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index e50c695e3c96..79a66e0c4303 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -2128,10 +2128,10 @@ static noinline void dcn30_calculate_wm_and_dlg_fp( int pipe_cnt, int vlevel) { + int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; int i, pipe_idx; - double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; - bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != - dm_dram_clock_change_unsupported; + double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb]; + bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported; if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk) dcfclk = context->bw_ctx.dml.soc.min_dcfclk; @@ -2207,6 +2207,7 @@ static noinline void dcn30_calculate_wm_and_dlg_fp( context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us; 
context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us; } + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index d24ad7754d71..4d4ac4ceb1e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -66,6 +66,45 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name +static void enable_memory_low_power(struct dc *dc) +{ + struct dce_hwseq *hws = dc->hwseq; + int i; + + if (dc->debug.enable_mem_low_power.bits.dmcu) { + // Force ERAM to shutdown if DMCU is not enabled + if (dc->debug.disable_dmcu || dc->config.disable_dmcu) { + REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3); + } + } + + // Set default OPTC memory power states + if (dc->debug.enable_mem_low_power.bits.optc) { + // Shutdown when unassigned and light sleep in VBLANK + REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); + } + + if (dc->debug.enable_mem_low_power.bits.vga) { + // Power down VGA memory + REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); + } + + if (dc->debug.enable_mem_low_power.bits.mpc) + dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc); + + + if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { + // Power down VPGs + for (i = 0; i < dc->res_pool->stream_enc_count; i++) + dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); +#if defined(CONFIG_DRM_AMD_DC_DCN) + for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) + dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); +#endif + } + +} + void dcn31_init_hw(struct dc *dc) { struct abm **abms = dc->res_pool->multiple_abms; @@ -108,35 +147,7 @@ void dcn31_init_hw(struct dc *dc) if (res_pool->dccg->funcs->dccg_init) res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (dc->debug.enable_mem_low_power.bits.dmcu) { - // Force ERAM to shutdown if DMCU is not enabled - if (dc->debug.disable_dmcu || dc->config.disable_dmcu) { - REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3); - } - } - - // Set default OPTC memory power states - if (dc->debug.enable_mem_low_power.bits.optc) { - // Shutdown when unassigned and light sleep in VBLANK - REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); - } - - if (dc->debug.enable_mem_low_power.bits.vga) { - // Power down VGA memory - REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); - } - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { - // Power down VPGs - for (i = 0; i < dc->res_pool->stream_enc_count; i++) - dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); -#if defined(CONFIG_DRM_AMD_DC_DP2_0) - for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) - 
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); -#endif - } -#endif + enable_memory_low_power(dc); if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = @@ -264,6 +275,9 @@ void dcn31_init_hw(struct dc *dc) if (dc->debug.enable_mem_low_power.bits.i2c) REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1); + if (hws->funcs.setup_hpo_hw_control) + hws->funcs.setup_hpo_hw_control(hws, false); + if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); @@ -588,7 +602,7 @@ void dcn31_reset_hw_ctx_wrap( dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -597,3 +611,9 @@ void dcn31_reset_hw_ctx_wrap( /* New dc_state in the process of being applied to hardware. */ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT; } + +void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable) +{ + if (hws->ctx->dc->debug.hpo_optimization) + REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h index 7ae45dd202d9..edfc01d6ad73 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h @@ -54,5 +54,6 @@ void dcn31_reset_hw_ctx_wrap( bool dcn31_is_abm_supported(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); void dcn31_init_pipes(struct dc *dc, struct dc_state *context); +void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index c6a737781ad1..4f6e639e9353 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -101,6 +101,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .z10_restore = dcn31_z10_restore, .z10_save_init = dcn31_z10_save_init, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, .update_visual_confirm_color = dcn20_update_visual_confirm_color, }; @@ -137,6 +138,7 @@ static const struct hwseq_private_funcs dcn31_private_funcs = { .dccg_init = dcn20_dccg_init, .set_blend_lut = dcn30_set_blend_lut, .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, }; void dcn31_hw_sequencer_construct(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 87b2c2428842..18896294ae12 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -860,7 +860,8 @@ static const struct dccg_mask dccg_mask = { SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING) + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL) static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN31_REG_LIST() @@ -898,7 +899,8 @@ static const struct dce_hwseq_registers hwseq_reg = { HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, 
ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ - HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh) + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index e3d9f1decdfc..f47d82da115c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -3576,16 +3576,9 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16; } else { - if (Output == dm_hdmi) { - NonDSCBPP0 = 24; - NonDSCBPP1 = 24; - NonDSCBPP2 = 24; - } - else { - NonDSCBPP0 = 16; - NonDSCBPP1 = 20; - NonDSCBPP2 = 24; - } + NonDSCBPP0 = 16; + NonDSCBPP1 = 20; + NonDSCBPP2 = 24; if (Format == dm_n422) { MinDSCBPP = 7; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index d58925cff420..7e937bdcea00 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3892,15 +3892,11 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16; } else { - if (Output == dm_hdmi) { - NonDSCBPP0 = 24; - NonDSCBPP1 = 24; - NonDSCBPP2 = 24; - } else { - NonDSCBPP0 = 16; - NonDSCBPP1 = 20; - NonDSCBPP2 = 24; - } + + NonDSCBPP0 = 16; + NonDSCBPP1 = 20; + NonDSCBPP2 = 24; + if (Format == dm_n422) { MinDSCBPP = 7; MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 04d6ec3f021f..f5fd2a067323 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -367,6 +367,7 @@ struct mpc_funcs { void (*set_bg_color)(struct mpc *mpc, struct tg_color *bg_color, int mpcc_id); + void (*set_mpc_mem_lp_mode)(struct mpc *mpc); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index f324285394be..c2008258c50a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -143,6 +143,7 @@ struct hwseq_private_funcs { const struct dc_plane_state *plane_state); void (*PLAT_58856_wa)(struct dc_state *context, struct pipe_ctx *pipe_ctx); + void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 717c0e572d2f..cd204eef073b 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -238,6 +238,7 @@ struct dmub_srv_hw_params { bool load_inst_const; bool skip_panel_power_sequence; bool disable_z10; + bool power_optimization; bool dpia_supported; bool disable_dpia; }; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 0293c58f0701..c29a67ccef17 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -46,10 +46,10 @@ /* Firmware versioning. 
*/ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0x9525efb5 +#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 90 +#define DMUB_FW_VERSION_REVISION 91 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 10ebf20eaa41..fa0569174aec 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -340,6 +340,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.z10_disable = params->disable_z10; boot_options.bits.dpia_supported = params->dpia_supported; boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1; + boot_options.bits.power_optimization = params->power_optimization; boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f1a46d16f7ea..4b9e68a79f06 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -98,7 +98,8 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_ACP, AMD_IP_BLOCK_TYPE_VCN, AMD_IP_BLOCK_TYPE_MES, - AMD_IP_BLOCK_TYPE_JPEG + AMD_IP_BLOCK_TYPE_JPEG, + AMD_IP_BLOCK_TYPE_NUM, }; enum amd_clockgating_state { diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 03581d5b1836..08362d506534 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block { int ret = 0; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; + + if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { + dev_dbg(adev->dev, "IP block%d already in the target %s state!", + block_type, gate ? 
"gate" : "ungate"); + return 0; + } switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: @@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block break; } + if (!ret) + atomic_set(&adev->pm.pwr_state[block_type], pwr_state); + return ret; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 49fe4155c374..41472ed99253 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2094,6 +2094,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { + if (!adev->powerplay.pp_funcs->get_power_profile_mode || + amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) + *states = ATTR_STATE_UNSUPPORTED; } switch (asic_type) { diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 98f1b3d8c1d5..16e3f72d31b9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -417,6 +417,12 @@ struct amdgpu_dpm { enum amd_dpm_forced_level forced_level; }; +enum ip_power_state { + POWER_STATE_UNKNOWN, + POWER_STATE_ON, + POWER_STATE_OFF, +}; + struct amdgpu_pm { struct mutex mutex; u32 current_sclk; @@ -452,6 +458,8 @@ struct amdgpu_pm { struct i2c_adapter smu_i2c; struct mutex smu_i2c_mutex; struct list_head pm_attr_list; + + atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM]; }; #define R600_SSTU_DFLT 0 diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h index 1d3447991d0c..fc9198846e70 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h @@ -51,7 +51,7 @@ #define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default #define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display #define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz -#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Set active work load type +#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Deprecated (Not to be used) #define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. 
enter GFXOFF #define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload #define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer @@ -63,7 +63,7 @@ #define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK #define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK) -#define PPSMC_MSG_SPARE0 0x16 ///< Spared +#define PPSMC_MSG_SPARE 0x16 ///< Spare #define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency #define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency #define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 321215003643..619f8d305292 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -875,34 +875,30 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx) static int pp_get_power_profile_mode(void *handle, char *buf) { struct pp_hwmgr *hwmgr = handle; + int ret; - if (!hwmgr || !hwmgr->pm_en || !buf) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode) + return -EOPNOTSUPP; + if (!buf) return -EINVAL; - if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return snprintf(buf, PAGE_SIZE, "\n"); - } - - return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); + mutex_lock(&hwmgr->smu_lock); + ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); + mutex_unlock(&hwmgr->smu_lock); + return ret; } static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) { struct pp_hwmgr *hwmgr = handle; - int ret = -EINVAL; + int ret = -EOPNOTSUPP; - if (!hwmgr || !hwmgr->pm_en) - return ret; - - if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); + if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode) return ret; - } if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { pr_debug("power profile setting is for manual dpm mode only.\n"); - return ret; + return -EINVAL; } mutex_lock(&hwmgr->smu_lock); @@ -1332,7 +1328,12 @@ static int pp_set_powergating_by_smu(void *handle, pp_dpm_powergate_vce(handle, gate); break; case AMD_IP_BLOCK_TYPE_GMC: - pp_dpm_powergate_mmhub(handle); + /* + * For now, this is only used on PICASSO. + * And only "gate" operation is supported. + */ + if (gate) + pp_dpm_powergate_mmhub(handle); break; case AMD_IP_BLOCK_TYPE_GFX: ret = pp_dpm_powergate_gfx(handle, gate); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 1de3ae77e03e..1f406f21b452 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1036,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, else i = 1; - size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", + size += sprintf(buf + size, "0: %uMhz %s\n", data->gfx_min_freq_limit/100, i == 0 ? "*" : ""); - size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + size += sprintf(buf + size, "1: %uMhz %s\n", i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK, i == 1 ? 
"*" : ""); - size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", + size += sprintf(buf + size, "2: %uMhz %s\n", data->gfx_max_freq_limit/100, i == 2 ? "*" : ""); break; @@ -1050,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->entries[i].clk / 100, ((mclk_table->entries[i].clk / 100) @@ -1065,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "1: %10uMhz\n", (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq); } break; @@ -1081,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", min_freq, max_freq); } break; @@ -1456,6 +1456,8 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], title[1], title[2], title[3], title[4], title[5]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index e7803ce8f67a..611969bf4520 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -4926,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < sclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4941,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4955,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < pcie_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %s %s\n", i, + size += sprintf(buf + size, "%d: %s %s\n", i, (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : (pcie_table->dpm_levels[i].value == 2) ? 
"8.0GT/s, x16" : "", @@ -4963,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_SCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); for (i = 0; i < odn_sclk_table->num_of_pl; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", i, odn_sclk_table->entries[i].clock/100, odn_sclk_table->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); for (i = 0; i < odn_mclk_table->num_of_pl; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", i, odn_mclk_table->entries[i].clock/100, odn_mclk_table->entries[i].vddc); } break; case OD_RANGE: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); - size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); - size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", data->odn_dpm_table.min_vddc, data->odn_dpm_table.max_vddc); } @@ -5518,6 +5518,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n", title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index b94a77e4e714..03bf8f069222 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1559,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, CURR_SCLK_INDEX); for (i = 0; i < sclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->entries[i].clk / 100, (i == now) ? "*" : ""); break; @@ -1571,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, CURR_MCLK_INDEX); for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100, (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h index ad33983a8064..2a75da1e9f03 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h @@ -109,6 +109,19 @@ int phm_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +/* + * Helper function to make sysfs_emit_at() happy. Align buf to + * the current page boundary and record the offset. 
+ */ +static inline void phm_get_sysfs_buf(char **buf, int *offset) +{ + if (!*buf || !offset) + return; + + *offset = offset_in_page(*buf); + *buf -= *offset; +} + int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr); void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index c152a61ddd2c..e6336654c565 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -4548,6 +4548,8 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -4650,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, else count = sclk_table->count; for (i = 0; i < count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4661,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4672,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); for (i = 0; i < soc_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, soc_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4684,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now); for (i = 0; i < dcef_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, dcef_table->dpm_levels[i].value / 100, (dcef_table->dpm_levels[i].value / 100 == now) ? "*" : ""); @@ -4698,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, gen_speed = pptable->PcieGenSpeed[i]; lane_width = pptable->PcieLaneCount[i]; - size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i, + size += sprintf(buf + size, "%d: %s %s %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? "5.0GT/s," : (gen_speed == 2) ? 
"8.0GT/s," : @@ -4717,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; for (i = 0; i < podn_vdd_dep->count; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", i, podn_vdd_dep->entries[i].clk / 100, podn_vdd_dep->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; for (i = 0; i < podn_vdd_dep->count; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", i, podn_vdd_dep->entries[i].clk/100, podn_vdd_dep->entries[i].vddc); } break; case OD_RANGE: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); - size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.mem_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); - size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", data->odn_dpm_table.min_vddc, data->odn_dpm_table.max_vddc); } @@ -5112,6 +5114,8 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], title[1], title[2], title[3], title[4], title[5]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 8558718e15a8..a2f4d6773d45 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -2141,6 +2141,8 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -2256,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get gfx clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); break; @@ -2272,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get memory clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? 
"*" : ""); break; @@ -2290,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get soc clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); break; @@ -2308,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get dcef clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 0cf39c1244b1..85d55ab4e369 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -3238,6 +3238,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -3372,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_sclks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3390,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_memclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3408,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_socclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3426,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); for (i = 0; i < fclk_dpm_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, fclk_dpm_table->dpm_levels[i].value, fclk_dpm_table->dpm_levels[i].value == (now / 100) ? 
"*" : ""); break; @@ -3438,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_dcefclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3458,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, gen_speed = pptable->PcieGenSpeed[i]; lane_width = pptable->PcieLaneCount[i]; - size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? "5.0GT/s," : (gen_speed == 2) ? "8.0GT/s," : @@ -3479,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", od_table->GfxclkFmin); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "1: %10uMhz\n", od_table->GfxclkFmax); } break; case OD_MCLK: if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "1: %10uMhz\n", od_table->UclkFmax); } @@ -3503,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE"); - size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n", + size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE"); + size += sprintf(buf + size, "0: %10uMhz %10dmV\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1 / VOLTAGE_SCALE); - size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n", + size += sprintf(buf + size, "1: %10uMhz %10dmV\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2 / VOLTAGE_SCALE); - size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n", + size += sprintf(buf + size, "2: %10uMhz %10dmV\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3 / VOLTAGE_SCALE); } @@ -3518,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); } if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_UCLK_FMAX].min_value, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); } @@ -3539,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, 
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); } @@ -4003,6 +4005,8 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n", title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7], title[8], title[9], title[10]); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index b06c59dcc1b4..8a3244585d80 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu) dev_err(adev->dev, "Failed to disable smu features.\n"); } - if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0) && + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); @@ -2534,13 +2534,15 @@ static int smu_get_power_profile_mode(void *handle, char *buf) struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || + !smu->ppt_funcs->get_power_profile_mode) return -EOPNOTSUPP; + if (!buf) + return -EINVAL; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_power_profile_mode) - ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); + ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); mutex_unlock(&smu->mutex); @@ -2554,7 +2556,8 @@ static int smu_set_power_profile_mode(void *handle, struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || + !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; mutex_lock(&smu->mutex); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 
cbc3f99e8573..2238ee19c222 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -309,6 +309,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, { int ret = 0, size = 0; uint32_t cur_value = 0; + int i; smu_cmn_get_sysfs_buf(&buf, &size); @@ -334,8 +335,6 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n", CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); break; - case SMU_GFXCLK: - case SMU_SCLK: case SMU_FCLK: case SMU_MCLK: case SMU_SOCCLK: @@ -346,6 +345,25 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, return ret; size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); break; + case SMU_SCLK: + case SMU_GFXCLK: + ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + return ret; + if (cur_value == CYAN_SKILLFISH_SCLK_MAX) + i = 2; + else if (cur_value == CYAN_SKILLFISH_SCLK_MIN) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MIN, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : cyan_skillfish_sclk_default, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MAX, + i == 2 ? "*" : ""); + break; default: dev_warn(smu->adev->dev, "Unsupported clock type\n"); return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 71161f6b78fe..60a557068ea4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { uint16_t *curve_settings; - int i, size = 0, ret = 0; + int i, levels, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1319,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu, freq_values[1] = cur_value; mark_index = cur_value == freq_values[0] ? 0 : cur_value == freq_values[2] ? 2 : 1; - if (mark_index != 1) - freq_values[1] = (freq_values[0] + freq_values[2]) / 2; - for (i = 0; i < 3; i++) { + levels = 3; + if (mark_index != 1) { + levels = 2; + freq_values[1] = freq_values[2]; + } + + for (i = 0; i < levels; i++) { size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i], i == mark_index ? 
"*" : ""); } - } break; case SMU_PCIE: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 421f38e8dada..c02ed65ffa38 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -683,6 +683,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; + uint32_t min, max; memset(&metrics, 0, sizeof(metrics)); @@ -743,6 +744,13 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (ret) return ret; break; + case SMU_GFXCLK: + case SMU_SCLK: + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value); + if (ret) { + return ret; + } + break; default: break; } @@ -768,6 +776,24 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (!cur_value_match_level) size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); break; + case SMU_GFXCLK: + case SMU_SCLK: + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, + i == 2 ? "*" : ""); + break; default: break; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index d60b8c5e8715..43028f2cd28b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -191,6 +191,9 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu) kfree(smu_table->watermarks_table); smu_table->watermarks_table = NULL; + kfree(smu_table->gpu_metrics_table); + smu_table->gpu_metrics_table = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 35145db6eedf..19a5d2c39c8d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -198,6 +198,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu) int smu_v13_0_check_fw_version(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; uint32_t if_version = 0xff, smu_version = 0xff; uint16_t smu_major; uint8_t smu_minor, smu_debug; @@ -210,6 +211,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu_major = (smu_version >> 16) & 0xffff; smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; + if (smu->is_apu) + adev->pm.fw_version = smu_version; switch (smu->adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 2): diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index a403657151ba..caf1775d48ef 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -64,7 +64,6 @@ static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1), - 
MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), @@ -135,14 +134,6 @@ static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = { TAB_MAP_VALID(CUSTOM_DPM), TAB_MAP_VALID(DPMCLOCKS), }; - -static struct cmn2asic_mapping yellow_carp_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), -}; static int yellow_carp_init_smc_tables(struct smu_context *smu) { @@ -543,81 +534,6 @@ static int yellow_carp_set_watermarks_table(struct smu_context *smu, return 0; } -static int yellow_carp_get_power_profile_mode(struct smu_context *smu, - char *buf) -{ - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; - uint32_t i, size = 0; - int16_t workload_type = 0; - - if (!buf) - return -EINVAL; - - for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { - /* - * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT. - * Not all profile modes are supported on yellow carp. - */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - i); - - if (workload_type < 0) - continue; - - size += sysfs_emit_at(buf, size, "%2d %14s%s\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); - } - - return size; -} - -static int yellow_carp_set_power_profile_mode(struct smu_context *smu, - long *input, uint32_t size) -{ - int workload_type, ret; - uint32_t profile_mode = input[size]; - - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; - } - - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || - profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) - return 0; - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on YELLOWCARP\n", - profile_mode); - return -EINVAL; - } - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, - NULL); - if (ret) { - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", - workload_type); - return ret; - } - - smu->power_profile_mode = profile_mode; - - return 0; -} - static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu, void **table) { @@ -781,6 +697,11 @@ static int yellow_carp_get_current_clk_freq(struct smu_context *smu, case SMU_FCLK: return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, value); + case SMU_GFXCLK: + case SMU_SCLK: + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetGfxclkFrequency, 0, value); + break; default: return -EINVAL; } @@ -1051,6 +972,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, { int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); @@ -1089,6 +1011,27 @@ static int 
yellow_carp_print_clk_levels(struct smu_context *smu, cur_value == value ? "*" : ""); } break; + case SMU_GFXCLK: + case SMU_SCLK: + ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + goto print_clk_out; + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, + i == 2 ? "*" : ""); + break; default: break; } @@ -1238,8 +1181,6 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = { .read_sensor = yellow_carp_read_sensor, .is_dpm_running = yellow_carp_is_dpm_running, .set_watermarks_table = yellow_carp_set_watermarks_table, - .get_power_profile_mode = yellow_carp_get_power_profile_mode, - .set_power_profile_mode = yellow_carp_set_power_profile_mode, .get_gpu_metrics = yellow_carp_get_gpu_metrics, .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, @@ -1261,6 +1202,5 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu) smu->message_map = yellow_carp_message_map; smu->feature_map = yellow_carp_feature_mask_map; smu->table_map = yellow_carp_table_map; - smu->workload_map = yellow_carp_workload_map; smu->is_apu = true; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h index b3ad8352c68a..a9205a8ea3ad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h @@ -24,5 +24,6 @@ #define __YELLOW_CARP_PPT_H__ extern void yellow_carp_set_ppt_funcs(struct smu_context *smu); +#define YELLOW_CARP_UMD_PSTATE_GFXCLK 1100 #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 843d2cbfc71d..ea6f50c08c5f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -139,9 +139,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, const char *message = smu_get_message_name(smu, msg); switch (reg_c2pmsg_90) { - case SMU_RESP_NONE: + case SMU_RESP_NONE: { + u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66); + u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); dev_err_ratelimited(adev->dev, - "SMU: I'm not done with your previous command!"); + "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X", + msg_idx, prm); + } break; case SMU_RESP_OK: /* The SMU executed the command. It completed with a |
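
The amdgpu_dpm.c hunk above caches the last successfully programmed gate state per IP block, so a repeated gate/ungate request becomes a no-op instead of another round-trip to the SMU. A condensed sketch of that pattern follows; the program_hw callback is illustrative (the real code dispatches on block_type), while pwr_state, ip_power_state and AMD_IP_BLOCK_TYPE_NUM come from the hunks above. The plain read-then-set (no cmpxchg) assumes callers are serialized, as the patch itself appears to.

	static atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM];

	static int set_powergating_cached(uint32_t block_type, bool gate,
					  int (*program_hw)(uint32_t, bool))
	{
		enum ip_power_state target = gate ? POWER_STATE_OFF : POWER_STATE_ON;
		int ret;

		/* Already in the requested state: skip the SMU message. */
		if (atomic_read(&pwr_state[block_type]) == target)
			return 0;

		ret = program_hw(block_type, gate);

		/* Cache only on success, so a failed transition is retried. */
		if (!ret)
			atomic_set(&pwr_state[block_type], target);

		return ret;
	}

POWER_STATE_UNKNOWN (0) is the zero-initialized reset value, so the first real request for each block always reaches the hardware.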
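The sprintf() conversions and the new phm_get_sysfs_buf()/smu_cmn_get_sysfs_buf() calls above all work around the same constraint: sysfs_emit_at() warns and writes nothing unless buf points at the start of a page, while several of these callbacks can be handed buf plus an offset. A minimal sketch of the resulting pattern, with a hypothetical callback name (only phm_get_sysfs_buf() and sysfs_emit_at() are real APIs here):

	static int example_show_levels(char *buf)
	{
		int size = 0;

		/* Rewind a possibly mid-page pointer to the page start and
		 * carry the old offset in size, so sysfs_emit_at() accepts it.
		 */
		phm_get_sysfs_buf(&buf, &size);

		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", 200, "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 400, "*");

		/* Length measured from the page start, matching what the
		 * converted get_power_profile_mode() callbacks return.
		 */
		return size;
	}

Paths that append at buf + size with plain sprintf() sidestep the alignment check entirely, which is presumably why the inner print_clock_levels() loops were converted to sprintf() rather than realigned.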
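cyan_skillfish, vangogh and yellow_carp all gain the same three-level GFXCLK/SCLK readout: with no discrete DPM table to walk, the driver reports min, current and max as levels 0/1/2 and stars whichever one the queried frequency matches, substituting the UMD pstate value for the middle level when the current clock sits strictly between the bounds. A standalone sketch of that shared logic (the function name and parameter list are illustrative, not from the driver):

	static int print_sclk_levels(char *buf, int size, uint32_t cur,
				     uint32_t min, uint32_t max,
				     uint32_t pstate_std)
	{
		int i;

		if (cur == max)
			i = 2;
		else if (cur == min)
			i = 0;
		else
			i = 1;	/* in between: report it as the middle level */

		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				      i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				      i == 1 ? cur : pstate_std,
				      i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				      i == 2 ? "*" : "");

		return size;
	}

The navi10 hunk applies the same idea in reverse: rather than synthesizing a midpoint when the current clock equals min or max, it now collapses the table to two levels.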