Diffstat (limited to 'drivers/gpu/drm/amd/pm/swsmu')
31 files changed, 2267 insertions, 457 deletions
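The bulk of this series replaces the single smu->plpd_mode field with a generic per-policy table (struct smu_dpm_policy inside a struct smu_dpm_policy_ctxt) that each ASIC backend populates, and that smu_get_pm_policy()/smu_set_pm_policy()/smu_get_pm_policy_info() operate on. The standalone C sketch below is not part of the patch; it is a trimmed, hypothetical model (minimal types, a fake set_policy hook instead of the PMFW message) of the lookup and level-setting flow those helpers implement, included only to make the hunks that follow easier to read.

/*
 * Simplified, compile-outside-the-kernel model of the new DPM policy flow.
 * The real structures and helpers live in amdgpu_smu.h / amdgpu_smu.c.
 */
#include <stdio.h>

#define PP_POLICY_MAX_LEVELS 8

enum pp_pm_policy {
	PP_PM_POLICY_NONE = 0,
	PP_PM_POLICY_SOC_PSTATE,
	PP_PM_POLICY_XGMI_PLPD,
	PP_PM_POLICY_NUM,
};

struct smu_dpm_policy {
	enum pp_pm_policy policy_type;
	unsigned long level_mask;	/* BIT(level) for each supported level */
	int current_level;
	int (*set_policy)(int level);	/* stands in for the PMFW message call */
};

struct smu_dpm_policy_ctxt {
	struct smu_dpm_policy policies[PP_PM_POLICY_NUM];
	unsigned long policy_mask;	/* BIT(policy_type) for registered policies */
};

static int fake_set_plpd(int level)
{
	printf("send PLPD level %d to PMFW\n", level);
	return 0;
}

/* roughly mirrors smu_get_pm_policy(): find the registered policy of a type */
static struct smu_dpm_policy *get_policy(struct smu_dpm_policy_ctxt *ctxt,
					 enum pp_pm_policy type)
{
	int i;

	for (i = 0; i < PP_PM_POLICY_NUM; i++)
		if ((ctxt->policy_mask & (1ul << ctxt->policies[i].policy_type)) &&
		    ctxt->policies[i].policy_type == type)
			return &ctxt->policies[i];
	return NULL;
}

/* roughly mirrors smu_set_pm_policy(): validate, call the hook, cache level */
static int set_policy_level(struct smu_dpm_policy_ctxt *ctxt,
			    enum pp_pm_policy type, int level)
{
	struct smu_dpm_policy *p = get_policy(ctxt, type);

	if (!p || level < 0 || level >= PP_POLICY_MAX_LEVELS ||
	    !(p->level_mask & (1ul << level)))
		return -1;
	if (p->current_level == level)
		return 0;
	if (p->set_policy(level))
		return -1;
	p->current_level = level;
	return 0;
}

int main(void)
{
	struct smu_dpm_policy_ctxt ctxt = { .policy_mask = 0 };
	struct smu_dpm_policy *p = &ctxt.policies[0];

	/* what a backend's allocate_dpm_context() does for XGMI PLPD */
	p->policy_type = PP_PM_POLICY_XGMI_PLPD;
	p->level_mask = (1ul << 0) | (1ul << 1);	/* e.g. DISALLOW, DEFAULT */
	p->current_level = 1;
	p->set_policy = fake_set_plpd;
	ctxt.policy_mask |= 1ul << PP_PM_POLICY_XGMI_PLPD;

	/* what the sysfs write path does via smu_set_pm_policy() */
	return set_policy_level(&ctxt, PP_PM_POLICY_XGMI_PLPD, 0);
}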
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 7789b313285c..bb3bc68dfc39 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -324,6 +324,18 @@ static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu, return ret; } +static int smu_set_mall_enable(struct smu_context *smu) +{ + int ret = 0; + + if (!smu->ppt_funcs->set_mall_enable) + return 0; + + ret = smu->ppt_funcs->set_mall_enable(smu); + + return ret; +} + /** * smu_dpm_set_power_gate - power gate/ungate the specific IP block * @@ -705,6 +717,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) smu_v13_0_0_set_ppt_funcs(smu); break; case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): smu_v13_0_6_set_ppt_funcs(smu); /* Enable pp_od_clk_voltage node */ smu->od_enabled = true; @@ -714,6 +727,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) break; case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): + case IP_VERSION(14, 0, 4): smu_v14_0_0_set_ppt_funcs(smu); break; case IP_VERSION(14, 0, 2): @@ -1196,17 +1210,28 @@ static void smu_swctf_delayed_work_handler(struct work_struct *work) static void smu_init_xgmi_plpd_mode(struct smu_context *smu) { + struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm); + struct smu_dpm_policy_ctxt *policy_ctxt; + struct smu_dpm_policy *policy; + + policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD); if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { - smu->plpd_mode = XGMI_PLPD_DEFAULT; + if (policy) + policy->current_level = XGMI_PLPD_DEFAULT; return; } /* PMFW put PLPD into default policy after enabling the feature */ if (smu_feature_is_enabled(smu, - SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) - smu->plpd_mode = XGMI_PLPD_DEFAULT; - else - smu->plpd_mode = XGMI_PLPD_NONE; + SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) { + if (policy) + policy->current_level = XGMI_PLPD_DEFAULT; + } else { + policy_ctxt = dpm_ctxt->dpm_policies; + if (policy_ctxt) + policy_ctxt->policy_mask &= + ~BIT(PP_PM_POLICY_XGMI_PLPD); + } } static int smu_sw_init(void *handle) @@ -1232,7 +1257,6 @@ static int smu_sw_init(void *handle) atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; @@ -1240,6 +1264,7 @@ static int smu_sw_init(void *handle) smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; + smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; @@ -1730,6 +1755,8 @@ static int smu_start_smc_engine(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; + smu->smc_fw_state = SMU_FW_INIT; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) { if (smu->ppt_funcs->load_microcode) { @@ -1791,6 +1818,7 @@ static int smu_hw_init(void *handle) smu_dpm_set_jpeg_enable(smu, true); smu_dpm_set_vpe_enable(smu, true); smu_dpm_set_umsch_mm_enable(smu, true); + smu_set_mall_enable(smu); smu_set_gfx_cgpg(smu, 
true); } @@ -1844,6 +1872,8 @@ static int smu_disable_dpms(struct smu_context *smu) case IP_VERSION(13, 0, 0): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 10): + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): return 0; default: break; @@ -1894,20 +1924,12 @@ static int smu_disable_dpms(struct smu_context *smu) } /* - * For SMU 13.0.4/11 and 14.0.0, PMFW will handle the features disablement properly + * For GFX11 and subsequent APUs, PMFW will handle the features disablement properly * for gpu reset and S0i3 cases. Driver involvement is unnecessary. */ - if (amdgpu_in_reset(adev) || adev->in_s0ix) { - switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(13, 0, 4): - case IP_VERSION(13, 0, 11): - case IP_VERSION(14, 0, 0): - case IP_VERSION(14, 0, 1): - return 0; - default: - break; - } - } + if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 && + smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix)) + return 0; /* * For gpu reset, runpm and hibernation through BACO, @@ -2202,12 +2224,13 @@ static int smu_bump_power_profile_mode(struct smu_context *smu, } static int smu_adjust_power_state_dynamic(struct smu_context *smu, - enum amd_dpm_forced_level level, - bool skip_display_settings) + enum amd_dpm_forced_level level, + bool skip_display_settings, + bool force_update) { int ret = 0; int index = 0; - long workload; + long workload[1]; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (!skip_display_settings) { @@ -2232,7 +2255,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, } } - if (smu_dpm_ctx->dpm_level != level) { + if (force_update || smu_dpm_ctx->dpm_level != level) { ret = smu_asic_set_performance_level(smu, level); if (ret) { dev_err(smu->adev->dev, "Failed to set performance level!"); @@ -2247,10 +2270,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; + workload[0] = smu->workload_setting[index]; - if (smu->power_profile_mode != workload) - smu_bump_power_profile_mode(smu, &workload, 0); + if (force_update || smu->power_profile_mode != workload[0]) + smu_bump_power_profile_mode(smu, workload, 0); } return ret; @@ -2270,11 +2293,13 @@ static int smu_handle_task(struct smu_context *smu, ret = smu_pre_display_config_changed(smu); if (ret) return ret; - ret = smu_adjust_power_state_dynamic(smu, level, false); + ret = smu_adjust_power_state_dynamic(smu, level, false, false); break; case AMD_PP_TASK_COMPLETE_INIT: + ret = smu_adjust_power_state_dynamic(smu, level, true, true); + break; case AMD_PP_TASK_READJUST_POWER_STATE: - ret = smu_adjust_power_state_dynamic(smu, level, true); + ret = smu_adjust_power_state_dynamic(smu, level, true, false); break; default: break; @@ -2300,7 +2325,7 @@ static int smu_switch_power_profile(void *handle, { struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - long workload; + long workload[1]; uint32_t index; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) @@ -2313,17 +2338,17 @@ static int smu_switch_power_profile(void *handle, smu->workload_mask &= ~(1 << smu->workload_prority[type]); index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; - workload = smu->workload_setting[index]; + workload[0] = smu->workload_setting[index]; } else { smu->workload_mask |= (1 << smu->workload_prority[type]); index = fls(smu->workload_mask); index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload = smu->workload_setting[index]; + workload[0] = smu->workload_setting[index]; } if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) - smu_bump_power_profile_mode(smu, &workload, 0); + smu_bump_power_profile_mode(smu, workload, 0); return 0; } @@ -2716,6 +2741,7 @@ int smu_get_power_limit(void *handle, switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): case IP_VERSION(11, 0, 7): case IP_VERSION(11, 0, 11): case IP_VERSION(11, 0, 12): @@ -3494,26 +3520,101 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) return 0; } -int smu_set_xgmi_plpd_mode(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode) +static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf, + size_t *size) { + size_t offset = *size; + int level; + + for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) { + if (level == policy->current_level) + offset += sysfs_emit_at(sysbuf, offset, + "%d : %s*\n", level, + policy->desc->get_desc(policy, level)); + else + offset += sysfs_emit_at(sysbuf, offset, + "%d : %s\n", level, + policy->desc->get_desc(policy, level)); + } + + *size = offset; +} + +ssize_t smu_get_pm_policy_info(struct smu_context *smu, + enum pp_pm_policy p_type, char *sysbuf) +{ + struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; + struct smu_dpm_policy_ctxt *policy_ctxt; + struct smu_dpm_policy *dpm_policy; + size_t offset = 0; + + policy_ctxt = dpm_ctxt->dpm_policies; + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt || + !policy_ctxt->policy_mask) + return -EOPNOTSUPP; + + if (p_type == PP_PM_POLICY_NONE) + return -EINVAL; + + dpm_policy = smu_get_pm_policy(smu, p_type); + if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc) + return -ENOENT; + + if (!sysbuf) + return -EINVAL; + + smu_print_dpm_policy(dpm_policy, sysbuf, &offset); + + return offset; +} + +struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu, + enum pp_pm_policy p_type) +{ + struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; + struct smu_dpm_policy_ctxt *policy_ctxt; + int i; + + policy_ctxt = dpm_ctxt->dpm_policies; + if (!policy_ctxt) + return NULL; + + for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) { + if (policy_ctxt->policies[i].policy_type == p_type) + return &policy_ctxt->policies[i]; + } + + return NULL; +} + +int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, + int level) +{ + struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; + struct smu_dpm_policy *dpm_policy = NULL; + struct smu_dpm_policy_ctxt *policy_ctxt; int ret = -EOPNOTSUPP; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + policy_ctxt = dpm_ctxt->dpm_policies; + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt || + !policy_ctxt->policy_mask) return ret; - /* PLPD policy is not supported if it's NONE */ - if (smu->plpd_mode == XGMI_PLPD_NONE) + if (level < 0 || level >= PP_POLICY_MAX_LEVELS) + return -EINVAL; + + dpm_policy = smu_get_pm_policy(smu, p_type); + + if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy) return ret; - if (smu->plpd_mode == mode) + if (dpm_policy->current_level 
== level) return 0; - if (smu->ppt_funcs && smu->ppt_funcs->select_xgmi_plpd_policy) - ret = smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode); + ret = dpm_policy->set_policy(smu, level); if (!ret) - smu->plpd_mode = mode; + dpm_policy->current_level = level; return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 0917dec8efe3..b44a185d07e8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -362,6 +362,27 @@ struct smu_table_context { void *gpu_metrics_table; }; +struct smu_context; +struct smu_dpm_policy; + +struct smu_dpm_policy_desc { + const char *name; + char *(*get_desc)(struct smu_dpm_policy *dpm_policy, int level); +}; + +struct smu_dpm_policy { + struct smu_dpm_policy_desc *desc; + enum pp_pm_policy policy_type; + unsigned long level_mask; + int current_level; + int (*set_policy)(struct smu_context *ctxt, int level); +}; + +struct smu_dpm_policy_ctxt { + struct smu_dpm_policy policies[PP_PM_POLICY_NUM]; + unsigned long policy_mask; +}; + struct smu_dpm_context { uint32_t dpm_context_size; void *dpm_context; @@ -372,6 +393,7 @@ struct smu_dpm_context { struct smu_power_state *dpm_request_power_state; struct smu_power_state *dpm_current_power_state; struct mclock_latency_table *mclk_latency_table; + struct smu_dpm_policy_ctxt *dpm_policies; }; struct smu_power_gate { @@ -473,6 +495,12 @@ struct stb_context { spinlock_t lock; }; +enum smu_fw_status { + SMU_FW_INIT = 0, + SMU_FW_RUNTIME, + SMU_FW_HANG, +}; + #define WORKLOAD_POLICY_MAX 7 /* @@ -540,6 +568,7 @@ struct smu_context { uint32_t smc_fw_if_version; uint32_t smc_fw_version; uint32_t smc_fw_caps; + uint8_t smc_fw_state; bool uploading_custom_pp_table; bool dc_controlled_by_gpio; @@ -580,8 +609,6 @@ struct smu_context { struct delayed_work swctf_delayed_work; - enum pp_xgmi_plpd_mode plpd_mode; - /* data structures for wbrf feature support */ bool wbrf_supported; struct notifier_block wbrf_notifier; @@ -857,12 +884,6 @@ struct pptable_funcs { int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state); /** - * @select_xgmi_plpd_policy: Select xgmi per-link power down policy. - */ - int (*select_xgmi_plpd_policy)(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode); - - /** * @update_pcie_parameters: Update and upload the system's PCIe * capabilites to the SMU. * &pcie_gen_cap: Maximum allowed PCIe generation. @@ -1395,6 +1416,11 @@ struct pptable_funcs { int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable); /** + * @set_mall_enable: Init MALL power gating control. + */ + int (*set_mall_enable)(struct smu_context *smu); + + /** * @notify_rlc_state: Notify RLC power state to SMU. 
*/ int (*notify_rlc_state)(struct smu_context *smu, bool en); @@ -1551,6 +1577,12 @@ typedef struct { uint32_t MmHubPadding[8]; } WifiBandEntryTable_t; +#define STR_SOC_PSTATE_POLICY "soc_pstate" +#define STR_XGMI_PLPD_POLICY "xgmi_plpd" + +struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu, + enum pp_pm_policy p_type); + #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4) int smu_get_power_limit(void *handle, uint32_t *limit, @@ -1598,5 +1630,10 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev); int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size); int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size); int smu_send_rma_reason(struct smu_context *smu); +int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, + int level); +ssize_t smu_get_pm_policy_info(struct smu_context *smu, + enum pp_pm_policy p_type, char *sysbuf); + #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h index 97a29b80fb13..ee457a6f0813 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h @@ -1641,6 +1641,23 @@ typedef struct { uint16_t dGPU_W_MAX ; uint16_t padding ; + uint16_t MovingAverageGfxclkFrequencyTarget; + uint16_t MovingAverageGfxclkFrequencyPreDs; + uint16_t MovingAverageGfxclkFrequencyPostDs; + uint16_t MovingAverageFclkFrequencyPreDs; + uint16_t MovingAverageFclkFrequencyPostDs; + uint16_t MovingAverageMemclkFrequencyPreDs; + uint16_t MovingAverageMemclkFrequencyPostDs; + uint16_t MovingAverageVclk0Frequency; + uint16_t MovingAverageDclk0Frequency; + uint16_t MovingAverageGfxActivity; + uint16_t MovingAverageUclkActivity; + uint16_t MovingAverageVcn0ActivityPercentage; + uint16_t MovingAveragePCIeBusy; + uint16_t MovingAverageUclkActivity_MAX; + uint16_t MovingAverageSocketPower; + uint16_t MovingAveragePadding; + uint32_t MetricsCounter ; uint16_t AvgVoltage[SVI_PLANE_COUNT]; @@ -1653,7 +1670,7 @@ typedef struct { uint32_t EnergyAccumulator; uint16_t AverageSocketPower; - uint16_t AverageTotalBoardPower; + uint16_t MovingAverageTotalBoardPower; uint16_t AvgTemperature[TEMP_COUNT]; uint16_t AvgTemperatureFanIntake; @@ -1676,7 +1693,7 @@ typedef struct { uint16_t ApuSTAPMSmartShiftLimit; uint16_t ApuSTAPMLimit; - uint16_t AvgApuSocketPower; + uint16_t MovingAvgApuSocketPower; uint16_t AverageUclkActivity_MAX; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index 97522c085258..1bc30db22f9c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -169,7 +169,6 @@ typedef struct { uint8_t VpeClkLevelsEnabled; uint8_t NumMemPstatesEnabled; uint8_t NumFclkLevelsEnabled; - uint8_t spare; uint32_t MinGfxClk; uint32_t MaxGfxClk; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 0b3c2f54a343..822c6425d90e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0xC +#define SMU_METRICS_TABLE_VERSION 0xD typedef 
struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -227,6 +227,10 @@ typedef struct __attribute__((packed, aligned(4))) { // PCIE LINK Speed and width uint32_t PCIeLinkSpeed; uint32_t PCIeLinkWidth; + + // PER XCD ACTIVITY + uint32_t GfxBusy[8]; + uint64_t GfxBusyAcc[8]; } MetricsTableX_t; typedef struct __attribute__((packed, aligned(4))) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h index 86758051cb93..41cb681927e2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h @@ -92,7 +92,8 @@ #define PPSMC_MSG_McaBankCeDumpDW 0x3B #define PPSMC_MSG_SelectPLPDMode 0x40 #define PPSMC_MSG_RmaDueToBadPageThreshold 0x43 -#define PPSMC_Message_Count 0x44 +#define PPSMC_MSG_SelectPstatePolicy 0x44 +#define PPSMC_Message_Count 0x45 //PPSMC Reset Types for driver msg argument #define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h index c4dc5881d8df..e7f5ef49049f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h @@ -106,8 +106,8 @@ #define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA #define PPSMC_MSG_SetSoftMaxVpe 0x36 ///< #define PPSMC_MSG_SetSoftMinVpe 0x37 ///< -#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache -#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache +#define PPSMC_MSG_MALLPowerController 0x38 ///< Set MALL control +#define PPSMC_MSG_MALLPowerState 0x39 ///< Enter/Exit MALL PG #define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages /** @}*/ diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h index de2e442281ff..87ca5ceb1ece 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h @@ -92,7 +92,6 @@ //Resets #define PPSMC_MSG_PrepareMp1ForUnload 0x2E -#define PPSMC_MSG_Mode1Reset 0x2F //Set SystemVirtual DramAddrHigh #define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30 @@ -119,11 +118,12 @@ //STB to dram log #define PPSMC_MSG_DumpSTBtoDram 0x3D -#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3E -#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3F +#define PPSMC_MSG_STBtoDramLogSetDramAddress 0x3E +#define PPSMC_MSG_DummyUndefined 0x3F #define PPSMC_MSG_STBtoDramLogSetDramSize 0x40 #define PPSMC_MSG_SetOBMTraceBufferLogging 0x41 +#define PPSMC_MSG_UseProfilingMode 0x42 #define PPSMC_MSG_AllowGfxDcs 0x43 #define PPSMC_MSG_DisallowGfxDcs 0x44 #define PPSMC_MSG_EnableAudioStutterWA 0x45 @@ -135,6 +135,16 @@ #define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B #define PPSMC_MSG_SetPriorityDeltaGain 0x4C #define PPSMC_MSG_AllowIHHostInterrupt 0x4D +#define PPSMC_MSG_EnableShadowDpm 0x4E #define PPSMC_MSG_Mode3Reset 0x4F -#define PPSMC_Message_Count 0x50 +#define PPSMC_MSG_SetDriverDramAddr 0x50 +#define PPSMC_MSG_SetToolsDramAddr 0x51 +#define PPSMC_MSG_TransferTableSmu2DramWithAddr 0x52 +#define PPSMC_MSG_TransferTableDram2SmuWithAddr 0x53 +#define PPSMC_MSG_GetAllRunningSmuFeatures 0x54 +#define PPSMC_MSG_GetSvi3Voltage 0x55 +#define PPSMC_MSG_UpdatePolicy 0x56 +#define PPSMC_MSG_ExtPwrConnSupport 0x57 +#define PPSMC_MSG_PreloadSwPstateForUclkOverDrive 
0x58 +#define PPSMC_Message_Count 0x59 #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index c48214e3dc8e..e71a721c12b9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -272,7 +272,10 @@ __SMU_DUMMY_MAP(SetSoftMinVpe), \ __SMU_DUMMY_MAP(GetMetricsVersion), \ __SMU_DUMMY_MAP(EnableUCLKShadow), \ - __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), + __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \ + __SMU_DUMMY_MAP(SelectPstatePolicy), \ + __SMU_DUMMY_MAP(MALLPowerController), \ + __SMU_DUMMY_MAP(MALLPowerState), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -436,7 +439,16 @@ enum smu_clk_type { __SMU_DUMMY_MAP(BACO_CG), \ __SMU_DUMMY_MAP(SOC_CG), \ __SMU_DUMMY_MAP(LOW_POWER_DCNCLKS), \ - __SMU_DUMMY_MAP(WHISPER_MODE), + __SMU_DUMMY_MAP(WHISPER_MODE), \ + __SMU_DUMMY_MAP(EDC_PWRBRK), \ + __SMU_DUMMY_MAP(SOC_EDC_XVMIN), \ + __SMU_DUMMY_MAP(GFX_PSM_DIDT), \ + __SMU_DUMMY_MAP(APT_ALL_ENABLE), \ + __SMU_DUMMY_MAP(APT_SQ_THROTTLE), \ + __SMU_DUMMY_MAP(APT_PF_DCS), \ + __SMU_DUMMY_MAP(GFX_EDC_XVMIN), \ + __SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \ + __SMU_DUMMY_MAP(FAN_ABNORMAL), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index d9700a3f28d2..e58220a7ee2f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -298,5 +298,9 @@ int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable); int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu, struct freq_band_range *exclusion_ranges); + +int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 1fc4557e6fb4..46b456590a08 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -28,7 +28,7 @@ #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 -#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25 +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x26 #define FEATURE_MASK(feature) (1ULL << feature) @@ -46,6 +46,18 @@ #define MAX_DPM_LEVELS 16 #define MAX_PCIE_CONF 3 +#define SMU14_TOOL_SIZE 0x19000 + +#define CTF_OFFSET_EDGE 5 +#define CTF_OFFSET_HOTSPOT 5 +#define CTF_OFFSET_MEM 5 + +extern const int decoded_link_speed[5]; +extern const int decoded_link_width[7]; + +#define DECODE_GEN_SPEED(gen_speed_idx) (decoded_link_speed[gen_speed_idx]) +#define DECODE_LANE_WIDTH(lane_width_idx) (decoded_link_width[lane_width_idx]) + struct smu_14_0_max_sustainable_clocks { uint32_t display_clock; uint32_t phy_clock; @@ -228,5 +240,9 @@ int smu_v14_0_od_edit_dpm_table(struct smu_context *smu, void smu_v14_0_set_smu_mailbox_registers(struct smu_context *smu); +int smu_v14_0_enable_thermal_alert(struct smu_context *smu); + +int smu_v14_0_disable_thermal_alert(struct smu_context *smu); + #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h index 4a3fde89aed7..75c921e87360 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h @@ -27,7 +27,8 @@ #pragma 
pack(push, 1) -#define SMU_14_0_2_TABLE_FORMAT_REVISION 3 +#define SMU_14_0_2_TABLE_FORMAT_REVISION 23 +#define SMU_14_0_2_CUSTOM_TABLE_FORMAT_REVISION 1 // POWERPLAYTABLE::ulPlatformCaps #define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC need to show Powerplay page. @@ -43,6 +44,7 @@ #define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0 #define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD +#define SMU_14_0_2_PP_CUSTOM_OVERDRIVE_VERSION 0x1 #define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 enum SMU_14_0_2_OD_SW_FEATURE_CAP @@ -107,6 +109,7 @@ enum SMU_14_0_2_PWRMODE_SETTING SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE, SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO, SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE, + SMU_14_0_2_PMSETTING_COUNT }; #define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings @@ -127,17 +130,24 @@ struct smu_14_0_2_overdrive_table int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings }; +enum smu_14_0_3_pptable_source { + PPTABLE_SOURCE_IFWI = 0, + PPTABLE_SOURCE_DRIVER_HARDCODED = 1, + PPTABLE_SOURCE_PPGEN_REGISTRY = 2, + PPTABLE_SOURCE_MAX = PPTABLE_SOURCE_PPGEN_REGISTRY, +}; + struct smu_14_0_2_powerplay_table { struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen. uint8_t table_revision; // PPGen use only: table_revision = 3 - uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t). + uint8_t pptable_source; // PPGen UI dropdown box uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t) uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t. - uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable. - uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t. - uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable. - uint16_t pmfw_board_table_size; // The size of BoardTable_t. + uint16_t pmfw_sku_table_start_offset; // DO NOT CHANGE ORDER; The absolute start offset of the SkuTable_t (within smu_14_0_3_powerplay_table). + uint16_t pmfw_sku_table_size; // DO NOT CHANGE ORDER; The size of SkuTable_t. + uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t + uint16_t pmfw_board_table_size; // The size of BoardTable_t. uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable. uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t. 
uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base @@ -159,6 +169,36 @@ struct smu_14_0_2_powerplay_table PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes }; +enum SMU_14_0_2_CUSTOM_OD_SW_FEATURE_CAP { + SMU_14_0_2_CUSTOM_ODCAP_POWER_MODE = 0, + SMU_14_0_2_CUSTOM_ODCAP_COUNT +}; + +enum SMU_14_0_2_CUSTOM_OD_FEATURE_SETTING_ID { + SMU_14_0_2_CUSTOM_ODSETTING_POWER_MODE = 0, + SMU_14_0_2_CUSTOM_ODSETTING_COUNT, +}; + +struct smu_14_0_2_custom_overdrive_table { + uint8_t revision; + uint8_t reserve[3]; + uint8_t cap[SMU_14_0_2_CUSTOM_ODCAP_COUNT]; + int32_t max[SMU_14_0_2_CUSTOM_ODSETTING_COUNT]; + int32_t min[SMU_14_0_2_CUSTOM_ODSETTING_COUNT]; + int16_t pm_setting[SMU_14_0_2_PMSETTING_COUNT]; +}; + +struct smu_14_0_3_custom_powerplay_table { + uint8_t custom_table_revision; + uint16_t custom_table_size; + uint16_t custom_sku_table_offset; + uint32_t custom_platform_caps; + uint16_t software_shutdown_temp; + struct smu_14_0_2_custom_overdrive_table custom_overdrive_table; + uint32_t reserve[8]; + CustomSkuTable_t custom_sku_table_pmfw; +}; + #pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 6d334a2aff67..c0f6b59369b7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -283,9 +283,29 @@ static int arcturus_tables_init(struct smu_context *smu) return 0; } +static int arcturus_select_plpd_policy(struct smu_context *smu, int level) +{ + /* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */ + if (smu->smc_fw_version < 0x00361700) { + dev_err(smu->adev->dev, + "XGMI power down control is only supported by PMFW 54.23.0 and onwards\n"); + return -EINVAL; + } + + if (level == XGMI_PLPD_DEFAULT) + return smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, 1, NULL); + else if (level == XGMI_PLPD_DISALLOW) + return smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, 0, NULL); + else + return -EINVAL; +} + static int arcturus_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_dpm_policy *policy; smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context), GFP_KERNEL); @@ -293,6 +313,20 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context); + smu_dpm->dpm_policies = + kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL); + + if (!smu_dpm->dpm_policies) + return -ENOMEM; + + policy = &(smu_dpm->dpm_policies->policies[0]); + policy->policy_type = PP_PM_POLICY_XGMI_PLPD; + policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT); + policy->current_level = XGMI_PLPD_DEFAULT; + policy->set_policy = arcturus_select_plpd_policy; + smu_cmn_generic_plpd_policy_desc(policy); + smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD); + return 0; } @@ -403,6 +437,14 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } + /* XGMI PLPD is supported by 54.23.0 and onwards */ + if (smu->smc_fw_version < 0x00361700) { + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + smu_dpm->dpm_policies->policy_mask &= + ~BIT(PP_PM_POLICY_XGMI_PLPD); + } + return 0; } @@ -1416,6 +1458,9 @@ static int 
arcturus_set_power_profile_mode(struct smu_context *smu, if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && (smu->smc_fw_version >= 0x360d00)) { + if (size != 10) + return -EINVAL; + ret = smu_cmn_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, @@ -1449,6 +1494,8 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, activity_monitor.Mem_PD_Data_error_coeff = input[8]; activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; break; + default: + return -EINVAL; } ret = smu_cmn_update_table(smu, @@ -2175,27 +2222,6 @@ static int arcturus_set_df_cstate(struct smu_context *smu, return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } -static int arcturus_select_xgmi_plpd_policy(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode) -{ - /* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */ - if (smu->smc_fw_version < 0x00361700) { - dev_err(smu->adev->dev, "XGMI power down control is only supported by PMFW 54.23.0 and onwards\n"); - return -EINVAL; - } - - if (mode == XGMI_PLPD_DEFAULT) - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - 1, NULL); - else if (mode == XGMI_PLPD_DISALLOW) - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - 0, NULL); - else - return -EINVAL; -} - static const struct throttling_logging_label { uint32_t feature_mask; const char *label; @@ -2393,7 +2419,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, .set_df_cstate = arcturus_set_df_cstate, - .select_xgmi_plpd_policy = arcturus_select_xgmi_plpd_policy, .log_thermal_throttling_event = arcturus_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 5a68d365967f..16af1a329621 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1219,19 +1219,22 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, value); } -static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) +static int navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) { PPTable_t *pptable = smu->smu_table.driver_pptable; DpmDescriptor_t *dpm_desc = NULL; - uint32_t clk_index = 0; + int clk_index = 0; clk_index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK, clk_type); + if (clk_index < 0) + return clk_index; + dpm_desc = &pptable->DpmDescriptor[clk_index]; /* 0 - Fine grained DPM, 1 - Discrete DPM */ - return dpm_desc->SnapToDiscrete == 0; + return dpm_desc->SnapToDiscrete == 0 ? 
1 : 0; } static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap) @@ -1287,7 +1290,11 @@ static int navi10_emit_clk_levels(struct smu_context *smu, if (ret) return ret; - if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { + ret = navi10_is_support_fine_grained_dpm(smu, clk_type); + if (ret < 0) + return ret; + + if (!ret) { for (i = 0; i < count; i++) { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value); @@ -1382,8 +1389,6 @@ static int navi10_emit_clk_levels(struct smu_context *smu, case 2: curve_settings = &od_table->GfxclkFreq3; break; - default: - break; } *offset += sysfs_emit_at(buf, *offset, "%d: %uMHz %umV\n", i, curve_settings[0], @@ -1496,7 +1501,11 @@ static int navi10_print_clk_levels(struct smu_context *smu, if (ret) return size; - if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { + ret = navi10_is_support_fine_grained_dpm(smu, clk_type); + if (ret < 0) + return ret; + + if (!ret) { for (i = 0; i < count; i++) { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value); if (ret) @@ -1583,8 +1592,6 @@ static int navi10_print_clk_levels(struct smu_context *smu, case 2: curve_settings = &od_table->GfxclkFreq3; break; - default: - break; } size += sysfs_emit_at(buf, size, "%d: %uMHz %umV\n", i, curve_settings[0], @@ -1665,7 +1672,11 @@ static int navi10_force_clk_levels(struct smu_context *smu, case SMU_UCLK: case SMU_FCLK: /* There is only 2 levels for fine grained DPM */ - if (navi10_is_support_fine_grained_dpm(smu, clk_type)) { + ret = navi10_is_support_fine_grained_dpm(smu, clk_type); + if (ret < 0) + return ret; + + if (ret) { soft_max_level = (soft_max_level >= 1 ? 1 : 0); soft_min_level = (soft_min_level >= 1 ? 1 : 0); } @@ -1978,7 +1989,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", 2, - "MEMLK", + "MEMCLK", activity_monitor.Mem_FPS, activity_monitor.Mem_MinFreqStep, activity_monitor.Mem_MinActiveFreqType, @@ -2006,6 +2017,8 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u } if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + if (size != 10) + return -EINVAL; ret = smu_cmn_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, @@ -2038,7 +2051,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u activity_monitor.Soc_PD_Data_error_coeff = input[8]; activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; break; - case 2: /* Memlk */ + case 2: /* Memclk */ activity_monitor.Mem_FPS = input[1]; activity_monitor.Mem_MinFreqStep = input[2]; activity_monitor.Mem_MinActiveFreqType = input[3]; @@ -2049,6 +2062,8 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u activity_monitor.Mem_PD_Data_error_coeff = input[8]; activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; break; + default: + return -EINVAL; } ret = smu_cmn_update_table(smu, @@ -2066,8 +2081,10 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u smu->power_profile_mode); if (workload_type < 0) return -EINVAL; - smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1 << workload_type, NULL); + if (ret) + dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); return ret; } diff --git 
a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index e426f457a017..9c3c48297cba 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1691,7 +1691,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", " ", 2, - "MEMLK", + "MEMCLK", activity_monitor->Mem_FPS, activity_monitor->Mem_MinFreqStep, activity_monitor->Mem_MinActiveFreqType, @@ -1722,6 +1722,8 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * } if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + if (size != 10) + return -EINVAL; ret = smu_cmn_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, @@ -1754,7 +1756,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * activity_monitor->Fclk_PD_Data_error_coeff = input[8]; activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9]; break; - case 2: /* Memlk */ + case 2: /* Memclk */ activity_monitor->Mem_FPS = input[1]; activity_monitor->Mem_MinFreqStep = input[2]; activity_monitor->Mem_MinActiveFreqType = input[3]; @@ -1765,6 +1767,8 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * activity_monitor->Mem_PD_Data_error_coeff = input[8]; activity_monitor->Mem_PD_Data_error_rate_coeff = input[9]; break; + default: + return -EINVAL; } ret = smu_cmn_update_table(smu, @@ -1782,8 +1786,10 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * smu->power_profile_mode); if (workload_type < 0) return -EINVAL; - smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1 << workload_type, NULL); + if (ret) + dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 9d5ab2ea643a..16fcd9dcd202 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -94,7 +94,6 @@ int smu_v11_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; char ucode_prefix[25]; - char fw_name[SMU_FW_NAME_LEN]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -106,10 +105,7 @@ int smu_v11_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - - err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 379e44eb0019..22737b11b1bf 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -976,6 +976,18 @@ static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu, } } if (min) { + ret = vangogh_get_profiling_clk_mask(smu, + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK, + NULL, + NULL, + &mclk_mask, + &fclk_mask, + &soc_mask); + if (ret) + goto failed; + + vclk_mask = dclk_mask = 0; + switch (clk_type) { case SMU_UCLK: 
case SMU_MCLK: @@ -2450,6 +2462,8 @@ static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency, start, &residency); + if (ret) + return ret; if (!start) adev->gfx.gfx_off_residency = residency; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 8908bbb3ff1f..cc0504b063fa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -585,8 +585,6 @@ static int renoir_print_clk_levels(struct smu_context *smu, } switch (clk_type) { - case SMU_GFXCLK: - case SMU_SCLK: case SMU_SOCCLK: case SMU_MCLK: case SMU_DCEFCLK: @@ -928,11 +926,56 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) return ret; } +static int renior_set_dpm_profile_freq(struct smu_context *smu, + enum amd_dpm_forced_level level, + enum smu_clk_type clk_type) +{ + int ret = 0; + uint32_t sclk = 0, socclk = 0, fclk = 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + sclk = RENOIR_UMD_PSTATE_GFXCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + renoir_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) + renoir_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk, NULL); + break; + case SMU_SOCCLK: + socclk = RENOIR_UMD_PSTATE_SOCCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + renoir_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk); + break; + case SMU_FCLK: + case SMU_MCLK: + fclk = RENOIR_UMD_PSTATE_FCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + renoir_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) + renoir_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk, NULL); + break; + default: + ret = -EINVAL; + break; + } + + if (sclk) + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk, sclk); + + if (socclk) + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk, socclk); + + if (fclk) + ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk, fclk); + + return ret; +} + static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { int ret = 0; - uint32_t sclk_mask, mclk_mask, soc_mask; switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: @@ -1012,15 +1055,9 @@ static int renoir_set_performance_level(struct smu_context *smu, smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - ret = renoir_get_profiling_clk_mask(smu, level, - &sclk_mask, - &mclk_mask, - &soc_mask); - if (ret) - return ret; - renoir_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); - renoir_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); - renoir_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); + renior_set_dpm_profile_freq(smu, level, SMU_SCLK); + renior_set_dpm_profile_freq(smu, level, SMU_MCLK); + renior_set_dpm_profile_freq(smu, level, SMU_SOCCLK); break; case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index ce941fbb9cfb..2c35eb31475a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -266,9 +266,31 @@ static int aldebaran_tables_init(struct smu_context *smu) return 0; } +static int 
aldebaran_select_plpd_policy(struct smu_context *smu, int level) +{ + struct amdgpu_device *adev = smu->adev; + + /* The message only works on master die and NACK will be sent + * back for other dies, only send it on master die. + */ + if (adev->smuio.funcs->get_socket_id(adev) || + adev->smuio.funcs->get_die_id(adev)) + return 0; + + if (level == XGMI_PLPD_DEFAULT) + return smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, 0, NULL); + else if (level == XGMI_PLPD_DISALLOW) + return smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, 1, NULL); + else + return -EINVAL; +} + static int aldebaran_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_dpm_policy *policy; smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL); @@ -276,6 +298,20 @@ static int aldebaran_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); + smu_dpm->dpm_policies = + kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL); + + if (!smu_dpm->dpm_policies) + return -ENOMEM; + + policy = &(smu_dpm->dpm_policies->policies[0]); + policy->policy_type = PP_PM_POLICY_XGMI_PLPD; + policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT); + policy->current_level = XGMI_PLPD_DEFAULT; + policy->set_policy = aldebaran_select_plpd_policy; + smu_cmn_generic_plpd_policy_desc(policy); + smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD); + return 0; } @@ -1607,29 +1643,6 @@ static int aldebaran_set_df_cstate(struct smu_context *smu, return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } -static int aldebaran_select_xgmi_plpd_policy(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode) -{ - struct amdgpu_device *adev = smu->adev; - - /* The message only works on master die and NACK will be sent - back for other dies, only send it on master die */ - if (adev->smuio.funcs->get_socket_id(adev) || - adev->smuio.funcs->get_die_id(adev)) - return 0; - - if (mode == XGMI_PLPD_DEFAULT) - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - 0, NULL); - else if (mode == XGMI_PLPD_DISALLOW) - return smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - 1, NULL); - else - return -EINVAL; -} - static const struct throttling_logging_label { uint32_t feature_mask; const char *label; @@ -1850,7 +1863,6 @@ static int aldebaran_mode1_reset(struct smu_context *smu) u32 fatal_err, param; int ret = 0; struct amdgpu_device *adev = smu->adev; - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); fatal_err = 0; param = SMU_RESET_MODE_1; @@ -1863,8 +1875,8 @@ static int aldebaran_mode1_reset(struct smu_context *smu) } else { /* fatal error triggered by ras, PMFW supports the flag from 68.44.0 */ - if ((smu->smc_fw_version >= 0x00442c00) && ras && - atomic_read(&ras->in_recovery)) + if ((smu->smc_fw_version >= 0x00442c00) && + amdgpu_ras_get_fed_status(adev)) fatal_err = 1; param |= (fatal_err << 16); @@ -1886,7 +1898,8 @@ static int aldebaran_mode2_reset(struct smu_context *smu) index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GfxDeviceDriverReset); - + if (index < 0 ) + return -EINVAL; mutex_lock(&smu->message_lock); if (smu->smc_fw_version >= 0x00441400) { ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2); @@ -2068,7 +2081,6 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .set_soft_freq_limited_range = 
aldebaran_set_soft_freq_limited_range, .od_edit_dpm_table = aldebaran_usr_edit_dpm_table, .set_df_cstate = aldebaran_set_df_cstate, - .select_xgmi_plpd_policy = aldebaran_select_xgmi_plpd_policy, .log_thermal_throttling_event = aldebaran_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a8d34adc7d3f..e17466cc1952 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -79,8 +79,8 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 #define smnPCIE_LC_SPEED_CNTL 0x11140290 -#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 -#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0 +#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5 #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 @@ -92,7 +92,6 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char fw_name[30]; char ucode_prefix[15]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; @@ -104,10 +103,7 @@ int smu_v13_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - - err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix); if (err) goto out; @@ -271,7 +267,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; if (smu->is_apu || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6)) + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14)) adev->pm.fw_version = smu_version; /* only for dGPU w/ SMU13*/ @@ -530,10 +527,12 @@ int smu_v13_0_fini_smc_tables(struct smu_context *smu) smu_table->watermarks_table = NULL; smu_table->metrics_time = 0; + kfree(smu_dpm->dpm_policies); kfree(smu_dpm->dpm_context); kfree(smu_dpm->golden_dpm_context); kfree(smu_dpm->dpm_current_power_state); kfree(smu_dpm->dpm_request_power_state); + smu_dpm->dpm_policies = NULL; smu_dpm->dpm_context = NULL; smu_dpm->golden_dpm_context = NULL; smu_dpm->dpm_context_size = 0; @@ -1559,22 +1558,9 @@ int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c uint32_t clock_limit; if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) @@ -1894,6 +1880,40 @@ int smu_v13_0_set_power_source(struct smu_context *smu, NULL); } +int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + int ret = 0; 
+ + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + *value = smu->smu_table.boot_values.uclk; + break; + case SMU_FCLK: + *value = smu->smu_table.boot_values.fclk; + break; + case SMU_GFXCLK: + case SMU_SCLK: + *value = smu->smu_table.boot_values.gfxclk; + break; + case SMU_SOCCLK: + *value = smu->smu_table.boot_values.socclk; + break; + case SMU_VCLK: + *value = smu->smu_table.boot_values.vclk; + break; + case SMU_DCLK: + *value = smu->smu_table.boot_values.dclk; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type, uint16_t level, uint32_t *value) @@ -1905,7 +1925,7 @@ int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, return -EINVAL; if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) - return 0; + return smu_v13_0_get_boot_freq_by_index(smu, clk_type, value); clk_id = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 1e09d5f2d82f..1d024b122b0c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2495,6 +2495,9 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, } if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + if (size != 9) + return -EINVAL; + ret = smu_cmn_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, @@ -2526,6 +2529,8 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, activity_monitor->Fclk_PD_Data_error_coeff = input[7]; activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; break; + default: + return -EINVAL; } ret = smu_cmn_update_table(smu, @@ -2557,17 +2562,21 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && smu->adev->pm.fw_version >= 0x00504500)) { workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - PP_SMC_POWER_PROFILE_POWERSAVING); + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_POWERSAVING); if (workload_type >= 0) workload_mask |= 1 << workload_type; } } - return smu_cmn_send_smc_msg_with_param(smu, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, workload_mask, NULL); + if (!ret) + smu->workload_mask = workload_mask; + + return ret; } static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) @@ -2781,10 +2790,9 @@ static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu, uint32_t *param) { struct amdgpu_device *adev = smu->adev; - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); if ((smu->smc_fw_version >= supported_version) && - ras && atomic_read(&ras->in_recovery)) + amdgpu_ras_get_fed_status(adev)) /* Set RAS fatal error reset flag */ *param = 1 << 16; else diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index b6257f34a7c6..b081ae3e8f43 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -758,31 +758,9 @@ static int smu_v13_0_4_get_dpm_ultimate_freq(struct smu_context *smu, int ret = 0; if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_FCLK: - clock_limit = smu->smu_table.boot_values.fclk; 
- break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - case SMU_VCLK: - clock_limit = smu->smu_table.boot_values.vclk; - break; - case SMU_DCLK: - clock_limit = smu->smu_table.boot_values.dclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 218f209c3775..9c2c43bfed0b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -643,7 +643,7 @@ static int smu_v13_0_5_get_dpm_level_count(struct smu_context *smu, *count = clk_table->NumDfPstatesEnabled; break; default: - break; + return -EINVAL; } return 0; @@ -733,31 +733,9 @@ static int smu_v13_0_5_get_dpm_ultimate_freq(struct smu_context *smu, int ret = 0; if (!smu_v13_0_5_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_FCLK: - clock_limit = smu->smu_table.boot_values.fclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - case SMU_VCLK: - clock_limit = smu->smu_table.boot_values.vclk; - break; - case SMU_DCLK: - clock_limit = smu->smu_table.boot_values.dclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 4d3eca2fc3f1..55ed6247eb61 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -68,6 +68,7 @@ #undef pr_debug MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin"); +MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin"); #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) @@ -120,6 +121,7 @@ struct mca_ras_info { #define P2S_TABLE_ID_A 0x50325341 #define P2S_TABLE_ID_X 0x50325358 +#define P2S_TABLE_ID_3 0x50325303 // clang-format off static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { @@ -173,6 +175,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI), MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), + MSG_MAP(SelectPstatePolicy, PPSMC_MSG_SelectPstatePolicy, 0), }; // clang-format on @@ -269,22 +272,22 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; uint32_t p2s_table_id = P2S_TABLE_ID_A; int ret = 0, i, p2stable_count; + int var = (adev->pdev->device & 0xF); char ucode_prefix[15]; - char fw_name[30]; /* No need to load P2S tables in IOV mode */ if (amdgpu_sriov_vf(adev)) return 0; - if (!(adev->flags & AMD_IS_APU)) + if (!(adev->flags & AMD_IS_APU)) { p2s_table_id = P2S_TABLE_ID_X; + if (var == 0x5) + p2s_table_id = P2S_TABLE_ID_3; + } amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - 
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - - ret = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + ret = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix); if (ret) goto out; @@ -368,9 +371,78 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu) return 0; } +static int smu_v13_0_6_select_policy_soc_pstate(struct smu_context *smu, + int policy) +{ + struct amdgpu_device *adev = smu->adev; + int ret, param; + + switch (policy) { + case SOC_PSTATE_DEFAULT: + param = 0; + break; + case SOC_PSTATE_0: + param = 1; + break; + case SOC_PSTATE_1: + param = 2; + break; + case SOC_PSTATE_2: + param = 3; + break; + default: + return -EINVAL; + } + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SelectPstatePolicy, + param, NULL); + + if (ret) + dev_err(adev->dev, "select soc pstate policy %d failed", + policy); + + return ret; +} + +static int smu_v13_0_6_select_plpd_policy(struct smu_context *smu, int level) +{ + struct amdgpu_device *adev = smu->adev; + int ret, param; + + switch (level) { + case XGMI_PLPD_DEFAULT: + param = PPSMC_PLPD_MODE_DEFAULT; + break; + case XGMI_PLPD_OPTIMIZED: + param = PPSMC_PLPD_MODE_OPTIMIZED; + break; + case XGMI_PLPD_DISALLOW: + param = 0; + break; + default: + return -EINVAL; + } + + if (level == XGMI_PLPD_DISALLOW) + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_GmiPwrDnControl, param, NULL); + else + /* change xgmi per-link power down policy */ + ret = smu_cmn_send_smc_msg_with_param( + smu, SMU_MSG_SelectPLPDMode, param, NULL); + + if (ret) + dev_err(adev->dev, + "select xgmi per-link power down policy %d failed\n", + level); + + return ret; +} + static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_dpm_policy *policy; smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL); @@ -378,6 +450,36 @@ static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu) return -ENOMEM; smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context); + smu_dpm->dpm_policies = + kzalloc(sizeof(struct smu_dpm_policy_ctxt), GFP_KERNEL); + if (!smu_dpm->dpm_policies) { + kfree(smu_dpm->dpm_context); + return -ENOMEM; + } + + if (!(smu->adev->flags & AMD_IS_APU)) { + policy = &(smu_dpm->dpm_policies->policies[0]); + + policy->policy_type = PP_PM_POLICY_SOC_PSTATE; + policy->level_mask = BIT(SOC_PSTATE_DEFAULT) | + BIT(SOC_PSTATE_0) | BIT(SOC_PSTATE_1) | + BIT(SOC_PSTATE_2); + policy->current_level = SOC_PSTATE_DEFAULT; + policy->set_policy = smu_v13_0_6_select_policy_soc_pstate; + smu_cmn_generic_soc_policy_desc(policy); + smu_dpm->dpm_policies->policy_mask |= + BIT(PP_PM_POLICY_SOC_PSTATE); + } + policy = &(smu_dpm->dpm_policies->policies[1]); + + policy->policy_type = PP_PM_POLICY_XGMI_PLPD; + policy->level_mask = BIT(XGMI_PLPD_DISALLOW) | BIT(XGMI_PLPD_DEFAULT) | + BIT(XGMI_PLPD_OPTIMIZED); + policy->current_level = XGMI_PLPD_DEFAULT; + policy->set_policy = smu_v13_0_6_select_plpd_policy; + smu_cmn_generic_plpd_policy_desc(policy); + smu_dpm->dpm_policies->policy_mask |= BIT(PP_PM_POLICY_XGMI_PLPD); + return 0; } @@ -462,8 +564,10 @@ static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu, memset(&pm_metrics->common_header, 0, sizeof(pm_metrics->common_header)); - pm_metrics->common_header.mp1_ip_discovery_version = - IP_VERSION(13, 0, 6); + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6)) + pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 6); + 
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 14)) + pm_metrics->common_header.mp1_ip_discovery_version = IP_VERSION(13, 0, 14); pm_metrics->common_header.pmfw_version = pmfw_version; pm_metrics->common_header.pmmetrics_version = table_version; pm_metrics->common_header.structure_size = @@ -636,6 +740,15 @@ static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu, return ret; } +static void smu_v13_0_6_pm_policy_init(struct smu_context *smu) +{ + struct smu_dpm_policy *policy; + + policy = smu_get_pm_policy(smu, PP_PM_POLICY_SOC_PSTATE); + if (policy) + policy->current_level = SOC_PSTATE_DEFAULT; +} + static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu) { struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; @@ -665,6 +778,16 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu) smu_v13_0_6_setup_driver_pptable(smu); + /* DPM policy not supported in older firmwares */ + if (!(smu->adev->flags & AMD_IS_APU) && + (smu->smc_fw_version < 0x00556000)) { + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + smu_dpm->dpm_policies->policy_mask &= + ~BIT(PP_PM_POLICY_SOC_PSTATE); + } + + smu_v13_0_6_pm_policy_init(smu); /* gfxclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.gfx_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { @@ -1984,8 +2107,12 @@ static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap, } mutex_lock(&adev->pm.mutex); r = smu_v13_0_6_request_i2c_xfer(smu, req); - if (r) - goto fail; + if (r) { + /* Retry once, in case of an i2c collision */ + r = smu_v13_0_6_request_i2c_xfer(smu, req); + if (r) + goto fail; + } for (c = i = 0; i < num_msgs; i++) { if (!(msg[i].flags & I2C_M_RD)) { @@ -2333,6 +2460,8 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu) index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GfxDeviceDriverReset); + if (index < 0) + return index; mutex_lock(&smu->message_lock); @@ -2454,24 +2583,14 @@ failed: static int smu_v13_0_6_mode1_reset(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - struct amdgpu_hive_info *hive = NULL; - u32 hive_ras_recovery = 0; - struct amdgpu_ras *ras; u32 fatal_err, param; int ret = 0; - hive = amdgpu_get_xgmi_hive(adev); - ras = amdgpu_ras_get_context(adev); fatal_err = 0; param = SMU_RESET_MODE_1; - if (hive) { - hive_ras_recovery = atomic_read(&hive->ras_recovery); - amdgpu_put_xgmi_hive(hive); - } - /* fatal error triggered by ras, PMFW supports the flag */ - if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) + if (amdgpu_ras_get_fed_status(adev)) fatal_err = 1; param |= (fatal_err << 16); @@ -2928,55 +3047,6 @@ static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_i return true; } -static int __mca_smu_get_ras_mca_set(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, - enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) -{ - struct mca_bank_entry entry; - uint32_t mca_cnt; - int i, ret; - - ret = mca_get_valid_mca_count(adev, type, &mca_cnt); - if (ret) - return ret; - - /* if valid mca bank count is 0, the driver can return 0 directly */ - if (!mca_cnt) - return 0; - - for (i = 0; i < mca_cnt; i++) { - memset(&entry, 0, sizeof(entry)); - ret = mca_get_mca_entry(adev, type, i, &entry); - if (ret) - return ret; - - if (mca_ras && !mca_bank_is_valid(adev, mca_ras, type, &entry)) - continue; - - ret = amdgpu_mca_bank_set_add_entry(mca_set, &entry); - if (ret) - return ret; - } - - return 0; -} - 
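/*
 * Rough sketch (assumed generic plumbing, not part of this patch) of how a
 * policy registered in smu_v13_0_6_allocate_dpm_context() above gets applied:
 * look it up by type, validate the requested level against level_mask, then
 * let the ASIC-specific ->set_policy() hook send the SMU message.  The helper
 * name below is illustrative only.
 */
static int example_apply_pm_policy(struct smu_context *smu, int p_type, int level)
{
	struct smu_dpm_policy *policy;
	int ret;

	policy = smu_get_pm_policy(smu, p_type);
	if (!policy || !(policy->level_mask & BIT(level)))
		return -EINVAL;

	ret = policy->set_policy(smu, level);
	if (!ret)
		policy->current_level = level;

	return ret;
}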
-static int mca_smu_get_ras_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set) -{ - const struct mca_ras_info *mca_ras = NULL; - - if (!mca_set) - return -EINVAL; - - if (blk != AMDGPU_RAS_BLOCK_COUNT) { - mca_ras = mca_get_mca_ras_info(adev, blk); - if (!mca_ras) - return -EOPNOTSUPP; - } - - return __mca_smu_get_ras_mca_set(adev, mca_ras, type, mca_set); -} - static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count) { @@ -3013,7 +3083,6 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = { .max_ue_count = 12, .max_ce_count = 12, .mca_set_debug_mode = mca_smu_set_debug_mode, - .mca_get_ras_mca_set = mca_smu_get_ras_mca_set, .mca_parse_mca_error_count = mca_smu_parse_mca_error_count, .mca_get_mca_entry = mca_smu_get_mca_entry, .mca_get_valid_mca_count = mca_smu_get_valid_mca_count, @@ -3169,44 +3238,6 @@ static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { .parse_error_code = aca_smu_parse_error_code, }; -static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, - enum pp_xgmi_plpd_mode mode) -{ - struct amdgpu_device *adev = smu->adev; - int ret, param; - - switch (mode) { - case XGMI_PLPD_DEFAULT: - param = PPSMC_PLPD_MODE_DEFAULT; - break; - case XGMI_PLPD_OPTIMIZED: - param = PPSMC_PLPD_MODE_OPTIMIZED; - break; - case XGMI_PLPD_DISALLOW: - param = 0; - break; - default: - return -EINVAL; - } - - if (mode == XGMI_PLPD_DISALLOW) - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GmiPwrDnControl, - param, NULL); - else - /* change xgmi per-link power down policy */ - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SelectPLPDMode, - param, NULL); - - if (ret) - dev_err(adev->dev, - "select xgmi per-link power down policy %d failed\n", - mode); - - return ret; -} - static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, @@ -3247,7 +3278,6 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range, .od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table, - .select_xgmi_plpd_policy = smu_v13_0_6_select_xgmi_plpd_policy, .log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .get_gpu_metrics = smu_v13_0_6_get_gpu_metrics, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index e996a0a4d33e..b891a5e0a396 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2378,7 +2378,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf size += sysfs_emit_at(buf, size, " "); for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) - size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i], + size += sysfs_emit_at(buf, size, "%d %-14s%s", i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? 
"* " : " "); size += sysfs_emit_at(buf, size, "\n"); @@ -2408,7 +2408,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf do { \ size += sysfs_emit_at(buf, size, "%-30s", #field); \ for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \ - size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \ + size += sysfs_emit_at(buf, size, "%-18d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \ size += sysfs_emit_at(buf, size, "\n"); \ } while (0) @@ -2450,6 +2450,8 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp } if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + if (size != 8) + return -EINVAL; ret = smu_cmn_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, @@ -2478,6 +2480,8 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp activity_monitor->Fclk_MinActiveFreq = input[6]; activity_monitor->Fclk_BoosterFreq = input[7]; break; + default: + return -EINVAL; } ret = smu_cmn_update_table(smu, @@ -2495,9 +2499,14 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp smu->power_profile_mode); if (workload_type < 0) return -EINVAL; - smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1 << workload_type, NULL); + if (ret) + dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu->workload_mask = (1 << workload_type); + return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index d8bcf765a803..260c339f89c5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -777,7 +777,7 @@ static int yellow_carp_get_dpm_level_count(struct smu_context *smu, *count = clk_table->NumDfPstatesEnabled; break; default: - break; + return -EINVAL; } return 0; @@ -867,31 +867,9 @@ static int yellow_carp_get_dpm_ultimate_freq(struct smu_context *smu, int ret = 0; if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) { - switch (clk_type) { - case SMU_MCLK: - case SMU_UCLK: - clock_limit = smu->smu_table.boot_values.uclk; - break; - case SMU_FCLK: - clock_limit = smu->smu_table.boot_values.fclk; - break; - case SMU_GFXCLK: - case SMU_SCLK: - clock_limit = smu->smu_table.boot_values.gfxclk; - break; - case SMU_SOCCLK: - clock_limit = smu->smu_table.boot_values.socclk; - break; - case SMU_VCLK: - clock_limit = smu->smu_table.boot_values.vclk; - break; - case SMU_DCLK: - clock_limit = smu->smu_table.boot_values.dclk; - break; - default: - clock_limit = 0; - break; - } + ret = smu_v13_0_get_boot_freq_by_index(smu, clk_type, &clock_limit); + if (ret) + return ret; /* clock in Mhz unit */ if (min) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 68b9bf822e8d..865e916fc425 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -38,6 +38,8 @@ #include "amdgpu_ras.h" #include "smu_cmn.h" +#include "asic_reg/thm/thm_14_0_2_offset.h" +#include "asic_reg/thm/thm_14_0_2_sh_mask.h" #include "asic_reg/mp/mp_14_0_2_offset.h" #include "asic_reg/mp/mp_14_0_2_sh_mask.h" @@ -46,6 +48,8 @@ #define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342 #define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0 +const int decoded_link_speed[5] = 
{1, 2, 3, 4, 5}; +const int decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; /* * DO NOT use these for err/warn/info/debug messages. * Use dev_err, dev_warn, dev_info and dev_dbg instead. @@ -64,7 +68,6 @@ MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); int smu_v14_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char fw_name[30]; char ucode_prefix[15]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; @@ -76,10 +79,7 @@ int smu_v14_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); - - err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); + err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix); if (err) goto out; @@ -136,8 +136,7 @@ int smu_v14_0_load_microcode(struct smu_context *smu) 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK); for (i = 0; i < adev->usec_timeout; i++) { - if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (smu->is_apu) mp1_fw_flags = RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); else @@ -210,8 +209,7 @@ int smu_v14_0_check_fw_status(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; uint32_t mp1_fw_flags; - if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (smu->is_apu) mp1_fw_flags = RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); else @@ -245,6 +243,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(14, 0, 0): + case IP_VERSION(14, 0, 4): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; case IP_VERSION(14, 0, 1): @@ -453,17 +452,26 @@ int smu_v14_0_init_smc_tables(struct smu_context *smu) ret = -ENOMEM; goto err3_out; } + + smu_table->user_overdrive_table = + kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); + if (!smu_table->user_overdrive_table) { + ret = -ENOMEM; + goto err4_out; + } } smu_table->combo_pptable = kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL); if (!smu_table->combo_pptable) { ret = -ENOMEM; - goto err4_out; + goto err5_out; } return 0; +err5_out: + kfree(smu_table->user_overdrive_table); err4_out: kfree(smu_table->boot_overdrive_table); err3_out: @@ -758,6 +766,8 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + case IP_VERSION(14, 0, 4): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -850,16 +860,22 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, unsigned tyep, enum amdgpu_interrupt_state state) { + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t low, high; uint32_t val = 0; switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* For THM irqs */ - // TODO + val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val); + + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0); /* For MP1 SW irqs */ - if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { + if 
(smu->is_apu) { val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); @@ -872,11 +888,27 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, break; case AMDGPU_IRQ_STATE_ENABLE: /* For THM irqs */ - // TODO + low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, + smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES); + high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, + smu->thermal_range.software_shutdown_temp); + val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); + val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val); + + val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); + val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); + val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); + WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val); /* For MP1 SW irqs */ - if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { + if (smu->is_apu) { val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0); val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); @@ -1469,8 +1501,7 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu, if (adev->vcn.harvest_config & (1 << i)) continue; - if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { + if (smu->is_apu) { if (i == 0) ret = smu_cmn_send_smc_msg_with_param(smu, enable ? SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0, @@ -1502,8 +1533,7 @@ int smu_v14_0_set_jpeg_enable(struct smu_context *smu, if (adev->jpeg.harvest_config & (1 << i)) continue; - if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || - amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { + if (smu->is_apu) { if (i == 0) ret = smu_cmn_send_smc_msg_with_param(smu, enable ? 
SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0, @@ -1846,3 +1876,41 @@ int smu_v14_0_od_edit_dpm_table(struct smu_context *smu, return ret; } +static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu) +{ + return smu_cmn_send_smc_msg(smu, + SMU_MSG_AllowIHHostInterrupt, + NULL); +} + +static int smu_v14_0_process_pending_interrupt(struct smu_context *smu) +{ + int ret = 0; + + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT)) + ret = smu_v14_0_allow_ih_interrupt(smu); + + return ret; +} + +int smu_v14_0_enable_thermal_alert(struct smu_context *smu) +{ + int ret = 0; + + if (!smu->irq_source.num_types) + return 0; + + ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0); + if (ret) + return ret; + + return smu_v14_0_process_pending_interrupt(smu); +} + +int smu_v14_0_disable_thermal_alert(struct smu_context *smu) +{ + if (!smu->irq_source.num_types) + return 0; + + return amdgpu_irq_put(smu->adev, &smu->irq_source, 0); +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index e4419e1561ef..8798ebfcea83 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -52,6 +52,26 @@ #define mmMP1_SMN_C2PMSG_90 0x029a #define mmMP1_SMN_C2PMSG_90_BASE_IDX 0 +/* MALLPowerController message arguments (Defines for the Cache mode control) */ +#define SMU_MALL_PMFW_CONTROL 0 +#define SMU_MALL_DRIVER_CONTROL 1 + +/* + * MALLPowerState message arguments + * (Defines for the Allocate/Release Cache mode if in driver mode) + */ +#define SMU_MALL_EXIT_PG 0 +#define SMU_MALL_ENTER_PG 1 + +#define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON + +#define SMU_14_0_0_UMD_PSTATE_GFXCLK 700 +#define SMU_14_0_0_UMD_PSTATE_SOCCLK 678 +#define SMU_14_0_0_UMD_PSTATE_FCLK 1800 + +#define SMU_14_0_4_UMD_PSTATE_GFXCLK 938 +#define SMU_14_0_4_UMD_PSTATE_SOCCLK 938 + #define FEATURE_MASK(feature) (1ULL << feature) #define SMC_DPM_FEATURE ( \ FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ @@ -66,6 +86,12 @@ FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \ FEATURE_MASK(FEATURE_VPE_DPM_BIT)) +enum smu_mall_pg_config { + SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0, + SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON = 1, + SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF = 2, +}; + static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1), @@ -113,6 +139,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownUmsch, PPSMC_MSG_PowerDownUmsch, 1), MSG_MAP(SetSoftMaxVpe, PPSMC_MSG_SetSoftMaxVpe, 1), MSG_MAP(SetSoftMinVpe, PPSMC_MSG_SetSoftMinVpe, 1), + MSG_MAP(MALLPowerController, PPSMC_MSG_MALLPowerController, 1), + MSG_MAP(MALLPowerState, PPSMC_MSG_MALLPowerState, 1), }; static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = { @@ -702,10 +730,10 @@ static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu, uint32_t dpm_level, uint32_t *freq) { - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) - smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); - else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1) + 
smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); return 0; } @@ -797,9 +825,11 @@ static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu, break; case SMU_MCLK: case SMU_UCLK: - case SMU_FCLK: max_dpm_level = 0; break; + case SMU_FCLK: + max_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + break; case SMU_SOCCLK: max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; break; @@ -834,7 +864,7 @@ static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu, min_dpm_level = clk_table->NumMemPstatesEnabled - 1; break; case SMU_FCLK: - min_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + min_dpm_level = 0; break; case SMU_SOCCLK: min_dpm_level = 0; @@ -915,9 +945,11 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, break; case SMU_MCLK: case SMU_UCLK: - case SMU_FCLK: max_dpm_level = 0; break; + case SMU_FCLK: + max_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + break; case SMU_SOCCLK: max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; break; @@ -948,7 +980,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, min_dpm_level = clk_table->NumMemPstatesEnabled - 1; break; case SMU_FCLK: - min_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + min_dpm_level = 0; break; case SMU_SOCCLK: min_dpm_level = 0; @@ -978,10 +1010,10 @@ static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu, uint32_t *min, uint32_t *max) { - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) - smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max); - else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max); + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1) + smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max); return 0; } @@ -999,9 +1031,15 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu, case SMU_VCLK: member_type = METRICS_AVERAGE_VCLK; break; + case SMU_VCLK1: + member_type = METRICS_AVERAGE_VCLK1; + break; case SMU_DCLK: member_type = METRICS_AVERAGE_DCLK; break; + case SMU_DCLK1: + member_type = METRICS_AVERAGE_DCLK1; + break; case SMU_MCLK: member_type = METRICS_AVERAGE_UCLK; break; @@ -1083,10 +1121,10 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *count) { - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) - smu_v14_0_0_get_dpm_level_count(smu, clk_type, count); - else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_v14_0_1_get_dpm_level_count(smu, clk_type, count); + else if (clk_type != SMU_VCLK1 && clk_type != SMU_DCLK1) + smu_v14_0_0_get_dpm_level_count(smu, clk_type, count); return 0; } @@ -1229,6 +1267,8 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, case SMU_FCLK: case SMU_VCLK: case SMU_DCLK: + case SMU_VCLK1: + case SMU_DCLK1: ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); if (ret) break; @@ -1247,13 +1287,76 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, return ret; } -static int smu_v14_0_0_set_performance_level(struct smu_context *smu, +static int smu_v14_0_common_get_dpm_profile_freq(struct smu_context *smu, + enum amd_dpm_forced_level level, + enum smu_clk_type clk_type, + uint32_t *min_clk, + uint32_t *max_clk) +{ 
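	/*
	 * Resolve the clock to pin for a UMD pstate profile level: GFXCLK and
	 * SOCCLK (and FCLK on pre-14.0.4 parts) default to the fixed
	 * SMU_14_0_*_UMD_PSTATE_* frequencies, the remaining clocks to the DPM
	 * max; PROFILE_PEAK then overrides with the DPM max and the
	 * MIN_SCLK/MIN_MCLK levels with the DPM min.  The result is written to
	 * both *min_clk and *max_clk so the caller pins the range to a single
	 * frequency.
	 */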
+ uint32_t clk_limit = 0; + int ret = 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4)) + clk_limit = SMU_14_0_4_UMD_PSTATE_GFXCLK; + else + clk_limit = SMU_14_0_0_UMD_PSTATE_GFXCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL); + break; + case SMU_SOCCLK: + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4)) + clk_limit = SMU_14_0_4_UMD_PSTATE_SOCCLK; + else + clk_limit = SMU_14_0_0_UMD_PSTATE_SOCCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit); + break; + case SMU_FCLK: + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 4)) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit); + else + clk_limit = SMU_14_0_0_UMD_PSTATE_FCLK; + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit); + else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL); + break; + case SMU_VCLK: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit); + break; + case SMU_VCLK1: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, NULL, &clk_limit); + break; + case SMU_DCLK: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit); + break; + case SMU_DCLK1: + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, NULL, &clk_limit); + break; + default: + ret = -EINVAL; + break; + } + *min_clk = *max_clk = clk_limit; + return ret; +} + +static int smu_v14_0_common_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { struct amdgpu_device *adev = smu->adev; uint32_t sclk_min = 0, sclk_max = 0; uint32_t fclk_min = 0, fclk_max = 0; uint32_t socclk_min = 0, socclk_max = 0; + uint32_t vclk_min = 0, vclk_max = 0; + uint32_t dclk_min = 0, dclk_max = 0; + uint32_t vclk1_min = 0, vclk1_max = 0; + uint32_t dclk1_min = 0, dclk1_max = 0; int ret = 0; switch (level) { @@ -1261,28 +1364,54 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu, smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, NULL, &vclk1_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, NULL, &dclk1_max); sclk_min = sclk_max; fclk_min = fclk_max; socclk_min = socclk_max; + vclk_min = vclk_max; + dclk_min = dclk_max; + vclk1_min = vclk1_max; + dclk1_min = dclk1_max; break; case AMD_DPM_FORCED_LEVEL_LOW: smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, &vclk1_min, 
NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, &dclk1_min, NULL); sclk_max = sclk_min; fclk_max = fclk_min; socclk_max = socclk_min; + vclk_max = vclk_min; + dclk_max = dclk_min; + vclk1_max = vclk1_min; + dclk1_max = dclk1_min; break; case AMD_DPM_FORCED_LEVEL_AUTO: smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_VCLK1, &vclk1_min, &vclk1_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_DCLK1, &dclk1_min, &dclk1_max); break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - /* Temporarily do nothing since the optimal clocks haven't been provided yet */ + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_VCLK1, &vclk1_min, &vclk1_max); + smu_v14_0_common_get_dpm_profile_freq(smu, level, SMU_DCLK1, &dclk1_min, &dclk1_max); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: @@ -1322,6 +1451,42 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu, return ret; } + if (vclk_min && vclk_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_VCLK, + vclk_min, + vclk_max); + if (ret) + return ret; + } + + if (vclk1_min && vclk1_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_VCLK1, + vclk1_min, + vclk1_max); + if (ret) + return ret; + } + + if (dclk_min && dclk_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_DCLK, + dclk_min, + dclk_max); + if (ret) + return ret; + } + + if (dclk1_min && dclk1_max) { + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, + SMU_DCLK1, + dclk1_min, + dclk1_max); + if (ret) + return ret; + } + return ret; } @@ -1351,10 +1516,10 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) { - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) - smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu); - else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu); + else + smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu); return 0; } @@ -1415,14 +1580,65 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks * static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) { - if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) - smu_14_0_0_get_dpm_table(smu, clock_table); - else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + if 
(amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) smu_14_0_1_get_dpm_table(smu, clock_table); + else + smu_14_0_0_get_dpm_table(smu, clock_table); return 0; } +static int smu_v14_0_1_init_mall_power_gating(struct smu_context *smu, enum smu_mall_pg_config pg_config) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + + if (pg_config == SMU_MALL_PG_CONFIG_PMFW_CONTROL) { + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController, + SMU_MALL_PMFW_CONTROL, NULL); + if (ret) { + dev_err(adev->dev, "Init MALL PMFW CONTROL Failure\n"); + return ret; + } + } else { + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController, + SMU_MALL_DRIVER_CONTROL, NULL); + if (ret) { + dev_err(adev->dev, "Init MALL Driver CONTROL Failure\n"); + return ret; + } + + if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON) { + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState, + SMU_MALL_EXIT_PG, NULL); + if (ret) { + dev_err(adev->dev, "EXIT MALL PG Failure\n"); + return ret; + } + } else if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF) { + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState, + SMU_MALL_ENTER_PG, NULL); + if (ret) { + dev_err(adev->dev, "Enter MALL PG Failure\n"); + return ret; + } + } + } + + return ret; +} + +static int smu_v14_0_common_set_mall_enable(struct smu_context *smu) +{ + enum smu_mall_pg_config pg_config = SMU_MALL_PG_CONFIG_DEFAULT; + int ret = 0; + + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + ret = smu_v14_0_1_init_mall_power_gating(smu, pg_config); + + return ret; +} + static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .check_fw_status = smu_v14_0_check_fw_status, .check_fw_version = smu_v14_0_check_fw_version, @@ -1448,12 +1664,13 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table, .print_clk_levels = smu_v14_0_0_print_clk_levels, .force_clk_levels = smu_v14_0_0_force_clk_levels, - .set_performance_level = smu_v14_0_0_set_performance_level, + .set_performance_level = smu_v14_0_common_set_performance_level, .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters, .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, .get_dpm_clock_table = smu_v14_0_common_get_dpm_table, + .set_mall_enable = smu_v14_0_common_set_mall_enable, }; static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 90703f4542ab..5899d01fa73d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -65,6 +65,20 @@ FEATURE_MASK(FEATURE_DPM_FCLK_BIT)) #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 +#define DEBUGSMC_MSG_Mode1Reset 2 +#define LINK_SPEED_MAX 3 + +#define PP_OD_FEATURE_GFXCLK_FMIN 0 +#define PP_OD_FEATURE_GFXCLK_FMAX 1 +#define PP_OD_FEATURE_UCLK_FMIN 2 +#define PP_OD_FEATURE_UCLK_FMAX 3 +#define PP_OD_FEATURE_GFX_VF_CURVE 4 +#define PP_OD_FEATURE_FAN_CURVE_TEMP 5 +#define PP_OD_FEATURE_FAN_CURVE_PWM 6 +#define PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT 7 +#define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 +#define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 +#define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 static struct cmn2asic_msg_mapping 
smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), @@ -113,7 +127,6 @@ static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), - MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), @@ -133,6 +146,7 @@ static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = { CLK_MAP(MCLK, PPCLK_UCLK), CLK_MAP(VCLK, PPCLK_VCLK_0), CLK_MAP(DCLK, PPCLK_DCLK_0), + CLK_MAP(DCEFCLK, PPCLK_DCFCLK), }; static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = { @@ -185,6 +199,15 @@ static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = FEA_MAP(MEM_TEMP_READ), FEA_MAP(ATHUB_MMHUB_PG), FEA_MAP(SOC_PCC), + FEA_MAP(EDC_PWRBRK), + FEA_MAP(SOC_EDC_XVMIN), + FEA_MAP(GFX_PSM_DIDT), + FEA_MAP(APT_ALL_ENABLE), + FEA_MAP(APT_SQ_THROTTLE), + FEA_MAP(APT_PF_DCS), + FEA_MAP(GFX_EDC_XVMIN), + FEA_MAP(GFX_DIDT_XVMIN), + FEA_MAP(FAN_ABNORMAL), [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, @@ -201,6 +224,7 @@ static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = { [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, TAB_MAP(I2C_COMMANDS), TAB_MAP(ECCINFO), + TAB_MAP(OVERDRIVE), }; static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -219,7 +243,6 @@ static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COU WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), }; -#if 0 static const uint8_t smu_v14_0_2_throttler_map[] = { [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), @@ -239,7 +262,6 @@ static const uint8_t smu_v14_0_2_throttler_map[] = { [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), }; -#endif static int smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu, @@ -465,6 +487,8 @@ static int smu_v14_0_2_tables_init(struct smu_context *smu) PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU14_TOOL_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); @@ -672,6 +696,25 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) pcie_table->clk_freq[pcie_table->num_of_link_levels] = skutable->LclkFreq[link_level]; pcie_table->num_of_link_levels++; + + if (link_level == 0) + link_level++; + } + + /* dcefclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.dcef_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_DCEFCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = 
dpm_table->dpm_levels[0].value; } return 0; @@ -998,6 +1041,9 @@ static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu, case PPCLK_DCLK_0: member_type = METRICS_AVERAGE_DCLK; break; + case PPCLK_DCFCLK: + member_type = METRICS_CURR_DCEFCLK; + break; default: return -EINVAL; } @@ -1007,14 +1053,97 @@ static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu, value); } +static bool smu_v14_0_2_is_od_feature_supported(struct smu_context *smu, + int od_feature_bit) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + + return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit); +} + +static void smu_v14_0_2_get_od_setting_limits(struct smu_context *smu, + int od_feature_bit, + int32_t *min, + int32_t *max) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsBasicMin; + int32_t od_min_setting, od_max_setting; + + switch (od_feature_bit) { + case PP_OD_FEATURE_GFXCLK_FMIN: + od_min_setting = overdrive_lowerlimits->GfxclkFmin; + od_max_setting = overdrive_upperlimits->GfxclkFmin; + break; + case PP_OD_FEATURE_GFXCLK_FMAX: + od_min_setting = overdrive_lowerlimits->GfxclkFmax; + od_max_setting = overdrive_upperlimits->GfxclkFmax; + break; + case PP_OD_FEATURE_UCLK_FMIN: + od_min_setting = overdrive_lowerlimits->UclkFmin; + od_max_setting = overdrive_upperlimits->UclkFmin; + break; + case PP_OD_FEATURE_UCLK_FMAX: + od_min_setting = overdrive_lowerlimits->UclkFmax; + od_max_setting = overdrive_upperlimits->UclkFmax; + break; + case PP_OD_FEATURE_GFX_VF_CURVE: + od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary[0]; + od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary[0]; + break; + case PP_OD_FEATURE_FAN_CURVE_TEMP: + od_min_setting = overdrive_lowerlimits->FanLinearTempPoints[0]; + od_max_setting = overdrive_upperlimits->FanLinearTempPoints[0]; + break; + case PP_OD_FEATURE_FAN_CURVE_PWM: + od_min_setting = overdrive_lowerlimits->FanLinearPwmPoints[0]; + od_max_setting = overdrive_upperlimits->FanLinearPwmPoints[0]; + break; + case PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT: + od_min_setting = overdrive_lowerlimits->AcousticLimitRpmThreshold; + od_max_setting = overdrive_upperlimits->AcousticLimitRpmThreshold; + break; + case PP_OD_FEATURE_FAN_ACOUSTIC_TARGET: + od_min_setting = overdrive_lowerlimits->AcousticTargetRpmThreshold; + od_max_setting = overdrive_upperlimits->AcousticTargetRpmThreshold; + break; + case PP_OD_FEATURE_FAN_TARGET_TEMPERATURE: + od_min_setting = overdrive_lowerlimits->FanTargetTemperature; + od_max_setting = overdrive_upperlimits->FanTargetTemperature; + break; + case PP_OD_FEATURE_FAN_MINIMUM_PWM: + od_min_setting = overdrive_lowerlimits->FanMinimumPwm; + od_max_setting = overdrive_upperlimits->FanMinimumPwm; + break; + default: + od_min_setting = od_max_setting = INT_MAX; + break; + } + + if (min) + *min = od_min_setting; + if (max) + *max = od_max_setting; +} + static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; struct 
smu_14_0_dpm_table *single_dpm_table; + struct smu_14_0_pcie_table *pcie_table; + uint32_t gen_speed, lane_width; int i, curr_freq, size = 0; + int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); @@ -1045,6 +1174,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, case SMU_DCLK1: single_dpm_table = &(dpm_context->dpm_tables.dclk_table); break; + case SMU_DCEFCLK: + single_dpm_table = &(dpm_context->dpm_tables.dcef_table); + break; default: break; } @@ -1058,6 +1190,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, case SMU_VCLK1: case SMU_DCLK: case SMU_DCLK1: + case SMU_DCEFCLK: ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); if (ret) { dev_err(smu->adev->dev, "Failed to get current clock freq!"); @@ -1100,7 +1233,212 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, } break; case SMU_PCIE: - // TODO + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_PCIE_RATE, + &gen_speed); + if (ret) + return ret; + + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_PCIE_WIDTH, + &lane_width); + if (ret) + return ret; + + pcie_table = &(dpm_context->dpm_tables.pcie_table); + for (i = 0; i < pcie_table->num_of_link_levels; i++) + size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, + (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," : + (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," : + (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," : + (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "", + (pcie_table->pcie_lane[i] == 1) ? "x1" : + (pcie_table->pcie_lane[i] == 2) ? "x2" : + (pcie_table->pcie_lane[i] == 3) ? "x4" : + (pcie_table->pcie_lane[i] == 4) ? "x8" : + (pcie_table->pcie_lane[i] == 5) ? "x12" : + (pcie_table->pcie_lane[i] == 6) ? "x16" : "", + pcie_table->clk_freq[i], + (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) && + (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ? 
+ "*" : ""); + break; + + case SMU_OD_SCLK: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_GFXCLK_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_SCLK:\n"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + od_table->OverDriveTable.GfxclkFmin, + od_table->OverDriveTable.GfxclkFmax); + break; + + case SMU_OD_MCLK: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_UCLK_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_MCLK:\n"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n", + od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); + break; + + case SMU_OD_VDDGFX_OFFSET: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_GFX_VF_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n"); + size += sysfs_emit_at(buf, size, "%dmV\n", + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[0]); + break; + + case SMU_OD_FAN_CURVE: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_FAN_CURVE:\n"); + for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) + size += sysfs_emit_at(buf, size, "%d: %dC %d%%\n", + i, + (int)od_table->OverDriveTable.FanLinearTempPoints[i], + (int)od_table->OverDriveTable.FanLinearPwmPoints[i]); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_TEMP, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "FAN_CURVE(hotspot temp): %uC %uC\n", + min_value, max_value); + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_PWM, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "FAN_CURVE(fan speed): %u%% %u%%\n", + min_value, max_value); + + break; + + case SMU_OD_ACOUSTIC_LIMIT: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_LIMIT:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.AcousticLimitRpmThreshold); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ACOUSTIC_LIMIT: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_ACOUSTIC_TARGET: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "OD_ACOUSTIC_TARGET:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.AcousticTargetRpmThreshold); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_TARGET, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "ACOUSTIC_TARGET: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_TARGET_TEMPERATURE: + if (!smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_TARGET_TEMPERATURE:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanTargetTemperature); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_TARGET_TEMPERATURE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "TARGET_TEMPERATURE: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_FAN_MINIMUM_PWM: + if (!smu_v14_0_2_is_od_feature_supported(smu, + 
PP_OD_FEATURE_FAN_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "FAN_MINIMUM_PWM:\n"); + size += sysfs_emit_at(buf, size, "%d\n", + (int)od_table->OverDriveTable.FanMinimumPwm); + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_MINIMUM_PWM, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n", + min_value, max_value); + break; + + case SMU_OD_RANGE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && + !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && + !smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) + break; + + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + + if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMIN, + &min_value, + NULL); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMAX, + NULL, + &max_value); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMIN, + &min_value, + NULL); + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMAX, + NULL, + &max_value); + size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFX_VF_CURVE, + &min_value, + &max_value); + size += sysfs_emit_at(buf, size, "VDDGFX_OFFSET: %7dmv %10dmv\n", + min_value, max_value); + } break; default: @@ -1220,17 +1558,100 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, return 0; } +static const struct smu_temperature_range smu14_thermal_policy[] = { + {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, + { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, +}; + static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu, struct smu_temperature_range *range) { - // TODO + struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; + PPTable_t *pptable = smu->smu_table.driver_pptable; + + if (amdgpu_sriov_vf(smu->adev)) + return 0; + + if (!range) + return -EINVAL; + + memcpy(range, &smu14_thermal_policy[0], sizeof(struct smu_temperature_range)); + + range->max = pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->edge_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->hotspot_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->hotspot_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_crit_max = pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_emergency_max = (pptable->CustomSkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM)* + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->software_shutdown_temp = powerplay_table->software_shutdown_temp; + range->software_shutdown_temp_offset = pptable->CustomSkuTable.FanAbnormalTempLimitOffset; 
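	/*
	 * The limits above are scaled by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES
	 * (i.e. reported in millidegrees Celsius), while software_shutdown_temp
	 * and its offset are passed through unscaled from the powerplay and
	 * driver PP tables.
	 */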
return 0; } static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu) { - // TODO + struct smu_14_0_dpm_context *dpm_context = + smu->smu_dpm.dpm_context; + struct smu_14_0_dpm_table *gfx_table = + &dpm_context->dpm_tables.gfx_table; + struct smu_14_0_dpm_table *mem_table = + &dpm_context->dpm_tables.uclk_table; + struct smu_14_0_dpm_table *soc_table = + &dpm_context->dpm_tables.soc_table; + struct smu_14_0_dpm_table *vclk_table = + &dpm_context->dpm_tables.vclk_table; + struct smu_14_0_dpm_table *dclk_table = + &dpm_context->dpm_tables.dclk_table; + struct smu_14_0_dpm_table *fclk_table = + &dpm_context->dpm_tables.fclk_table; + struct smu_umd_pstate_table *pstate_table = + &smu->pstate_table; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + DriverReportedClocks_t driver_clocks = + pptable->SkuTable.DriverReportedClocks; + + pstate_table->gfxclk_pstate.min = gfx_table->min; + if (driver_clocks.GameClockAc && + (driver_clocks.GameClockAc < gfx_table->max)) + pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc; + else + pstate_table->gfxclk_pstate.peak = gfx_table->max; + + pstate_table->uclk_pstate.min = mem_table->min; + pstate_table->uclk_pstate.peak = mem_table->max; + + pstate_table->socclk_pstate.min = soc_table->min; + pstate_table->socclk_pstate.peak = soc_table->max; + + pstate_table->vclk_pstate.min = vclk_table->min; + pstate_table->vclk_pstate.peak = vclk_table->max; + + pstate_table->dclk_pstate.min = dclk_table->min; + pstate_table->dclk_pstate.peak = dclk_table->max; + + pstate_table->fclk_pstate.min = fclk_table->min; + pstate_table->fclk_pstate.peak = fclk_table->max; + + if (driver_clocks.BaseClockAc && + driver_clocks.BaseClockAc < gfx_table->max) + pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc; + else + pstate_table->gfxclk_pstate.standard = gfx_table->max; + pstate_table->uclk_pstate.standard = mem_table->max; + pstate_table->socclk_pstate.standard = soc_table->min; + pstate_table->vclk_pstate.standard = vclk_table->min; + pstate_table->dclk_pstate.standard = dclk_table->min; + pstate_table->fclk_pstate.standard = fclk_table->min; return 0; } @@ -1261,7 +1682,27 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu, uint32_t *max_power_limit, uint32_t *min_power_limit) { - // TODO + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + CustomSkuTable_t *skutable = &pptable->CustomSkuTable; + uint32_t power_limit; + uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; + + if (smu_v14_0_get_current_power_limit(smu, &power_limit)) + power_limit = smu->adev->pm.ac_power ? 
+ skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] : + skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0]; + + if (current_power_limit) + *current_power_limit = power_limit; + if (default_power_limit) + *default_power_limit = power_limit; + + if (max_power_limit) + *max_power_limit = msg_limit; + + if (min_power_limit) + *min_power_limit = 0; return 0; } @@ -1364,6 +1805,9 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, } if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + if (size != 9) + return -EINVAL; + ret = smu_cmn_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, @@ -1395,6 +1839,8 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, activity_monitor->Fclk_PD_Data_error_coeff = input[7]; activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; break; + default: + return -EINVAL; } ret = smu_cmn_update_table(smu, @@ -1415,10 +1861,14 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - return smu_cmn_send_smc_msg_with_param(smu, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1 << workload_type, NULL); + if (!ret) + smu->workload_mask = 1 << workload_type; + + return ret; } static int smu_v14_0_2_baco_enter(struct smu_context *smu) @@ -1636,7 +2086,13 @@ static int smu_v14_0_2_mode1_reset(struct smu_context *smu) { int ret = 0; - // TODO + ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset); + if (!ret) { + if (amdgpu_emu_mode == 1) + msleep(50000); + else + msleep(1000); + } return ret; } @@ -1668,52 +2124,682 @@ static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu) smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82); smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66); smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90); + + smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_53); + smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_75); + smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_54); } -static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu, - uint32_t size) +static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu, + void **table) { + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v1_3 *gpu_metrics = + (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; + SmuMetricsExternal_t metrics_ext; + SmuMetrics_t *metrics = &metrics_ext.SmuMetrics; int ret = 0; - /* message SMU to update the bad page number on SMUBUS */ - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetNumBadMemoryPagesRetired, - size, NULL); + ret = smu_cmn_get_metrics_table(smu, + &metrics_ext, + true); + if (ret) + return ret; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); + + gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE]; + gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT]; + gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM]; + gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX]; + gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC]; + gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0], + metrics->AvgTemperature[TEMP_VR_MEM1]); + + gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; + gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; + gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage, + 
metrics->Vcn1ActivityPercentage); + + gpu_metrics->average_socket_power = metrics->AverageSocketPower; + gpu_metrics->energy_accumulator = metrics->EnergyAccumulator; + + if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD) + gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs; + else + gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs; + + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) + gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs; + else + gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs; + + gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency; + gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency; + gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; + gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; + + gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency; + gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; + gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; + gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; + gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0]; + gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0]; + + gpu_metrics->throttle_status = + smu_v14_0_2_get_throttler_status(metrics); + gpu_metrics->indep_throttle_status = + smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status, + smu_v14_0_2_throttler_map); + + gpu_metrics->current_fan_speed = metrics->AvgFanRpm; + + gpu_metrics->pcie_link_width = metrics->PcieWidth; + if ((metrics->PcieRate - 1) > LINK_SPEED_MAX) + gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1); + else + gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate); + + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX]; + gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC]; + gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM]; + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v1_3); +} + +static void smu_v14_0_2_dump_od_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + struct amdgpu_device *adev = smu->adev; + + dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, + od_table->OverDriveTable.GfxclkFmax); + dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, + od_table->OverDriveTable.UclkFmax); +} + +static int smu_v14_0_2_upload_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret; + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + true); if (ret) - dev_err(smu->adev->dev, - "[%s] failed to message SMU to update bad memory pages number\n", - __func__); + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); return ret; } -static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu, - uint32_t size) +static void smu_v14_0_2_set_supported_od_feature_mask(struct smu_context *smu) { - int ret = 0; + struct amdgpu_device *adev = smu->adev; - /* message SMU to update the bad channel info on SMUBUS */ - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, - size, NULL); + if (smu_v14_0_2_is_od_feature_supported(smu, + PP_OD_FEATURE_FAN_CURVE_BIT)) + 
adev->pm.od_feature_mask |= OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE | + OD_OPS_SUPPORT_FAN_CURVE_SET | + OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE | + OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET | + OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE | + OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET | + OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | + OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; +} + +static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu, + OverDriveTableExternal_t *od_table) +{ + int ret; + ret = smu_cmn_update_table(smu, + SMU_TABLE_OVERDRIVE, + 0, + (void *)od_table, + false); if (ret) - dev_err(smu->adev->dev, - "[%s] failed to message SMU to update bad memory pages channel info\n", - __func__); + dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); return ret; } -static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu, - void *table) +static int smu_v14_0_2_set_default_od_settings(struct smu_context *smu) { - int ret = 0; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; + OverDriveTableExternal_t *boot_od_table = + (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table; + OverDriveTableExternal_t *user_od_table = + (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table; + OverDriveTableExternal_t user_od_table_bak; + int ret; + int i; - // TODO + ret = smu_v14_0_2_get_overdrive_table(smu, boot_od_table); + if (ret) + return ret; + + smu_v14_0_2_dump_od_table(smu, boot_od_table); + + memcpy(od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + + /* + * For S3/S4/Runpm resume, we need to setup those overdrive tables again, + * but we have to preserve user defined values in "user_od_table". 
+ */ + if (!smu->adev->in_suspend) { + memcpy(user_od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + smu->user_dpm_profile.user_od = false; + } else if (smu->user_dpm_profile.user_od) { + memcpy(&user_od_table_bak, + user_od_table, + sizeof(OverDriveTableExternal_t)); + memcpy(user_od_table, + boot_od_table, + sizeof(OverDriveTableExternal_t)); + user_od_table->OverDriveTable.GfxclkFmin = + user_od_table_bak.OverDriveTable.GfxclkFmin; + user_od_table->OverDriveTable.GfxclkFmax = + user_od_table_bak.OverDriveTable.GfxclkFmax; + user_od_table->OverDriveTable.UclkFmin = + user_od_table_bak.OverDriveTable.UclkFmin; + user_od_table->OverDriveTable.UclkFmax = + user_od_table_bak.OverDriveTable.UclkFmax; + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = + user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i]; + for (i = 0; i < NUM_OD_FAN_MAX_POINTS - 1; i++) { + user_od_table->OverDriveTable.FanLinearTempPoints[i] = + user_od_table_bak.OverDriveTable.FanLinearTempPoints[i]; + user_od_table->OverDriveTable.FanLinearPwmPoints[i] = + user_od_table_bak.OverDriveTable.FanLinearPwmPoints[i]; + } + user_od_table->OverDriveTable.AcousticLimitRpmThreshold = + user_od_table_bak.OverDriveTable.AcousticLimitRpmThreshold; + user_od_table->OverDriveTable.AcousticTargetRpmThreshold = + user_od_table_bak.OverDriveTable.AcousticTargetRpmThreshold; + user_od_table->OverDriveTable.FanTargetTemperature = + user_od_table_bak.OverDriveTable.FanTargetTemperature; + user_od_table->OverDriveTable.FanMinimumPwm = + user_od_table_bak.OverDriveTable.FanMinimumPwm; + } + + smu_v14_0_2_set_supported_od_feature_mask(smu); + + return 0; +} + +static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = table_context->overdrive_table; + OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table; + int res; + + user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) | + BIT(PP_OD_FEATURE_UCLK_BIT) | + BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) | + BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table); + user_od_table->OverDriveTable.FeatureCtrlMask = 0; + if (res == 0) + memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t)); + + return res; +} + +static int smu_v14_0_2_od_restore_table_single(struct smu_context *smu, long input) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *boot_overdrive_table = + (OverDriveTableExternal_t *)table_context->boot_overdrive_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + struct amdgpu_device *adev = smu->adev; + int i; + + switch (input) { + case PP_OD_EDIT_FAN_CURVE: + for (i = 0; i < NUM_OD_FAN_MAX_POINTS; i++) { + od_table->OverDriveTable.FanLinearTempPoints[i] = + boot_overdrive_table->OverDriveTable.FanLinearTempPoints[i]; + od_table->OverDriveTable.FanLinearPwmPoints[i] = + boot_overdrive_table->OverDriveTable.FanLinearPwmPoints[i]; + } + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_ACOUSTIC_LIMIT: + od_table->OverDriveTable.AcousticLimitRpmThreshold = + boot_overdrive_table->OverDriveTable.AcousticLimitRpmThreshold; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + 
od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_ACOUSTIC_TARGET: + od_table->OverDriveTable.AcousticTargetRpmThreshold = + boot_overdrive_table->OverDriveTable.AcousticTargetRpmThreshold; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_FAN_TARGET_TEMPERATURE: + od_table->OverDriveTable.FanTargetTemperature = + boot_overdrive_table->OverDriveTable.FanTargetTemperature; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + case PP_OD_EDIT_FAN_MINIMUM_PWM: + od_table->OverDriveTable.FanMinimumPwm = + boot_overdrive_table->OverDriveTable.FanMinimumPwm; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + default: + dev_info(adev->dev, "Invalid table index: %ld\n", input); + return -EINVAL; + } + + return 0; +} + +static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long input[], + uint32_t size) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + struct amdgpu_device *adev = smu->adev; + uint32_t offset_of_voltageoffset; + int32_t minimum, maximum; + uint32_t feature_ctrlmask; + int i, ret = 0; + + switch (type) { + case PP_OD_EDIT_SCLK_VDDC_TABLE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { + dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n"); + return -ENOTSUPP; + } + + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + dev_info(adev->dev, "invalid number of input parameters %d\n", size); + return -EINVAL; + } + + switch (input[i]) { + case 0: + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMIN, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.GfxclkFmin = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; + break; + + case 1: + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFXCLK_FMAX, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.GfxclkFmax = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; + break; + + default: + dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]); + dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); + return -EINVAL; + } + } + + if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) { + dev_err(adev->dev, + "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n", + (uint32_t)od_table->OverDriveTable.GfxclkFmin, + (uint32_t)od_table->OverDriveTable.GfxclkFmax); + return -EINVAL; + } + break; + + case PP_OD_EDIT_MCLK_VDDC_TABLE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { + dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n"); + return -ENOTSUPP; + } + + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + dev_info(adev->dev, "invalid 
number of input parameters %d\n", size); + return -EINVAL; + } + + switch (input[i]) { + case 0: + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMIN, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.UclkFmin = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; + break; + + case 1: + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_UCLK_FMAX, + &minimum, + &maximum); + if (input[i + 1] < minimum || + input[i + 1] > maximum) { + dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n", + input[i + 1], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.UclkFmax = input[i + 1]; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; + break; + + default: + dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]); + dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); + return -EINVAL; + } + } + + if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) { + dev_err(adev->dev, + "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n", + (uint32_t)od_table->OverDriveTable.UclkFmin, + (uint32_t)od_table->OverDriveTable.UclkFmax); + return -EINVAL; + } + break; + + case PP_OD_EDIT_VDDGFX_OFFSET: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { + dev_warn(adev->dev, "Gfx offset setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_GFX_VF_CURVE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) + od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = input[0]; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT); + break; + + case PP_OD_EDIT_FAN_CURVE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + if (input[0] >= NUM_OD_FAN_MAX_POINTS - 1 || + input[0] < 0) + return -EINVAL; + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_TEMP, + &minimum, + &maximum); + if (input[1] < minimum || + input[1] > maximum) { + dev_info(adev->dev, "Fan curve temp setting(%ld) must be within [%d, %d]!\n", + input[1], minimum, maximum); + return -EINVAL; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_CURVE_PWM, + &minimum, + &maximum); + if (input[2] < minimum || + input[2] > maximum) { + dev_info(adev->dev, "Fan curve pwm setting(%ld) must be within [%d, %d]!\n", + input[2], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanLinearTempPoints[input[0]] = input[1]; + od_table->OverDriveTable.FanLinearPwmPoints[input[0]] = input[2]; + od_table->OverDriveTable.FanMode = FAN_MODE_MANUAL_LINEAR; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_ACOUSTIC_LIMIT: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_LIMIT, + &minimum, + &maximum); + if 
(input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "acoustic limit threshold setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.AcousticLimitRpmThreshold = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_ACOUSTIC_TARGET: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_ACOUSTIC_TARGET, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "acoustic target threshold setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.AcousticTargetRpmThreshold = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_FAN_TARGET_TEMPERATURE: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_TARGET_TEMPERATURE, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "fan target temperature setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanTargetTemperature = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_EDIT_FAN_MINIMUM_PWM: + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_FAN_CURVE_BIT)) { + dev_warn(adev->dev, "Fan curve setting not supported!\n"); + return -ENOTSUPP; + } + + smu_v14_0_2_get_od_setting_limits(smu, + PP_OD_FEATURE_FAN_MINIMUM_PWM, + &minimum, + &maximum); + if (input[0] < minimum || + input[0] > maximum) { + dev_info(adev->dev, "fan minimum pwm setting(%ld) must be within [%d, %d]!\n", + input[0], minimum, maximum); + return -EINVAL; + } + + od_table->OverDriveTable.FanMinimumPwm = input[0]; + od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); + break; + + case PP_OD_RESTORE_DEFAULT_TABLE: + if (size == 1) { + ret = smu_v14_0_2_od_restore_table_single(smu, input[0]); + if (ret) + return ret; + } else { + feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask; + memcpy(od_table, + table_context->boot_overdrive_table, + sizeof(OverDriveTableExternal_t)); + od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask; + } + fallthrough; + case PP_OD_COMMIT_DPM_TABLE: + /* + * The member below instructs PMFW the settings focused in + * this single operation. + * `uint32_t FeatureCtrlMask;` + * It does not contain actual informations about user's custom + * settings. Thus we do not cache it. 
+ */ + offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary); + if (memcmp((u8 *)od_table + offset_of_voltageoffset, + table_context->user_overdrive_table + offset_of_voltageoffset, + sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) { + smu_v14_0_2_dump_od_table(smu, od_table); + + ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + + od_table->OverDriveTable.FeatureCtrlMask = 0; + memcpy(table_context->user_overdrive_table + offset_of_voltageoffset, + (u8 *)od_table + offset_of_voltageoffset, + sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset); + + if (!memcmp(table_context->user_overdrive_table, + table_context->boot_overdrive_table, + sizeof(OverDriveTableExternal_t))) + smu->user_dpm_profile.user_od = false; + else + smu->user_dpm_profile.user_od = true; + } + break; + + default: + return -ENOSYS; + } return ret; } +static int smu_v14_0_2_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTableExternal_t *od_table = + (OverDriveTableExternal_t *)table_context->overdrive_table; + int ret = 0; + + if (limit_type != SMU_DEFAULT_PPT_LIMIT) + return -EINVAL; + + if (limit <= msg_limit) { + if (smu->current_power_limit > msg_limit) { + od_table->OverDriveTable.Ppt = 0; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT; + + ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + } + return smu_v14_0_set_power_limit(smu, limit_type, limit); + } else if (smu->od_enabled) { + ret = smu_v14_0_set_power_limit(smu, limit_type, msg_limit); + if (ret) + return ret; + + od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100; + od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT; + + ret = smu_v14_0_2_upload_overdrive_table(smu, od_table); + if (ret) { + dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); + return ret; + } + + smu->current_power_limit = limit; + } else { + return -EINVAL; + } + + return 0; +} + static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask, .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table, @@ -1747,15 +2833,21 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .update_pcie_parameters = smu_v14_0_2_update_pcie_parameters, .get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range, .register_irq_handler = smu_v14_0_register_irq_handler, + .enable_thermal_alert = smu_v14_0_enable_thermal_alert, + .disable_thermal_alert = smu_v14_0_disable_thermal_alert, .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location, + .get_gpu_metrics = smu_v14_0_2_get_gpu_metrics, .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range, + .set_default_od_settings = smu_v14_0_2_set_default_od_settings, + .restore_user_od_settings = smu_v14_0_2_restore_user_od_settings, + .od_edit_dpm_table = smu_v14_0_2_od_edit_dpm_table, .init_pptable_microcode = smu_v14_0_init_pptable_microcode, .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk, .set_performance_level = smu_v14_0_set_performance_level, 
.gfx_off_control = smu_v14_0_gfx_off_control, .get_unique_id = smu_v14_0_2_get_unique_id, .get_power_limit = smu_v14_0_2_get_power_limit, - .set_power_limit = smu_v14_0_set_power_limit, + .set_power_limit = smu_v14_0_2_set_power_limit, .set_power_source = smu_v14_0_set_power_source, .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode, @@ -1776,10 +2868,9 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .enable_gfx_features = smu_v14_0_2_enable_gfx_features, .set_mp1_state = smu_v14_0_2_set_mp1_state, .set_df_cstate = smu_v14_0_2_set_df_cstate, - .send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num, - .send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag, +#if 0 .gpo_control = smu_v14_0_gpo_control, - .get_ecc_info = smu_v14_0_2_get_ecc_info, +#endif }; void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 6d1c3af927ca..91ad434bcdae 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -56,7 +56,7 @@ static const char * const __smu_message_names[] = { static const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type) { - if (type < 0 || type >= SMU_MSG_MAX_COUNT) + if (type >= SMU_MSG_MAX_COUNT) return "unknown smu message"; return __smu_message_names[type]; @@ -315,11 +315,21 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu, if (adev->no_hw_access) return 0; - reg = __smu_cmn_poll_stat(smu); - res = __smu_cmn_reg2errno(smu, reg); - if (reg == SMU_RESP_NONE || - res == -EREMOTEIO) + if (smu->smc_fw_state == SMU_FW_HANG) { + dev_err(adev->dev, "SMU is in hanged state, failed to send smu message!\n"); + res = -EREMOTEIO; goto Out; + } + + if (smu->smc_fw_state == SMU_FW_INIT) { + smu->smc_fw_state = SMU_FW_RUNTIME; + } else { + reg = __smu_cmn_poll_stat(smu); + res = __smu_cmn_reg2errno(smu, reg); + if (reg == SMU_RESP_NONE || res == -EREMOTEIO) + goto Out; + } + __smu_cmn_send_msg(smu, msg_index, param); res = 0; Out: @@ -350,6 +360,9 @@ int smu_cmn_wait_for_response(struct smu_context *smu) reg = __smu_cmn_poll_stat(smu); res = __smu_cmn_reg2errno(smu, reg); + if (res == -EREMOTEIO) + smu->smc_fw_state = SMU_FW_HANG; + if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res && (res != -ETIME)) { amdgpu_device_halt(smu->adev); @@ -418,6 +431,16 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, goto Out; } + if (smu->smc_fw_state == SMU_FW_HANG) { + dev_err(adev->dev, "SMU is in hanged state, failed to send smu message!\n"); + res = -EREMOTEIO; + goto Out; + } else if (smu->smc_fw_state == SMU_FW_INIT) { + /* Ignore initial smu response register value */ + poll = false; + smu->smc_fw_state = SMU_FW_RUNTIME; + } + if (poll) { reg = __smu_cmn_poll_stat(smu); res = __smu_cmn_reg2errno(smu, reg); @@ -429,8 +452,11 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, __smu_cmn_send_msg(smu, (uint16_t) index, param); reg = __smu_cmn_poll_stat(smu); res = __smu_cmn_reg2errno(smu, reg); - if (res != 0) + if (res != 0) { + if (res == -EREMOTEIO) + smu->smc_fw_state = SMU_FW_HANG; __smu_cmn_reg_print_error(smu, reg, index, param, msg); + } if (read_arg) { smu_cmn_read_arg(smu, read_arg); dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\ @@ -760,7 +786,7 @@ static const char *__smu_feature_names[] = { static const char 
*smu_get_feature_name(struct smu_context *smu,
					  enum smu_feature_mask feature)
 {
-	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
+	if (feature >= SMU_FEATURE_COUNT)
 		return "unknown smu feature";
 	return __smu_feature_names[feature];
 }
@@ -768,7 +794,7 @@ static const char *smu_get_feature_name(struct smu_context *smu,
 size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
 				   char *buf)
 {
-	int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
+	int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
 	uint64_t feature_mask;
 	int i, feature_index;
 	uint32_t count = 0;
@@ -1132,3 +1158,60 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
 
 	return snd_driver_loaded;
 }
+
+static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level)
+{
+	if (level < 0 || !(policy->level_mask & BIT(level)))
+		return "Invalid";
+
+	switch (level) {
+	case SOC_PSTATE_DEFAULT:
+		return "soc_pstate_default";
+	case SOC_PSTATE_0:
+		return "soc_pstate_0";
+	case SOC_PSTATE_1:
+		return "soc_pstate_1";
+	case SOC_PSTATE_2:
+		return "soc_pstate_2";
+	}
+
+	return "Invalid";
+}
+
+static struct smu_dpm_policy_desc pstate_policy_desc = {
+	.name = STR_SOC_PSTATE_POLICY,
+	.get_desc = smu_soc_policy_get_desc,
+};
+
+void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy)
+{
+	policy->desc = &pstate_policy_desc;
+}
+
+static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy,
+					   int level)
+{
+	if (level < 0 || !(policy->level_mask & BIT(level)))
+		return "Invalid";
+
+	switch (level) {
+	case XGMI_PLPD_DISALLOW:
+		return "plpd_disallow";
+	case XGMI_PLPD_DEFAULT:
+		return "plpd_default";
+	case XGMI_PLPD_OPTIMIZED:
+		return "plpd_optimized";
+	}
+
+	return "Invalid";
+}
+
+static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = {
+	.name = STR_XGMI_PLPD_POLICY,
+	.get_desc = smu_xgmi_plpd_policy_get_desc,
+};
+
+void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
+{
+	policy->desc = &xgmi_plpd_policy_desc;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index 81bfce1406e5..1de685defe85 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -144,6 +144,8 @@ static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset)
 }
 
 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
+void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
+void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);
 
 #endif
 #endif
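The smu_v14_0_2_set_power_limit() hunk above programs any requested limit above the PPT0 message limit as a percentage offset in OverDriveTable.Ppt. Below is a minimal standalone sketch of that arithmetic (illustrative plain C, not kernel code; the helper name and the example wattages are assumptions, only the formula is taken from the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Returns the percentage offset to program into OverDriveTable.Ppt, or 0
 * when the requested limit fits under the firmware message limit.
 * Mirrors: od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
 */
static int32_t ppt_od_percent(uint32_t limit, uint32_t msg_limit)
{
	if (limit <= msg_limit)
		return 0;

	return (int32_t)((limit * 100) / msg_limit) - 100;
}

int main(void)
{
	/* Example numbers only: 360 W requested against a 300 W limit -> +20%. */
	printf("Ppt offset: %d%%\n", ppt_od_percent(360, 300));
	return 0;
}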
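The smu_cmn.c hunks above also introduce a small firmware-state machine (SMU_FW_INIT / SMU_FW_RUNTIME / SMU_FW_HANG): the first message sent after init skips the stale response-register poll, and an -EREMOTEIO result latches the hang state so later messages fail fast. A simplified standalone sketch of that flow, with a stub standing in for the real register polling (illustrative only, not the driver code):

#include <errno.h>
#include <stdio.h>

enum smc_fw_state { SMU_FW_INIT, SMU_FW_RUNTIME, SMU_FW_HANG };

static enum smc_fw_state fw_state = SMU_FW_INIT;

/* Stub for __smu_cmn_poll_stat()/__smu_cmn_reg2errno(): pretend the
 * previous message completed normally. */
static int poll_previous_response(void)
{
	return 0;
}

static int send_message(int msg)
{
	int res;

	if (fw_state == SMU_FW_HANG)
		return -EREMOTEIO;		/* fail fast once hung */

	if (fw_state == SMU_FW_INIT) {
		fw_state = SMU_FW_RUNTIME;	/* ignore the initial response register */
	} else {
		res = poll_previous_response();
		if (res == -EREMOTEIO) {
			fw_state = SMU_FW_HANG;	/* latch the hang for later callers */
			return res;
		}
	}

	printf("sending msg %d\n", msg);
	return 0;
}

int main(void)
{
	send_message(1);
	send_message(2);
	return 0;
}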