Diffstat (limited to 'drivers/gpu/drm/amd/powerplay')
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c                  | 75
 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c |  2
 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h              |  3
 drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h         |  6
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c                  | 62
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c                   | 16
 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c            |  2
 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c       |  2
 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c         |  2
 drivers/gpu/drm/amd/powerplay/vega20_ppt.c                  | 41
 10 files changed, 118 insertions(+), 93 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 3093917adc2d..f1565c448de5 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -69,6 +69,9 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (min <= 0 && max <= 0)
return -EINVAL;
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0)
return clk_id;
@@ -102,6 +105,9 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (min <= 0 && max <= 0)
return -EINVAL;
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0)
return clk_id;
@@ -135,23 +141,8 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (!min && !max)
return -EINVAL;
- switch (clk_type) {
- case SMU_UCLK:
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
- pr_warn("uclk dpm is not enabled\n");
- return 0;
- }
- break;
- case SMU_GFXCLK:
- case SMU_SCLK:
- if (!smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
- pr_warn("gfxclk dpm is not enabled\n");
- return 0;
- }
- break;
- default:
- break;
- }
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
mutex_lock(&smu->mutex);
clk_id = smu_clk_get_index(smu, clk_type);
@@ -194,6 +185,9 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
if (!value)
return -EINVAL;
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0)
return clk_id;
@@ -222,6 +216,35 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
+bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ feature_id = SMU_FEATURE_DPM_UCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ default:
+ return true;
+ }
+
+ if(!smu_feature_is_enabled(smu, feature_id)) {
+ pr_warn("smu %d clk dpm feature %d is not enabled\n", clk_type, feature_id);
+ return false;
+ }
+
+ return true;
+}
+
+
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
@@ -300,7 +323,7 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
return ret;
}
-int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
+int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
void *table_data, bool drv2smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -327,7 +350,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index,
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
- table_id);
+ table_id | ((argument & 0xFFFF) << 16));
if (ret)
return ret;
@@ -1372,10 +1395,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
ret = smu_unforce_dpm_levels(smu);
break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1385,8 +1408,9 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
&soc_mask);
if (ret)
return ret;
- smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
@@ -1441,17 +1465,16 @@ int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ enum amd_dpm_forced_level level;
if (!smu_dpm_ctx->dpm_context)
return -EINVAL;
mutex_lock(&(smu->mutex));
- if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
- smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
- }
+ level = smu_dpm_ctx->dpm_level;
mutex_unlock(&(smu->mutex));
- return smu_dpm_ctx->dpm_level;
+ return level;
}
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
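
Two interface changes in the amdgpu_smu.c hunks above are worth noting. The new smu_clk_dpm_is_enabled() helper centralizes the per-clock DPM feature check, so the soft/hard frequency-range setters and the DPM range queries now simply return 0 when the matching DPM feature is disabled. In addition, smu_update_table() gains an explicit argument parameter and packs it into the upper 16 bits of the message parameter itself (table_id | ((argument & 0xFFFF) << 16)), so callers no longer OR the argument into the table index. A minimal sketch of the old and new call conventions at an activity-monitor call site (the error handling shown is illustrative only):

	/* Before: callers packed the workload type into the table index. */
	result = smu_update_table(smu,
				  SMU_TABLE_ACTIVITY_MONITOR_COEFF | workload_type << 16,
				  (void *)(&activity_monitor), false);

	/* After: the workload type is a separate argument; smu_update_table()
	 * combines it with the mapped table id before sending
	 * SMU_MSG_TransferTableSmu2Dram / SMU_MSG_TransferTableDram2Smu.
	 */
	result = smu_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
				  workload_type, (void *)(&activity_monitor), false);
	if (result)
		pr_err("Failed to get activity monitor!\n");
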
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 1cd5a8b5cdc1..b760f95e7fa7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -1067,8 +1067,6 @@ static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable),
"Failed to allocate hwmgr->pptable!", return -ENOMEM);
- memset(hwmgr->pptable, 0x00, sizeof(struct phm_ppt_v1_information));
-
powerplay_table = get_powerplay_table(hwmgr);
PP_ASSERT_WITH_CODE((NULL != powerplay_table),
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index c97324ef7db2..1af992fb0bde 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -937,7 +937,7 @@ extern int smu_feature_is_supported(struct smu_context *smu,
extern int smu_feature_set_supported(struct smu_context *smu,
enum smu_feature_mask mask, bool enable);
-int smu_update_table(struct smu_context *smu, uint32_t table_index,
+int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
void *table_data, bool drv2smu);
bool is_support_sw_smu(struct amdgpu_device *adev);
@@ -973,5 +973,6 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
int smu_set_display_count(struct smu_context *smu, uint32_t count);
+bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 195c4ae67058..755d51f9c6a9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x12
+#define SMU11_DRIVER_IF_VERSION 0x13
#define PPTABLE_V20_SMU_VERSION 3
@@ -615,6 +615,7 @@ typedef struct {
uint16_t UclkAverageLpfTau;
uint16_t GfxActivityLpfTau;
uint16_t UclkActivityLpfTau;
+ uint16_t SocketPowerLpfTau;
uint32_t MmHubPadding[8];
@@ -665,7 +666,8 @@ typedef struct {
uint32_t ThrottlerStatus ;
uint8_t LinkDpmLevel;
- uint8_t Padding[3];
+ uint16_t AverageSocketPower;
+ uint8_t Padding;
uint32_t MmHubPadding[7];
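
The SmuMetrics_t hunk above repurposes the spare bytes after LinkDpmLevel to report a 16-bit AverageSocketPower, and the table gains a SocketPowerLpfTau filter constant; both change the shared driver/firmware layout, which is why SMU11_DRIVER_IF_VERSION moves from 0x12 to 0x13. A hedged sketch of how the new field would typically be consumed on the driver side, loosely modeled on the metrics-read pattern in navi10_ppt.c below (the helper name and the power scaling are assumptions, not code from this patch):

	static int example_get_socket_power(struct smu_context *smu, uint32_t *value)
	{
		SmuMetrics_t metrics;
		int ret;

		if (!value)
			return -EINVAL;

		memset(&metrics, 0, sizeof(metrics));

		/* Pull the metrics table from the SMU (Smu2Dram direction). */
		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
				       (void *)&metrics, false);
		if (ret)
			return ret;

		/* Scaling the raw reading for the caller is an assumption here. */
		*value = metrics.AverageSocketPower << 8;

		return 0;
	}
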
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 880fe0930d9e..2dae0ae0829e 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -331,7 +331,10 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
| FEATURE_MASK(FEATURE_BACO_BIT)
- | FEATURE_MASK(FEATURE_ACDC_BIT);
+ | FEATURE_MASK(FEATURE_ACDC_BIT)
+ | FEATURE_MASK(FEATURE_GFX_SS_BIT)
+ | FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
+ | FEATURE_MASK(FEATURE_FW_CTF_BIT);
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
@@ -339,8 +342,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_SS_BIT)
- | FEATURE_MASK(FEATURE_GFXOFF_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
/* TODO: remove it once fw fix the bug */
*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
}
@@ -465,9 +467,6 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
smc_pptable->MvddRatio = smc_dpm_table->MvddRatio;
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
- *(uint64_t *)smc_pptable->FeaturesToRun |= FEATURE_MASK(FEATURE_GFX_SS_BIT)
- | FEATURE_MASK(FEATURE_GFXOFF_BIT);
-
/* TODO: remove it once SMU fw fix it */
smc_pptable->DebugOverrides |= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN;
}
@@ -614,7 +613,7 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
memset(&metrics, 0, sizeof(metrics));
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, (void *)&metrics, false);
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
if (ret)
return ret;
@@ -709,7 +708,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
static int navi10_populate_umd_state_clk(struct smu_context *smu)
{
int ret = 0;
- uint32_t min_sclk_freq = 0;
+ uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL);
if (ret)
@@ -717,6 +716,12 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
smu->pstate_sclk = min_sclk_freq * 100;
+ ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL);
+ if (ret)
+ return ret;
+
+ smu->pstate_mclk = min_mclk_freq * 100;
+
return ret;
}
@@ -827,27 +832,20 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
}
-static int navi10_unforce_dpm_levels(struct smu_context *smu) {
-
+static int navi10_unforce_dpm_levels(struct smu_context *smu)
+{
int ret = 0, i = 0;
uint32_t min_freq, max_freq;
enum smu_clk_type clk_type;
- struct clk_feature_map {
- enum smu_clk_type clk_type;
- uint32_t feature;
- } clk_feature_map[] = {
- {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT},
- {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT},
- {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
+ enum smu_clk_type clks[] = {
+ SMU_GFXCLK,
+ SMU_MCLK,
+ SMU_SOCCLK,
};
- for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
- if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature))
- continue;
-
- clk_type = clk_feature_map[i].clk_type;
-
+ for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ clk_type = clks[i];
ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
if (ret)
return ret;
@@ -868,7 +866,7 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
if (!value)
return -EINVAL;
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, (void *)&metrics,
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics,
false);
if (ret)
return ret;
@@ -890,7 +888,7 @@ static int navi10_get_current_activity_percent(struct smu_context *smu,
msleep(1);
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS,
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)&metrics, false);
if (ret)
return ret;
@@ -931,7 +929,7 @@ static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value)
memset(&metrics, 0, sizeof(metrics));
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS,
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)&metrics, false);
if (ret)
return ret;
@@ -997,7 +995,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, i);
result = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | workload_type << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
if (result) {
pr_err("[%s] Failed to get activity monitor!", __func__);
@@ -1070,7 +1068,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
return -EINVAL;
ret = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | WORKLOAD_PPLIB_CUSTOM_BIT << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
if (ret) {
pr_err("[%s] Failed to get activity monitor!", __func__);
@@ -1114,7 +1112,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
}
ret = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | WORKLOAD_PPLIB_CUSTOM_BIT << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), true);
if (ret) {
pr_err("[%s] Failed to set activity monitor!", __func__);
@@ -1157,14 +1155,14 @@ static int navi10_get_profiling_clk_mask(struct smu_context *smu,
ret = smu_get_dpm_level_count(smu, SMU_MCLK, &level_count);
if (ret)
return ret;
- *sclk_mask = level_count - 1;
+ *mclk_mask = level_count - 1;
}
if(soc_mask) {
ret = smu_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
if (ret)
return ret;
- *sclk_mask = level_count - 1;
+ *soc_mask = level_count - 1;
}
}
@@ -1280,7 +1278,7 @@ static int navi10_thermal_get_temperature(struct smu_context *smu,
if (!value)
return -EINVAL;
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, (void *)&metrics, false);
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false);
if (ret)
return ret;
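
One behavioral detail of the new guard is visible in navi10_populate_umd_state_clk() above: when a clock's DPM feature is disabled, smu_get_dpm_freq_range() now returns 0 early without writing to min/max, so callers should initialize their output variables before the call. A minimal sketch of a defensive caller (the clock choice and the fallback message are illustrative):

	uint32_t min_freq = 0, max_freq = 0;
	int ret;

	/* Returns 0 early, leaving min_freq/max_freq untouched,
	 * if UCLK DPM is not enabled.
	 */
	ret = smu_get_dpm_freq_range(smu, SMU_UCLK, &min_freq, &max_freq);
	if (ret)
		return ret;

	if (!max_freq)
		pr_debug("uclk dpm disabled, no frequency range reported\n");
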
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index a87b86ae2cc5..95c7c4dae523 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -261,14 +261,20 @@ static int smu_v11_0_check_fw_version(struct smu_context *smu)
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
-
+ /*
+ * 1. if_version mismatch is not critical as our fw is designed
+ * to be backward compatible.
+ * 2. New fw usually brings some optimizations. But that's visible
+ * only on the paired driver.
+ * Considering above, we just leave user a warning message instead
+ * of halt driver loading.
+ */
if (if_version != smu->smc_if_version) {
pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
- pr_err("SMU driver if version not matched\n");
- ret = -EINVAL;
+ pr_warn("SMU driver if version not matched\n");
}
return ret;
@@ -703,7 +709,7 @@ static int smu_v11_0_write_pptable(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
int ret = 0;
- ret = smu_update_table(smu, SMU_TABLE_PPTABLE,
+ ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
table_context->driver_pptable, true);
return ret;
@@ -722,7 +728,7 @@ static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
if (!table->cpu_addr)
return -EINVAL;
- ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, table->cpu_addr,
+ ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr,
true);
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 6c81cb91ebae..15590fd86ef4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2705,8 +2705,6 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
if (0 == result)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 9e0dd56fe7c5..732005c03a82 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -2634,8 +2634,6 @@ static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
if (0 == result)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index ba3394303b9c..f19bac7ef7ba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -3117,8 +3117,6 @@ static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP,
cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
if (!result)
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index a76a22a18eb4..bb9bb09cfc7a 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -319,7 +319,7 @@ static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables)
AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
- if (smu_table->metrics_table)
+ if (!smu_table->metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
@@ -441,7 +441,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
{
ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
struct smu_table_context *table_context = &smu->smu_table;
- int ret;
if (!table_context->power_play_table)
return -EINVAL;
@@ -455,9 +454,7 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
- ret = vega20_setup_od8_information(smu);
-
- return ret;
+ return 0;
}
static int vega20_append_powerplay_table(struct smu_context *smu)
@@ -992,7 +989,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
break;
case SMU_SOCCLK:
- ret = smu_get_current_clk_freq(smu, PPCLK_SOCCLK, &now);
+ ret = smu_get_current_clk_freq(smu, SMU_SOCCLK, &now);
if (ret) {
pr_err("Attempt to get current socclk Failed!");
return ret;
@@ -1013,7 +1010,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
break;
case SMU_FCLK:
- ret = smu_get_current_clk_freq(smu, PPCLK_FCLK, &now);
+ ret = smu_get_current_clk_freq(smu, SMU_FCLK, &now);
if (ret) {
pr_err("Attempt to get current fclk Failed!");
return ret;
@@ -1028,7 +1025,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
break;
case SMU_DCEFCLK:
- ret = smu_get_current_clk_freq(smu, PPCLK_DCEFCLK, &now);
+ ret = smu_get_current_clk_freq(smu, SMU_DCEFCLK, &now);
if (ret) {
pr_err("Attempt to get current dcefclk Failed!");
return ret;
@@ -1502,11 +1499,17 @@ static int vega20_set_default_od8_setttings(struct smu_context *smu)
od8_settings = kzalloc(sizeof(struct vega20_od8_settings), GFP_KERNEL);
- if (od8_settings)
+ if (!od8_settings)
return -ENOMEM;
smu->od_settings = (void *)od8_settings;
+ ret = vega20_setup_od8_information(smu);
+ if (ret) {
+ pr_err("Retrieve board OD limits failed!\n");
+ return ret;
+ }
+
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
if (od8_settings->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
od8_settings->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
@@ -1677,7 +1680,7 @@ static int vega20_get_metrics_table(struct smu_context *smu,
int ret = 0;
if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
- ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS,
+ ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
pr_info("Failed to export SMU metrics table!\n");
@@ -1706,7 +1709,7 @@ static int vega20_set_default_od_settings(struct smu_context *smu,
if (!table_context->overdrive_table)
return -ENOMEM;
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE,
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
table_context->overdrive_table, false);
if (ret) {
pr_err("Failed to export over drive table!\n");
@@ -1718,7 +1721,7 @@ static int vega20_set_default_od_settings(struct smu_context *smu,
return ret;
}
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE,
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
table_context->overdrive_table, true);
if (ret) {
pr_err("Failed to import over drive table!\n");
@@ -1802,7 +1805,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, i);
result = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | workload_type << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
(void *)(&activity_monitor), false);
if (result) {
pr_err("[%s] Failed to get activity monitor!", __func__);
@@ -1888,7 +1891,7 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
ret = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | WORKLOAD_PPLIB_CUSTOM_BIT << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
if (ret) {
pr_err("[%s] Failed to get activity monitor!", __func__);
@@ -1943,7 +1946,7 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
}
ret = smu_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF | WORKLOAD_PPLIB_CUSTOM_BIT << 16,
+ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), true);
if (ret) {
pr_err("[%s] Failed to set activity monitor!", __func__);
@@ -2492,7 +2495,7 @@ static int vega20_update_od8_settings(struct smu_context *smu,
struct smu_table_context *table_context = &smu->smu_table;
int ret;
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE,
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
table_context->overdrive_table, false);
if (ret) {
pr_err("Failed to export over drive table!\n");
@@ -2503,7 +2506,7 @@ static int vega20_update_od8_settings(struct smu_context *smu,
if (ret)
return ret;
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE,
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0,
table_context->overdrive_table, true);
if (ret) {
pr_err("Failed to import over drive table!\n");
@@ -2767,7 +2770,7 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
break;
case PP_OD_RESTORE_DEFAULT_TABLE:
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, table_context->overdrive_table, false);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
if (ret) {
pr_err("Failed to export over drive table!\n");
return ret;
@@ -2776,7 +2779,7 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
break;
case PP_OD_COMMIT_DPM_TABLE:
- ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, table_context->overdrive_table, true);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
if (ret) {
pr_err("Failed to import over drive table!\n");
return ret;
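
The vega20_ppt.c hunks above also fix two inverted allocation checks (the -ENOMEM path previously fired when kzalloc() succeeded) and move vega20_setup_od8_information() into vega20_set_default_od8_setttings() so the board OD limits are read only after od8_settings has been allocated. The metrics_table that the first fix allocates backs a simple cache: vega20_get_metrics_table() refreshes it from the SMU at most once per HZ/1000 jiffies (about one millisecond) and otherwise serves the stored copy. A minimal sketch of that caching pattern, assuming the cached table is copied out to the caller (the copy and the timestamp update are not shown in the hunk above and are assumptions):

	static int example_get_metrics_cached(struct smu_context *smu,
					      SmuMetrics_t *metrics)
	{
		struct smu_table_context *smu_table = &smu->smu_table;
		int ret = 0;

		/* Refresh from the SMU only when the cache is stale (> ~1 ms old). */
		if (!smu_table->metrics_time ||
		    time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
			ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
					       (void *)smu_table->metrics_table, false);
			if (ret) {
				pr_info("Failed to export SMU metrics table!\n");
				return ret;
			}
			smu_table->metrics_time = jiffies;
		}

		/* Hand the caller a snapshot of the cached table. */
		memcpy(metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));

		return ret;
	}
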