Diffstat (limited to 'drivers/gpu/drm/amd/powerplay/smumgr')
23 files changed, 3910 insertions, 1165 deletions
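The core of this series is the new CI (Bonaire/Hawaii) SMC interface in ci_smc.c: ci_copy_bytes_to_smc() packs firmware tables MSB-first into 32-bit words before writing them through mmSMC_IND_DATA_0, and ci_send_msg_to_smc()/ci_send_msg_to_smc_with_parameter() drive the SMC mailbox registers. The stand-alone sketch below is not part of the patch; it only mirrors the word-packing loop and the left-aligned handling of a trailing partial word in ci_copy_bytes_to_smc() (the kernel code additionally merges the preserved low bytes of the existing SRAM word), so the byte layout written to SMC SRAM is easier to follow. The helper name pack_msb_first and the sample data are illustrative only.

/* Illustrative user-space sketch, not part of the patch: mirrors the
 * MSB-first packing ci_copy_bytes_to_smc() performs before each 32-bit
 * write to mmSMC_IND_DATA_0.
 */
#include <stdint.h>
#include <stdio.h>

/* Pack up to 4 bytes the way the SMC expects them: src[0] lands in the
 * most significant byte of the 32-bit word.
 */
static uint32_t pack_msb_first(const uint8_t *src, uint32_t count)
{
	uint32_t data = 0;
	uint32_t n = count < 4 ? count : 4;
	uint32_t i;

	for (i = 0; i < n; i++)
		data = (data << 8) | src[i];

	/* A trailing partial word is left-aligned, like the "extra_shift"
	 * tail handling in ci_copy_bytes_to_smc(); the kernel code also
	 * ORs back the untouched low bytes read from SRAM.
	 */
	return data << (8 * (4 - n));
}

int main(void)
{
	const uint8_t table[6] = { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF };

	printf("word0 = 0x%08X\n", pack_msb_first(table, 4));     /* 0xAABBCCDD */
	printf("word1 = 0x%08X\n", pack_msb_first(table + 4, 2)); /* 0xEEFF0000 */
	return 0;
}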
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 1703bbefbfd5..a423c0a85129 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -4,7 +4,7 @@
 
 SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
 	  polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
-	  smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o
+	  smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o ci_smc.o ci_smumgr.o
 
 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c
new file mode 100644
index 000000000000..9ee14315dce7
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c
@@ -0,0 +1,2753 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include "linux/delay.h" +#include <linux/types.h> + +#include "smumgr.h" +#include "pp_debug.h" +#include "ci_smc.h" +#include "ci_smumgr.h" +#include "ppsmc.h" +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "cgs_common.h" +#include "atombios.h" +#include "pppcielanes.h" + +#include "smu/smu_7_0_1_d.h" +#include "smu/smu_7_0_1_sh_mask.h" + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "gca/gfx_7_2_d.h" +#include "gca/gfx_7_2_sh_mask.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +#include "processpptables.h" + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define SMC_RAM_END 0x40000 + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define CISLAND_MINIMUM_ENGINE_CLOCK 800 +#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5 + +static const struct ci_pt_defaults defaults_hawaii_xt = { + 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, + { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, + { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } +}; + +static const struct ci_pt_defaults defaults_hawaii_pro = { + 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, + { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, + { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } +}; + +static const struct ci_pt_defaults defaults_bonaire_xt = { + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } +}; + + +static const struct ci_pt_defaults defaults_saturn_xt = { + 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000, + { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D }, + { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } +}; + + +static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr, + uint32_t smc_addr, uint32_t limit) +{ + if ((0 != (3 & smc_addr)) + || ((smc_addr + 3) >= limit)) { + pr_err("smc_addr invalid \n"); + return -EINVAL; + } + + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + return 0; +} + +static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, + const uint8_t *src, uint32_t byte_count, uint32_t limit) +{ + int result; + uint32_t data = 0; + uint32_t original_data; + uint32_t addr = 0; + uint32_t extra_shift; + + if ((3 & smc_start_address) + || ((smc_start_address + byte_count) >= limit)) { + pr_err("smc_start_address invalid \n"); + return -EINVAL; + } + + addr = smc_start_address; + + while (byte_count >= 4) { + /* Bytes are written into the SMC address space with the MSB first. 
*/ + data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; + + result = ci_set_smc_sram_address(hwmgr, addr, limit); + + if (0 != result) + return result; + + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + if (0 != byte_count) { + + data = 0; + + result = ci_set_smc_sram_address(hwmgr, addr, limit); + + if (0 != result) + return result; + + + original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0); + + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + /* Bytes are written into the SMC addres space with the MSB first. */ + data = (0x100 * data) + *src++; + byte_count--; + } + + data <<= extra_shift; + + data |= (original_data & ~((~0UL) << extra_shift)); + + result = ci_set_smc_sram_address(hwmgr, addr, limit); + + if (0 != result) + return result; + + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); + } + + return 0; +} + + +static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr) +{ + static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; + + ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1); + + return 0; +} + +bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr) +{ + return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) + && (0x20100 <= cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixSMC_PC_C))); +} + +static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, + uint32_t *value, uint32_t limit) +{ + int result; + + result = ci_set_smc_sram_address(hwmgr, smc_addr, limit); + + if (result) + return result; + + *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0); + return 0; +} + +int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) +{ + int ret; + + if (!ci_is_smc_ram_running(hwmgr)) + return -EINVAL; + + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); + + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); + + ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + pr_info("\n failed to send message %x ret is %d\n", msg, ret); + + return 0; +} + +int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter) +{ + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); + return ci_send_msg_to_smc(hwmgr, msg); +} + +static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + switch (dev_id) { + case 0x67BA: + case 0x66B1: + smu_data->power_tune_defaults = &defaults_hawaii_pro; + break; + case 0x67B8: + case 0x66B0: + smu_data->power_tune_defaults = &defaults_hawaii_xt; + break; + case 0x6640: + case 0x6641: + case 0x6646: + case 0x6647: + smu_data->power_tune_defaults = &defaults_saturn_xt; + break; + case 0x6649: + case 0x6650: + case 0x6651: + case 0x6658: + case 0x665C: + case 0x665D: + case 0x67A0: + case 0x67A1: + case 0x67A2: + case 0x67A8: + case 0x67A9: + case 0x67AA: + case 0x67B9: + case 0x67BE: + default: + smu_data->power_tune_defaults = &defaults_bonaire_xt; + break; + } +} + +static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, + uint32_t 
clock, uint32_t *vol) +{ + uint32_t i = 0; + + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + *vol = allowed_clock_voltage_table->entries[i].v; + return 0; + } + } + + *vol = allowed_clock_voltage_table->entries[i - 1].v; + return 0; +} + +static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t ref_clock; + uint32_t ref_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */ + ref_clock = atomctrl_get_reference_clock(hwmgr); + ref_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider */ + fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup */ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL, + SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3, + SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + struct pp_atomctrl_internal_ss_info ss_info; + uint32_t vco_freq = clock * dividers.uc_pll_post_div; + + if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr, + vco_freq, &ss_info)) { + uint32_t clk_s = ref_clock * 5 / + (ref_divider * ss_info.speed_spectrum_rate); + uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage * + fbdiv / (clk_s * 10000); + + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s); + cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2, + CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v); + } + } + + sclk->SclkFrequency = clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, + const struct phm_phase_shedding_limits_table *pl, + uint32_t sclk, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + for (i = 0; i < pl->count; i++) { + if (sclk < 
pl->entries[i].Sclk) { + *p_shed = i; + break; + } + } +} + +static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock, + uint32_t clock_insr) +{ + uint8_t i; + uint32_t temp; + uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK); + + if (clock < min) { + pr_info("Engine clock can't satisfy stutter requirement!\n"); + return 0; + } + for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { + temp = clock >> i; + + if (temp >= min || i == 0) + break; + } + return i; +} + +static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t clock, uint16_t sclk_al_threshold, + struct SMU7_Discrete_GraphicsLevel *level) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + + result = ci_calculate_sclk_params(hwmgr, clock, level); + + /* populate graphics levels */ + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_sclk, clock, + (uint32_t *)(&level->MinVddc)); + if (result) { + pr_err("vdd_dep_on_sclk table is NULL\n"); + return result; + } + + level->SclkFrequency = clock; + level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) + ci_populate_phase_value_based_on_sclk(hwmgr, + hwmgr->dyn_state.vddc_phase_shed_limits_table, + clock, + &level->MinVddcPhases); + + level->ActivityLevel = sclk_al_threshold; + level->CcPwrDynRm = 0; + level->CcPwrDynRm1 = 0; + level->EnabledForActivity = 0; + /* this level can be used for throttling.*/ + level->EnabledForThrottle = 1; + level->UpH = 0; + level->DownH = 0; + level->VoltageDownH = 0; + level->PowerThrottle = 0; + + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) + level->DeepSleepDivId = + ci_get_sleep_divider_id_from_clock(clock, + CISLAND_MINIMUM_ENGINE_CLOCK); + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (0 == result) { + level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases); + CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); + } + + return result; +} + +int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result = 0; + uint32_t array = smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) * + SMU7_MAX_LEVELS_GRAPHICS; + struct SMU7_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t i; + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = ci_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)smu_data->activity_target[i], + &levels[i]); + if (result) + return result; + if (i > 1) + smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + if (i == (dpm_table->sclk_table.count - 1)) + smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark = 
+ PPSMC_DISPLAY_WATERMARK_HIGH; + } + + smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; + + smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + result = ci_copy_bytes_to_smc(hwmgr, array, + (u8 *)levels, array_size, + SMC_RAM_END); + + return result; + +} + +static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + + tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if (ci_read_smc_sram_dword(hwmgr, + fuse_table_offset + + offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else + smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + uint16_t tmp; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + + if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) + || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity) + tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity; + else + tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity; + + smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp); + + return 0; +} + +static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) +{ + int i; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; + uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; + uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2; + + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table, + "The CAC Leakage table does not exist!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8, + "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count, + "CACLeakageTable->count and 
VddcDependencyOnSCLk->count not equal", return -EINVAL); + + for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { + lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1); + hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2); + hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3); + } else { + lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc); + hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage); + } + } + + return 0; +} + +static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr) +{ + int i; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint8_t *vid = smu_data->power_tune_table.VddCVid; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8, + "There should never be more than 8 entries for VddcVid!!!", + return -EINVAL); + + for (i = 0; i < (int)data->vddc_voltage_table.count; i++) + vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value); + + return 0; +} + +static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; + u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; + int i, min, max; + + min = max = hi_vid[0]; + for (i = 0; i < 8; i++) { + if (0 != hi_vid[i]) { + if (min > hi_vid[i]) + min = hi_vid[i]; + if (max < hi_vid[i]) + max = hi_vid[i]; + } + + if (0 != lo_vid[i]) { + if (min > lo_vid[i]) + min = lo_vid[i]; + if (max < lo_vid[i]) + max = lo_vid[i]; + } + } + + if ((min == 0) || (max == 0)) + return -EINVAL; + smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max; + smu_data->power_tune_table.GnbLPMLMinVid = (u8)min; + + return 0; +} + +static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint32_t pm_fuse_table_offset; + int ret = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) { + pr_err("Attempt to get pm_fuse_table_offset Failed!\n"); + return -EINVAL; + } + + /* DW0 - DW3 */ + ret = ci_populate_bapm_vddc_vid_sidd(hwmgr); + /* DW4 - DW5 */ + ret |= ci_populate_vddc_vid(hwmgr); + /* DW6 */ + ret |= ci_populate_svi_load_line(hwmgr); + /* DW7 */ + ret |= ci_populate_tdc_limit(hwmgr); + /* DW8 */ + ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset); + + ret |= 
ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset); + + ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr); + + ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr); + if (ret) + return ret; + + ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END); + } + return ret; +} + +static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); + struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; + struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table; + const uint16_t *def1, *def2; + int i, j, k; + + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + dpm_table->DTETjOffset = 0; + dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + if (ppm) { + dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000; + dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256; + } else { + dpm_table->PPM_PkgPwrLimit = 0; + dpm_table->PPM_TemperatureLimit = 0; + } + + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); + + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient); + def1 = defaults->bapmti_r; + def2 = defaults->bapmti_rc; + + for (i = 0; i < SMU7_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU7_DTE_SOURCES; j++) { + for (k = 0; k < SMU7_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1); + dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2); + def1++; + def2++; + } + } + } + + return 0; +} + +static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, uint16_t *hi, + uint16_t *lo) +{ + uint16_t v_index; + bool vol_found = false; + *hi = tab->value * VOLTAGE_SCALE; + *lo = tab->value * VOLTAGE_SCALE; + + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk, + "The SCLK/VDDC Dependency Table does not exist.\n", + return -EINVAL); + + if (NULL == hwmgr->dyn_state.cac_leakage_table) { + pr_warn("CAC Leakage Table does not exist, using vddc.\n"); + return 0; + } + + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); + } else { + pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n"); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); 
+ } + break; + } + } + + if (!vol_found) { + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; + } else { + pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table."); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + if (!vol_found) + pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n"); + } + + return 0; +} + +static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, + SMU7_Discrete_VoltageLevel *smc_voltage_tab) +{ + int result; + + result = ci_get_std_voltage_value_sidd(hwmgr, tab, + &smc_voltage_tab->StdVoltageHiSidd, + &smc_voltage_tab->StdVoltageLoSidd); + if (result) { + smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE; + smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE; + } + + smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd); + + return 0; +} + +static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + unsigned int count; + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->VddcLevelCount = data->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + result = ci_populate_smc_voltage_table(hwmgr, + &(data->vddc_voltage_table.entries[count]), + &(table->VddcLevel[count])); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) + table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; + else + table->VddcLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + + return 0; +} + +static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count; + int result; + + table->VddciLevelCount = data->vddci_voltage_table.count; + + for (count = 0; count < table->VddciLevelCount; count++) { + result = ci_populate_smc_voltage_table(hwmgr, + &(data->vddci_voltage_table.entries[count]), + &(table->VddciLevel[count])); + PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC VDDCI voltage table", return -EINVAL); + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; + else + table->VddciLevel[count].Smio |= 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount); + + return 0; +} + +static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr 
*)(hwmgr->backend); + uint32_t count; + int result; + + table->MvddLevelCount = data->mvdd_voltage_table.count; + + for (count = 0; count < table->MvddLevelCount; count++) { + result = ci_populate_smc_voltage_table(hwmgr, + &(data->mvdd_voltage_table.entries[count]), + &table->MvddLevel[count]); + PP_ASSERT_WITH_CODE(result == 0, "do not populate SMC mvdd voltage table", return -EINVAL); + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) + table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; + else + table->MvddLevel[count].Smio |= 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); + + return 0; +} + + +static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result; + + result = ci_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDC voltage table to SMC", return -EINVAL); + + result = ci_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDCI voltage table to SMC", return -EINVAL); + + result = ci_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate MVDD voltage table to SMC", return -EINVAL); + + return 0; +} + +static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU7_Discrete_Ulv *state) +{ + uint32_t voltage_response_time, ulv_voltage; + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage); + PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;); + + if (ulv_voltage == 0) { + data->ulv_supported = false; + return 0; + } + + if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) + state->VddcOffset = 0; + else + /* used in SMIO Mode. not implemented for now. this is backup only for CI. 
*/ + state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage); + } else { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) + state->VddcOffsetVid = 0; + else /* used in SVI2 Mode */ + state->VddcOffsetVid = (uint8_t)( + (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) + * VOLTAGE_VID_OFFSET_SCALE2 + / VOLTAGE_VID_OFFSET_SCALE1); + } + state->VddcPhase = 1; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr, + SMU7_Discrete_Ulv *ulv_level) +{ + return ci_populate_ulv_level(hwmgr, ulv_level); +} + +static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint32_t i; + +/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level.*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +static int ci_calculate_mclk_params( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU7_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dllStateOn + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; + uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; + uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; + uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; + uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; + uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; + uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; + + pp_atomctrl_memory_clock_param mpll_param; + int result; + + result = atomctrl_get_memory_pll_dividers_si(hwmgr, + memory_clock, &mpll_param, strobe_mode); + PP_ASSERT_WITH_CODE(0 == result, + "Error retrieving Memory Clock Parameters from VBIOS.", return result); + + mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); + + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); + + mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, + MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, 
mpll_param.mpll_post_divider); + + if (data->is_memory_gddr5) { + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/ + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + + if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { + uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; + uint32_t clkv = + (uint32_t)((((131 * ss_info.speed_spectrum_percentage * + ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); + + mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); + mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); + } + } + + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); + + + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) + mc_para_index = 0x00; + else if (memory_clock > 47500) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } else { + if (memory_clock < 65000) + mc_para_index = 0x00; + else if (memory_clock > 135000) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + + return mc_para_index; +} + +static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) + mc_para_index = 0; + else if (memory_clock >= 80000) + mc_para_index = 0x0f; + else + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + + return mc_para_index; +} + +static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t memory_clock, uint32_t *p_shed) +{ + unsigned int i; + + *p_shed = 1; + + for (i = 0; i < pl->count; i++) { + if (memory_clock < pl->entries[i].Mclk) { + *p_shed = i; + break; + } + } + + return 0; +} + +static int ci_populate_single_memory_level( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU7_Discrete_MemoryLevel *memory_level + ) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int result = 0; + bool dll_state_on; + struct cgs_display_info info = {0}; + uint32_t 
mclk_edc_wr_enable_threshold = 40000; + uint32_t mclk_edc_enable_threshold = 40000; + uint32_t mclk_strobe_mode_threshold = 40000; + + if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) { + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); + } + + if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddci_dependency_on_mclk, + memory_clock, + &memory_level->MinVddci); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result); + } + + if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) { + result = ci_get_dependency_volt_by_clk(hwmgr, + hwmgr->dyn_state.mvdd_dependency_on_mclk, + memory_clock, + &memory_level->MinMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddci voltage value from memory MVDD voltage dependency table", return result); + } + + memory_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) { + ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table, + memory_clock, &memory_level->MinVddcPhases); + } + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 1; + memory_level->UpH = 0; + memory_level->DownH = 100; + memory_level->VoltageDownH = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + /* stutter mode not support on ci */ + + /* decide strobe mode*/ + memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) && + (memory_clock <= mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_gddr5) { + memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((mclk_edc_enable_threshold != 0) && + (memory_clock > mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((mclk_edc_wr_enable_threshold != 0) && + (memory_clock > mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (ci_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + else + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } else + dll_state_on = data->dll_default_on; + } else { + memory_level->StrobeRatio = + ci_get_ddr3_mclk_frequency_ratio(memory_clock); + dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
1 : 0; + } + + result = ci_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); + + if (0 == result) { + memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases); + memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE); + memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int result; + struct cgs_system_info sys_info = {0}; + uint32_t dev_id; + + uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel); + uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY; + SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", return -EINVAL); + result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value, + &(smu_data->smc_state_table.MemoryLevel[i])); + if (0 != result) + return result; + } + + smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + dev_id = (uint32_t)sys_info.value; + + if ((dpm_table->mclk_table.count >= 2) + && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) { + smu_data->smc_state_table.MemoryLevel[1].MinVddci = + smu_data->smc_state_table.MemoryLevel[0].MinVddci; + smu_data->smc_state_table.MemoryLevel[1].MinMvdd = + smu_data->smc_state_table.MemoryLevel[0].MinMvdd; + } + smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; + CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel); + + smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + + result = ci_copy_bytes_to_smc(hwmgr, + level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, + SMC_RAM_END); + + return result; +} + +static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, 
uint32_t mclk, + SMU7_Discrete_VoltageLevel *voltage) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) { + if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { + /* Always round to higher voltage. */ + voltage->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + + PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count, + "MVDD Voltage is outside the supported range.", return -EINVAL); + + } else { + return -EINVAL; + } + + return 0; +} + +static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = 0; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct pp_atomctrl_clock_dividers_vi dividers; + + SMU7_Discrete_VoltageLevel voltage_level; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + + + /* The ACPI state should not do DPM on DC (or ever).*/ + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (data->acpi_vddc) + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); + else + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE); + + table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1; + /* assign zero for now*/ + table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* divider ID for required SCLK*/ + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, + CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + /* For various features to be enabled/disabled while this level is active.*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + /* SCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + 
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + + /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/ + table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; + table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc; + else { + if (data->acpi_vddci != 0) + table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE); + } + + if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level)) + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinMvdd = 0; + + /* Force reset on DLL*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); + + /* Disable DLL in ACPIState*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); + + /* Enable DLL bypass signal*/ + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK0_BYPASS, 0); + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK1_BYPASS, 0); + + table->MemoryACPILevel.DllCntl = + PP_HOST_TO_SMC_UL(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = + PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); + table->MemoryACPILevel.MpllDqFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl_1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); + table->MemoryACPILevel.MpllSs1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); + table->MemoryACPILevel.MpllSs2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpH = 0; + table->MemoryACPILevel.DownH = 100; + table->MemoryACPILevel.VoltageDownH = 0; + /* Indicates maximum activity level for this performance level.*/ + table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = 0; + table->MemoryACPILevel.StrobeEnable = 0; + table->MemoryACPILevel.EdcReadEnable = 0; + table->MemoryACPILevel.EdcWriteEnable = 0; + table->MemoryACPILevel.RttEnable = 0; + + return result; +} + +static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = 0; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_uvd_clock_voltage_dependency_table *uvd_table = + hwmgr->dyn_state.uvd_clock_voltage_dependency_table; + + table->UvdLevelCount = (uint8_t)(uvd_table->count); + + for (count = 0; count < table->UvdLevelCount; count++) { + 
table->UvdLevel[count].VclkFrequency = + uvd_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = + uvd_table->entries[count].dclk; + table->UvdLevel[count].MinVddc = + uvd_table->entries[count].v * VOLTAGE_SCALE; + table->UvdLevel[count].MinVddcPhases = 1; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].VclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Vclk clock", return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].DclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Dclk clock", return result); + + table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc); + } + + return result; +} + +static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_vce_clock_voltage_dependency_table *vce_table = + hwmgr->dyn_state.vce_clock_voltage_dependency_table; + + table->VceLevelCount = (uint8_t)(vce_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = vce_table->entries[count].evclk; + table->VceLevel[count].MinVoltage = + vce_table->entries[count].v * VOLTAGE_SCALE; + table->VceLevel[count].MinPhases = 1; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage); + } + return result; +} + +static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_acp_clock_voltage_dependency_table *acp_table = + hwmgr->dyn_state.acp_clock_voltage_dependency_table; + + table->AcpLevelCount = (uint8_t)(acp_table->count); + table->AcpBootLevel = 0; + + for (count = 0; count < table->AcpLevelCount; count++) { + table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk; + table->AcpLevel[count].MinVoltage = acp_table->entries[count].v; + table->AcpLevel[count].MinPhases = 1; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->AcpLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for engine clock", return result); + + table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage); + } + return result; +} + +static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_samu_clock_voltage_dependency_table *samu_table = + hwmgr->dyn_state.samu_clock_voltage_dependency_table; + + table->SamuBootLevel = 0; + table->SamuLevelCount = 
(uint8_t)(samu_table->count); + + for (count = 0; count < table->SamuLevelCount; count++) { + table->SamuLevel[count].Frequency = samu_table->entries[count].samclk; + table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE; + table->SamuLevel[count].MinPhases = 1; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->SamuLevel[count].Frequency, &dividers); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for samu clock", return result); + + table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage); + } + return result; +} + +static int ci_populate_memory_timing_parameters( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock, + struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs + ) +{ + uint32_t dramTiming; + uint32_t dramTiming2; + uint32_t burstTime; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + engine_clock, memory_clock); + + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + + return 0; +} + +static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + int result = 0; + SMU7_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = ci_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (0 != result) + break; + } + } + + if (0 == result) { + result = ci_copy_bytes_to_smc( + hwmgr, + smu_data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU7_Discrete_MCArbDramTimingTable), + SMC_RAM_END + ); + } + + return result; +} + +static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table*/ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.GraphicsBootLevel = 0; + pr_err("VBIOS did not find boot engine clock value \ + in dependency table.
Using Graphics DPM level 0!"); + result = 0; + } + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel)); + + if (0 != result) { + smu_data->smc_state_table.MemoryBootLevel = 0; + pr_err("VBIOS did not find boot engine clock value \ + in dependency table. Using Memory DPM level 0!"); + result = 0; + } + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value; + table->BootVddci = data->vbios_boot_state.vddci_bootup_value; + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; + + return result; +} + +static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr, + SMU7_Discrete_MCRegisters *mc_reg_table) +{ + const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend; + + uint32_t i, j; + + for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) { + if (smu_data->mc_reg_table.validflag & 1<<j) { + PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE, + "Index of mc_reg_table->address[] array out of boundary", return -EINVAL); + mc_reg_table->address[i].s0 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (uint8_t)i; + + return 0; +} + +static void ci_convert_mc_registers( + const struct ci_mc_reg_entry *entry, + SMU7_Discrete_MCRegisterSet *data, + uint32_t num_entries, uint32_t valid_flag) +{ + uint32_t i, j; + + for (i = 0, j = 0; j < num_entries; j++) { + if (valid_flag & 1<<j) { + data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]); + i++; + } + } +} + +static int ci_convert_mc_reg_table_entry_to_smc( + struct pp_hwmgr *hwmgr, + const uint32_t memory_clock, + SMU7_Discrete_MCRegisterSet *mc_reg_table_data + ) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint32_t i = 0; + + for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { + if (memory_clock <= + smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) { + break; + } + } + + if ((i == smu_data->mc_reg_table.num_entries) && (i > 0)) + --i; + + ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, smu_data->mc_reg_table.last, + smu_data->mc_reg_table.validflag); + + return 0; +} + +static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, + SMU7_Discrete_MCRegisters *mc_regs) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int res; + uint32_t i; + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + res = ci_convert_mc_reg_table_entry_to_smc( + hwmgr, + data->dpm_table.mclk_table.dpm_levels[i].value, + &mc_regs->data[i] + ); + + if (0 != res) + result = res; + } + + return result; +} + +static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t address; + int32_t result; + + if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + + memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters)); + + result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs)); + + if (result != 0) + return result; + + address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]); + + return ci_copy_bytes_to_smc(hwmgr, address, + (uint8_t *)&smu_data->mc_regs.data[0], + 
sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, + SMC_RAM_END); +} + +static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + + memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters)); + result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for the MC register addresses!", return result;); + + result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for driver state!", return result;); + + return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start, + (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END); +} + +static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + uint8_t count, level; + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk + >= data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk + >= data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, + SMU7_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + table->SVI2Enable = 1; + else + table->SVI2Enable = 0; + return 0; +} + +static int ci_start_smc(struct pp_hwmgr *hwmgr) +{ + /* set smc instruct start point at 0x0 */ + ci_program_jump_on_start(hwmgr); + + /* enable smc clock */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, + INTERRUPTS_ENABLED, 1); + + return 0; +} + +int ci_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct pp_atomctrl_gpio_pin_assignment gpio_pin; + u32 i; + + ci_initialize_power_tune_defaults(hwmgr); + memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table)); + + if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control) + ci_populate_smc_voltage_tables(hwmgr, table); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (data->ulv_supported) { + result = ci_populate_ulv_state(hwmgr, &(table->Ulv)); + PP_ASSERT_WITH_CODE(0 == result, + 
"Failed to initialize ULV state!", return result); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, 0x40035); + } + + result = ci_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result); + + result = ci_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result); + + result = ci_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result); + + result = ci_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result); + + result = ci_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result); + + result = ci_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result); + + result = ci_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result); + + /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */ + /* need to populate the ARB settings for the initial state. */ + result = ci_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result); + + result = ci_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result); + + table->UvdBootLevel = 0; + table->VceBootLevel = 0; + table->AcpBootLevel = 0; + table->SamuBootLevel = 0; + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + result = ci_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Boot Level!", return result); + + result = ci_populate_smc_initial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result); + + result = ci_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result); + + table->UVDInterval = 1; + table->VCEInterval = 1; + table->ACPInterval = 1; + table->SAMUInterval = 1; + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + + table->TemperatureLimitHigh = + (data->thermal_temp_setting.temperature_high * + SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->TemperatureLimitLow = + (data->thermal_temp_setting.temperature_low * + SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->VddcVddciDelta = 4000; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + + PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + + table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count; + table->PCIeGenInterval = 1; + + ci_populate_smc_svi2_config(hwmgr, table); + + for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++) + CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + if 
(atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { + table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); + table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE); + table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU7_Discrete_DpmTable)-3 * sizeof(SMU7_PIDController), + SMC_RAM_END); + + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result;); + + result = ci_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate initialize MC Reg table!", return result); + + result = ci_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate PM fuses to SMC memory!", return result); + + ci_start_smc(hwmgr); + + return 0; +} + +int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend); + SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + if (0 == ci_data->fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - 
hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = smu7_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); + + return 0; +} + +static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return ci_program_memory_timing_parameters(hwmgr); + + return 0; +} + +int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = ci_copy_bytes_to_smc( + hwmgr, + smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, + LowSclkInterruptT), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + + result = ci_update_and_upload_mc_reg_table(hwmgr); + + PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result); + + result = ci_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + + return result; +} + +uint32_t ci_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU7_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU7_SoftRegisters, AverageGraphicsA); + case PreVBlankGap: + return offsetof(SMU7_SoftRegisters, PreVBlankGap); + case VBlankTimeout: 
+ return offsetof(SMU7_SoftRegisters, VBlankTimeout); + } + case SMU_Discrete_DpmTable: + switch (member) { + case LowSclkInterruptThreshold: + return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT); + } + } + pr_debug("can't get the offset of type %x member %x\n", type, member); + return 0; +} + +uint32_t ci_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU7_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU7_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU7_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU7_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU7_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDCI: + return SMU7_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU7_MAX_LEVELS_MVDD; + } + + pr_debug("can't get the mac of %x\n", value); + return 0; +} + +static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr) +{ + uint32_t byte_count, start_addr; + uint8_t *src; + uint32_t data; + + struct cgs_firmware_info info = {0}; + + cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info); + + hwmgr->is_kicker = info.is_kicker; + byte_count = info.image_size; + src = (uint8_t *)info.kptr; + start_addr = info.ucode_start_address; + + if (byte_count > SMC_RAM_END) { + pr_err("SMC address is beyond the SMC RAM area.\n"); + return -EINVAL; + } + + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + + for (; byte_count >= 4; byte_count -= 4) { + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); + src += 4; + } + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + + if (0 != byte_count) { + pr_err("SMC size must be dividable by 4\n"); + return -EINVAL; + } + + return 0; +} + +static int ci_upload_firmware(struct pp_hwmgr *hwmgr) +{ + if (ci_is_smc_ram_running(hwmgr)) { + pr_info("smc is running, no need to load smc firmware\n"); + return 0; + } + PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, + boot_seq_done, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL, + pre_fetcher_en, 1); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); + return ci_load_smc_ucode(hwmgr); +} + +int ci_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend); + + uint32_t tmp = 0; + int result; + bool error = false; + + if (ci_upload_firmware(hwmgr)) + return -EINVAL; + + result = ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->dpm_table_start = tmp; + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (0 == result) { + data->soft_regs_start = tmp; + ci_data->soft_regs_start = tmp; + } + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->mc_reg_table_start = tmp; + + result = ci_read_smc_sram_dword(hwmgr, + 
SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->fan_table_start = tmp; + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (0 == result) + ci_data->arb_table_start = tmp; + + error |= (0 != result); + + result = ci_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (0 == result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? 1 : 0; +} + +static uint8_t ci_get_memory_modile_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg) +{ + bool result = true; + + switch (in_reg) { + case mmMC_SEQ_RAS_TIMING: + *out_reg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *out_reg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *out_reg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *out_reg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *out_reg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *out_reg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *out_reg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *out_reg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *out_reg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *out_reg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *out_reg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *out_reg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *out_reg = mmMC_SEQ_PMG_TIMING_LP; + break; + + case mmMC_PMG_CMD_MRS2: + *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *out_reg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = false; + break; + } + + return result; +} + +static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) + ? 
address : table->mc_reg_address[i].s1; + } + return 0; +} + +static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, + struct ci_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -EINVAL); + + for (i = 0; i < table->last; i++) + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr, + struct ci_mc_reg_table *table) +{ + uint8_t i, j, k; + uint32_t temp_reg; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + for (i = 0, j = table->last; i < table->last; i++) { + PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + switch (table->mc_reg_address[i].s1) { + + case mmMC_SEQ_MISC1: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + } + j++; + PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + + if (!data->is_memory_gddr5) + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + + if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) { + table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + } + + break; + + case mmMC_SEQ_RESERVE_M: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -EINVAL); + break; + + default: + break; + } + + } + + table->last = j; + + return 0; +} + +static int ci_set_valid_flag(struct ci_mc_reg_table *table) +{ + uint8_t i, j; + + for (i = 0; i < 
table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != + table->mc_reg_table_entry[j].mc_data[i]) { + table->validflag |= (1 << i); + break; + } + } + } + + return 0; +} + +int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); + pp_atomctrl_mc_reg_table *table; + struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table; + uint8_t module_index = ci_get_memory_modile_index(hwmgr); + + table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + /* Program additional LP registers that are no longer programmed by VBIOS */ + cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); + + memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); + + result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); + + if (0 == result) + result = ci_copy_vbios_smc_reg_table(table, ni_table); + + if (0 == result) { + ci_set_s0_mc_reg_index(ni_table); + result = ci_set_mc_special_registers(hwmgr, ni_table); + } + + if (0 == result) + ci_set_valid_flag(ni_table); + + kfree(table); + + return result; +} + +bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return ci_is_smc_ram_running(hwmgr); 
+} + +int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request) +{ + struct ci_smumgr *smu_data = (struct ci_smumgr *) + (hwmgr->smu_backend); + struct SMU7_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t array = smu_data->dpm_table_start + + offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) * + SMU7_MAX_LEVELS_GRAPHICS; + uint32_t i; + + for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { + levels[i].ActivityLevel = + cpu_to_be16(request->activity_threshold); + levels[i].EnabledForActivity = 1; + levels[i].UpH = request->up_hyst; + levels[i].DownH = request->down_hyst; + } + + return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, + array_size, SMC_RAM_END); +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h new file mode 100644 index 000000000000..cc4176d2d25f --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h @@ -0,0 +1,52 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef CI_SMC_H +#define CI_SMC_H + +#include <linux/types.h> + + +struct pp_smumgr; +struct pp_hwmgr; +struct amd_pp_profile; + +int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter); +int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); +int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +int ci_init_smc_table(struct pp_hwmgr *hwmgr); +int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); +int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr); +uint32_t ci_get_offsetof(uint32_t type, uint32_t member); +uint32_t ci_get_mac_definition(uint32_t value); +int ci_process_firmware_header(struct pp_hwmgr *hwmgr); +int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); +bool ci_is_dpm_running(struct pp_hwmgr *hwmgr); +int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, + struct amd_pp_profile *request); + + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c new file mode 100644 index 000000000000..f265f42a7ed3 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -0,0 +1,86 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include "linux/delay.h" + +#include "smumgr.h" +#include "ci_smumgr.h" +#include "cgs_common.h" +#include "ci_smc.h" + +static int ci_smu_init(struct pp_hwmgr *hwmgr) +{ + int i; + struct ci_smumgr *ci_priv = NULL; + + ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL); + + if (ci_priv == NULL) + return -ENOMEM; + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) + ci_priv->activity_target[i] = 30; + + hwmgr->smu_backend = ci_priv; + + return 0; +} + +static int ci_smu_fini(struct pp_hwmgr *hwmgr) +{ + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; + cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); + return 0; +} + +static int ci_start_smu(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +const struct pp_smumgr_func ci_smu_funcs = { + .smu_init = ci_smu_init, + .smu_fini = ci_smu_fini, + .start_smu = ci_start_smu, + .check_fw_load_finish = NULL, + .request_smu_load_fw = NULL, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = ci_send_msg_to_smc, + .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, + .get_offsetof = ci_get_offsetof, + .process_firmware_header = ci_process_firmware_header, + .init_smc_table = ci_init_smc_table, + .update_sclk_threshold = ci_update_sclk_threshold, + .thermal_setup_fan_table = ci_thermal_setup_fan_table, + .populate_all_graphic_levels = ci_populate_all_graphic_levels, + .populate_all_memory_levels = ci_populate_all_memory_levels, + .get_mac_definition = ci_get_mac_definition, + .initialize_mc_reg_table = ci_initialize_mc_reg_table, + .is_dpm_running = ci_is_dpm_running, + .populate_requested_graphic_levels = ci_populate_requested_graphic_levels, +}; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h new file mode 100644 index 000000000000..8189cfa17c46 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h @@ -0,0 +1,78 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _CI_SMUMANAGER_H_ +#define _CI_SMUMANAGER_H_ + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 6 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 8 + +#include "smu7_discrete.h" +#include <pp_endian.h> +#include "ppatomctrl.h" + +struct ci_pt_defaults { + u8 svi_load_line_en; + u8 svi_load_line_vddc; + u8 tdc_vddc_throttle_release_limit_perc; + u8 tdc_mawt; + u8 tdc_waterfall_ctl; + u8 dte_ambient_temp_base; + u32 display_cac; + u32 bapm_temp_gradient; + u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; + u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; +}; + +struct ci_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ci_mc_reg_table { + uint8_t last; + uint8_t num_entries; + uint16_t validflag; + struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ci_smumgr { + uint32_t soft_regs_start; + uint32_t dpm_table_start; + uint32_t mc_reg_table_start; + uint32_t fan_table_start; + uint32_t arb_table_start; + uint32_t ulv_setting_starts; + struct SMU7_Discrete_DpmTable smc_state_table; + struct SMU7_Discrete_PmFuses power_tune_table; + const struct ci_pt_defaults *power_tune_defaults; + SMU7_Discrete_MCRegisters mc_regs; + struct ci_mc_reg_table mc_reg_table; + uint32_t activity_target[SMU7_MAX_LEVELS_GRAPHICS]; + +}; + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 652aaa43e95c..78ab0556e48f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -52,53 +52,52 @@ static const enum cz_scratch_entry firmware_list[] = { CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, }; -static int cz_smum_get_argument(struct pp_smumgr *smumgr) +static int cz_smum_get_argument(struct pp_hwmgr *hwmgr) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - return cgs_read_register(smumgr->device, + return cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0); } -static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr, - uint16_t msg) +static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) { int result = 0; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - result = SMUM_WAIT_FIELD_UNEQUAL(smumgr, + result = PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); if (result != 0) { pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg); return result; } - cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0); - cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg); + cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0); + cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg); return 0; } /* Send a message to the SMC, and wait for its response.*/ -static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int result = 0; - result = cz_send_msg_to_smc_async(smumgr, msg); + result = cz_send_msg_to_smc_async(hwmgr, msg); if (result != 0) return result; - return SMUM_WAIT_FIELD_UNEQUAL(smumgr, + return PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); } -static int cz_set_smc_sram_address(struct pp_smumgr *smumgr, +static int 
cz_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_address, uint32_t limit) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; if (0 != (3 & smc_address)) { @@ -111,39 +110,39 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr, return -EINVAL; } - cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0, + cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address); return 0; } -static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr, +static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_address, uint32_t value, uint32_t limit) { int result; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - result = cz_set_smc_sram_address(smumgr, smc_address, limit); + result = cz_set_smc_sram_address(hwmgr, smc_address, limit); if (!result) - cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value); + cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value); return result; } -static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); + cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); - return cz_send_msg_to_smc(smumgr, msg); + return cz_send_msg_to_smc(hwmgr, msg); } -static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, +static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t firmware) { int i; @@ -151,19 +150,19 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, SMU8_FIRMWARE_HEADER_LOCATION + offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index); + cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index); - for (i = 0; i < smumgr->usec_timeout; i++) { + for (i = 0; i < hwmgr->usec_timeout; i++) { if (firmware == - (cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware)) + (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware)) break; udelay(1); } - if (i >= smumgr->usec_timeout) { + if (i >= hwmgr->usec_timeout) { pr_err("SMU check loaded firmware failed.\n"); return -EINVAL; } @@ -171,7 +170,7 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr, return 0; } -static int cz_load_mec_firmware(struct pp_smumgr *smumgr) +static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr) { uint32_t reg_data; uint32_t tmp; @@ -179,44 +178,44 @@ static int cz_load_mec_firmware(struct pp_smumgr *smumgr) struct cgs_firmware_info info = {0}; struct cz_smumgr *cz_smu; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cz_smu = (struct cz_smumgr *)smumgr->backend; - ret = cgs_get_firmware_info(smumgr->device, + cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; + ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_CP_MEC, &info); if (ret) return -EINVAL; /* Disable MEC parsing/prefetching */ - tmp = cgs_read_register(smumgr->device, + tmp = cgs_read_register(hwmgr->device, mmCP_MEC_CNTL); - tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); - tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, 
MEC_ME2_HALT, 1); - cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp); + tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); + tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); + cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp); - tmp = cgs_read_register(smumgr->device, + tmp = cgs_read_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); - tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); - cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); + tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); + cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp); reg_data = smu_lower_32_bits(info.mc_addr) & - SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); - cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data); + PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); + cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data); reg_data = smu_upper_32_bits(info.mc_addr) & - SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); - cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data); + PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); + cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data); return 0; } -static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, +static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr, enum cz_scratch_entry firmware_enum) { uint8_t ret = 0; @@ -226,7 +225,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, ret = UCODE_ID_SDMA0; break; case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: - if (smumgr->chip_id == CHIP_STONEY) + if (hwmgr->chip_id == CHIP_STONEY) ret = UCODE_ID_SDMA0; else ret = UCODE_ID_SDMA1; @@ -244,7 +243,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, ret = UCODE_ID_CP_MEC_JT1; break; case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: - if (smumgr->chip_id == CHIP_STONEY) + if (hwmgr->chip_id == CHIP_STONEY) ret = UCODE_ID_CP_MEC_JT1; else ret = UCODE_ID_CP_MEC_JT2; @@ -326,17 +325,17 @@ static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type) } static int cz_smu_populate_single_scratch_task( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, enum cz_scratch_entry fw_enum, uint8_t type, bool is_last) { uint8_t i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; task->type = type; - task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum); + task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum); task->next = is_last ? 
END_OF_TASK_LIST : cz_smu->toc_entry_used_count; for (i = 0; i < cz_smu->scratch_buffer_length; i++) @@ -363,17 +362,17 @@ static int cz_smu_populate_single_scratch_task( } static int cz_smu_populate_single_ucode_load_task( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, enum cz_scratch_entry fw_enum, bool is_last) { uint8_t i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; task->type = TASK_TYPE_UCODE_LOAD; - task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum); + task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum); task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; for (i = 0; i < cz_smu->driver_buffer_length; i++) @@ -392,22 +391,22 @@ static int cz_smu_populate_single_ucode_load_task( return 0; } -static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, TASK_TYPE_UCODE_SAVE, true); return 0; } -static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr) +static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr) { int i; - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) @@ -416,17 +415,17 @@ static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr) return 0; } -static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, TASK_TYPE_UCODE_SAVE, false); - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, TASK_TYPE_UCODE_SAVE, true); @@ -434,121 +433,120 @@ static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr) } -static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr; toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count; - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); - 
cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (smumgr->chip_id == CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, + if (hwmgr->chip_id == CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); else - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); /* populate scratch */ - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, TASK_TYPE_UCODE_LOAD, false); - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, TASK_TYPE_UCODE_LOAD, false); - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, TASK_TYPE_UCODE_LOAD, true); return 0; } -static int cz_smu_construct_toc_for_power_profiling( - struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, TASK_TYPE_INITIALIZE, true); return 0; } -static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count; - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); - if (smumgr->chip_id != CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, + if (hwmgr->chip_id != CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); - if (smumgr->chip_id != CHIP_STONEY) - cz_smu_populate_single_ucode_load_task(smumgr, + if (hwmgr->chip_id != CHIP_STONEY) + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); - cz_smu_populate_single_ucode_load_task(smumgr, + cz_smu_populate_single_ucode_load_task(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); return 0; } -static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr) +static int 
cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count; - cz_smu_populate_single_scratch_task(smumgr, + cz_smu_populate_single_scratch_task(hwmgr, CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, TASK_TYPE_INITIALIZE, true); return 0; } -static int cz_smu_construct_toc(struct pp_smumgr *smumgr) +static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; cz_smu->toc_entry_used_count = 0; - cz_smu_initialize_toc_empty_job_list(smumgr); - cz_smu_construct_toc_for_rlc_aram_save(smumgr); - cz_smu_construct_toc_for_vddgfx_enter(smumgr); - cz_smu_construct_toc_for_vddgfx_exit(smumgr); - cz_smu_construct_toc_for_power_profiling(smumgr); - cz_smu_construct_toc_for_bootup(smumgr); - cz_smu_construct_toc_for_clock_table(smumgr); + cz_smu_initialize_toc_empty_job_list(hwmgr); + cz_smu_construct_toc_for_rlc_aram_save(hwmgr); + cz_smu_construct_toc_for_vddgfx_enter(hwmgr); + cz_smu_construct_toc_for_vddgfx_exit(hwmgr); + cz_smu_construct_toc_for_power_profiling(hwmgr); + cz_smu_construct_toc_for_bootup(hwmgr); + cz_smu_construct_toc_for_clock_table(hwmgr); return 0; } -static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) +static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; uint32_t firmware_type; uint32_t i; int ret; @@ -559,12 +557,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) for (i = 0; i < ARRAY_SIZE(firmware_list); i++) { - firmware_type = cz_translate_firmware_enum_to_arg(smumgr, + firmware_type = cz_translate_firmware_enum_to_arg(hwmgr, firmware_list[i]); ucode_id = cz_convert_fw_type_to_cgs(firmware_type); - ret = cgs_get_firmware_info(smumgr->device, + ret = cgs_get_firmware_info(hwmgr->device, ucode_id, &info); if (ret == 0) { @@ -585,12 +583,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) } static int cz_smu_populate_single_scratch_entry( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, enum cz_scratch_entry scratch_type, uint32_t ulsize_byte, struct cz_buffer_entry *entry) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; long long mc_addr = ((long long)(cz_smu->smu_buffer.mc_addr_high) << 32) | cz_smu->smu_buffer.mc_addr_low; @@ -611,9 +609,9 @@ static int cz_smu_populate_single_scratch_entry( return 0; } -static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table) +static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; unsigned long i; for (i = 0; i < cz_smu->scratch_buffer_length; i++) { @@ -624,25 +622,25 @@ static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table) *table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr; - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrHi, cz_smu->scratch_buffer[i].mc_addr_high); - cz_send_msg_to_smc_with_parameter(smumgr, + 
cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrLo, cz_smu->scratch_buffer[i].mc_addr_low); - cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_clock_table); - cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram); + cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram); return 0; } -static int cz_upload_pptable_settings(struct pp_smumgr *smumgr) +static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend; + struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; unsigned long i; for (i = 0; i < cz_smu->scratch_buffer_length; i++) { @@ -651,63 +649,63 @@ static int cz_upload_pptable_settings(struct pp_smumgr *smumgr) break; } - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrHi, cz_smu->scratch_buffer[i].mc_addr_high); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetClkTableAddrLo, cz_smu->scratch_buffer[i].mc_addr_low); - cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_clock_table); - cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu); + cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu); return 0; } -static int cz_request_smu_load_fw(struct pp_smumgr *smumgr) +static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr) { - struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend); + struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend); uint32_t smc_address; - if (!smumgr->reload_fw) { + if (!hwmgr->reload_fw) { pr_info("skip reloading...\n"); return 0; } - cz_smu_populate_firmware_entries(smumgr); + cz_smu_populate_firmware_entries(hwmgr); - cz_smu_construct_toc(smumgr); + cz_smu_construct_toc(hwmgr); smc_address = SMU8_FIRMWARE_HEADER_LOCATION + offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); - cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4); + cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DriverDramAddrHi, cz_smu->toc_buffer.mc_addr_high); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DriverDramAddrLo, cz_smu->toc_buffer.mc_addr_low); - cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs); + cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs); - cz_send_msg_to_smc_with_parameter(smumgr, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_aram); - cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob, + cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_power_profiling_index); - return cz_send_msg_to_smc_with_parameter(smumgr, + return cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, cz_smu->toc_entry_initialize_index); } -static int cz_start_smu(struct pp_smumgr *smumgr) +static int cz_start_smu(struct pp_hwmgr *hwmgr) { int ret = 0; uint32_t fw_to_check = 0; @@ -721,23 +719,23 @@ static int cz_start_smu(struct pp_smumgr *smumgr) UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; - if (smumgr->chip_id == CHIP_STONEY) + if (hwmgr->chip_id == CHIP_STONEY) fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); - ret = cz_request_smu_load_fw(smumgr); + ret = 
cz_request_smu_load_fw(hwmgr); if (ret) pr_err("SMU firmware load failed\n"); - cz_check_fw_load_finish(smumgr, fw_to_check); + cz_check_fw_load_finish(hwmgr, fw_to_check); - ret = cz_load_mec_firmware(smumgr); + ret = cz_load_mec_firmware(hwmgr); if (ret) pr_err("Mec Firmware load failed\n"); return ret; } -static int cz_smu_init(struct pp_smumgr *smumgr) +static int cz_smu_init(struct pp_hwmgr *hwmgr) { uint64_t mc_addr = 0; int ret = 0; @@ -747,7 +745,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) if (cz_smu == NULL) return -ENOMEM; - smumgr->backend = cz_smu; + hwmgr->smu_backend = cz_smu; cz_smu->toc_buffer.data_size = 4096; cz_smu->smu_buffer.data_size = @@ -757,7 +755,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) + ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); - ret = smu_allocate_memory(smumgr->device, + ret = smu_allocate_memory(hwmgr->device, cz_smu->toc_buffer.data_size, CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -770,7 +768,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - ret = smu_allocate_memory(smumgr->device, + ret = smu_allocate_memory(hwmgr->device, cz_smu->smu_buffer.data_size, CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -783,7 +781,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, UCODE_ID_RLC_SCRATCH_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -791,14 +789,14 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { pr_err("Error when Populate Firmware Entry.\n"); return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -806,7 +804,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, sizeof(struct SMU8_MultimediaPowerLogData), &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -814,7 +812,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return -1; } - if (0 != cz_smu_populate_single_scratch_entry(smumgr, + if (0 != cz_smu_populate_single_scratch_entry(hwmgr, CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, sizeof(struct SMU8_Fusion_ClkTable), &cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) { @@ -825,18 +823,18 @@ static int cz_smu_init(struct pp_smumgr *smumgr) return 0; } -static int cz_smu_fini(struct pp_smumgr *smumgr) +static int cz_smu_fini(struct pp_hwmgr *hwmgr) { struct cz_smumgr *cz_smu; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - cz_smu = (struct cz_smumgr *)smumgr->backend; + cz_smu = (struct cz_smumgr *)hwmgr->smu_backend; if (cz_smu) { - 
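/*
 * A minimal stand-alone sketch of the init/fini ownership that
 * cz_smu_init()/cz_smu_fini() keep across this conversion: the backend
 * object is allocated once, parked in hwmgr->smu_backend, and released
 * from there on teardown.  The types below are simplified stand-ins and
 * calloc/free stand in for kzalloc/kfree; this is only an illustration of
 * the pattern, not the real powerplay code.
 */
#include <stdlib.h>

struct sketch_backend {
	unsigned int toc_entry_used_count;
};

struct sketch_hwmgr {
	void *smu_backend;		/* replaces smumgr->backend */
};

static int sketch_smu_init(struct sketch_hwmgr *hwmgr)
{
	struct sketch_backend *be = calloc(1, sizeof(*be));

	if (be == NULL)
		return -1;		/* -ENOMEM in the kernel code */
	hwmgr->smu_backend = be;
	return 0;
}

static void sketch_smu_fini(struct sketch_hwmgr *hwmgr)
{
	free(hwmgr->smu_backend);	/* kfree(cz_smu) in cz_smu_fini() */
	hwmgr->smu_backend = NULL;
}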
cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, cz_smu->toc_buffer.handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, cz_smu->smu_buffer.handle); kfree(cz_smu); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 8712f093d6d9..b1a66b5ada4a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c @@ -198,7 +198,7 @@ static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda) static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -216,7 +216,7 @@ static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); @@ -299,7 +299,7 @@ static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; @@ -314,7 +314,7 @@ static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr) static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; @@ -334,11 +334,11 @@ static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -359,7 +359,7 @@ static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) { int i; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. 
*/ for (i = 0; i < 16; i++) @@ -370,7 +370,7 @@ static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); if ((hwmgr->thermal_controller.advanceFanControlParameters. usFanOutputSensitivity & (1 << 15)) || @@ -389,7 +389,7 @@ static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -398,14 +398,9 @@ static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; @@ -426,11 +421,11 @@ static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) { uint32_t pm_fuse_table_offset; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -472,19 +467,13 @@ static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed!", return -EINVAL); - /* DW19 */ - if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - /* DW20 */ if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE(false, "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -586,7 +575,7 @@ static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr, { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); int i; /* Index (dpm_table->pcie_speed_table.count) @@ -774,7 +763,7 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr 
*)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; struct phm_ppt_v1_information *table_info = @@ -859,7 +848,7 @@ int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) levels[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1000,7 +989,7 @@ static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr, int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; /* populate MCLK dpm table to SMU7 */ @@ -1043,7 +1032,7 @@ int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr) PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1352,7 +1341,7 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct SMU73_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; int result = 0; @@ -1370,7 +1359,7 @@ static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) if (!result) result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU73_Discrete_MCArbDramTimingTable), @@ -1460,7 +1449,7 @@ static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr, static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint8_t count, level; @@ -1491,7 +1480,7 @@ static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, volt_with_cks, value; uint16_t clock_freq_u16; - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = @@ -1694,9 +1683,9 @@ static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr, return 0; } -static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) +static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -1708,7 +1697,7 @@ static int 
fiji_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. */ - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result) @@ -1717,13 +1706,13 @@ static int fiji_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return smu7_write_smc_sram_dword(smumgr, + return smu7_write_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct SMU73_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; unsigned min_level = 1; @@ -1782,7 +1771,7 @@ static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr) } } if (mask) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LedConfig, mask); return 0; @@ -1799,7 +1788,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) { int result; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -1985,7 +1974,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), @@ -1994,7 +1983,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to upload dpm data to SMC memory!", return result); - result = fiji_init_arb_table_index(hwmgr->smumgr); + result = fiji_init_arb_table_index(hwmgr); PP_ASSERT_WITH_CODE(0 == result, "Failed to upload arb data to SMC memory!", return result); @@ -2022,7 +2011,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr) */ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; @@ -2104,20 +2093,20 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start, + res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanMinPwm, hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit); if (!res && hwmgr->thermal_controller. 
advanceFanControlParameters.ulMinFanSCLKAcousticLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanSclkTarget, hwmgr->thermal_controller. advanceFanControlParameters.ulMinFanSCLKAcousticLimit); @@ -2133,13 +2122,12 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; - struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) return 0; - ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs); + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); if (!ret) /* If this param is not changed, this function could fire unnecessarily */ @@ -2162,7 +2150,7 @@ static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2179,7 +2167,7 @@ int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2256,7 +2244,7 @@ uint32_t fiji_get_mac_definition(uint32_t value) static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2280,7 +2268,7 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDDPM) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); return 0; @@ -2288,7 +2276,7 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr) static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2312,7 +2300,7 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); return 0; @@ -2320,7 +2308,7 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr) static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) { - struct fiji_smumgr *smu_data = (struct 
fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; @@ -2339,7 +2327,7 @@ static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SAMUDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); return 0; @@ -2373,12 +2361,12 @@ int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend); + struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2388,7 +2376,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2400,7 +2388,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2408,7 +2396,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) if (!result) smu_data->smu7_data.mc_reg_table_start = tmp; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2418,7 +2406,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2428,7 +2416,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2476,7 +2464,7 @@ int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { struct fiji_smumgr *smu_data = (struct fiji_smumgr *) - (hwmgr->smumgr->backend); + (hwmgr->smu_backend); struct SMU73_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; uint32_t array = smu_data->smu7_data.dpm_table_start + @@ -2493,6 +2481,6 @@ int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, levels[i].DownHyst = request->down_hyst; } - return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, array_size, SMC_RAM_END); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 6ae948fc524f..5b25e067b2f1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c 
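/*
 * fiji_smumgr.c below follows the same hwmgr conversion; in addition, the
 * SMC start-up paths switch the SMUM_WRITE/READ/WAIT_VFPF_INDIRECT_FIELD
 * wrappers to their PHM_* counterparts, which operate on hwmgr (or
 * hwmgr->device) directly, so the pp_smumgr indirection disappears from
 * the register accesses as well.
 */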
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -58,122 +58,122 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = { { 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 } }; -static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr) +static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* Wait for smc boot up */ - /* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + /* PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result) return result; /* Clear status */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for ROM firmware to initialize interrupt hendler */ - /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND, + /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, SMC_IND, SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */ /* Set SMU Auto Start */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_INPUT_DATA, AUTO_START, 1); /* Clear firmware interrupt enable flag */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); /* Wait for done bit to be set */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ - if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS) != 1) { PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); } /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) +static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* 
Clear firmware interrupt enable flag */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result) return result; /* Set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); /* Enable clock */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) +static int fiji_setup_pwr_virus(struct pp_hwmgr *hwmgr) { int i; int result = -EINVAL; uint32_t reg, data; const PWR_Command_Table *pvirus = PwrVirusTable; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { switch (pvirus->command) { case PwrCmdWrite: reg = pvirus->reg; data = pvirus->data; - cgs_write_register(smumgr->device, reg, data); + cgs_write_register(hwmgr->device, reg, data); break; case PwrCmdEnd: @@ -192,13 +192,13 @@ static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) return result; } -static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) +static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (0 != smu_data->avfs.avfs_btc_param) { - if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, + if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed"); result = -EINVAL; @@ -206,23 +206,23 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) } /* Soft-Reset to reset the engine before loading uCode */ /* halt */ - cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000); + cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000); /* reset everything */ - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff); /* clear reset */ - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0); return result; } -static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) +static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr) { int32_t vr_config; uint32_t table_start; uint32_t level_addr, vr_config_addr; uint32_t level_size = sizeof(avfs_graphics_level); - PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, DpmTable), &table_start, 
0x40000), @@ -237,7 +237,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) vr_config_addr = table_start + offsetof(SMU73_Discrete_DpmTable, VRConfig); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_addr, (uint8_t *)&vr_config, sizeof(int32_t), 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] Problems copying " "vr_config value over to SMC", @@ -245,7 +245,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, level_addr, (uint8_t *)(&avfs_graphics_level), level_size, 0x40000), "[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!", return -1;); @@ -253,9 +253,9 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) return 0; } -static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) +static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: @@ -265,17 +265,17 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) if (!smu_started) break; smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr), + PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr), "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level" " table over to SMU", return -EINVAL;); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; - PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr), + PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(hwmgr), "[AVFS][fiji_avfs_event_mgr] Could not setup " "Pwr Virus for AVFS ", return -EINVAL;); smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr), + PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr), "[AVFS][fiji_avfs_event_mgr] Failure at " "fiji_start_avfs_btc. 
AVFS Disabled", return -EINVAL;); @@ -293,64 +293,64 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) return 0; } -static int fiji_start_smu(struct pp_smumgr *smumgr) +static int fiji_start_smu(struct pp_hwmgr *hwmgr) { int result = 0; - struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Only start SMC if SMC RAM is not running */ - if (!(smu7_is_smc_ram_running(smumgr) - || cgs_is_virtualization_enabled(smumgr->device))) { - fiji_avfs_event_mgr(smumgr, false); + if (!(smu7_is_smc_ram_running(hwmgr) + || cgs_is_virtualization_enabled(hwmgr->device))) { + fiji_avfs_event_mgr(hwmgr, false); /* Check if SMU is running in protected mode */ - if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, + if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { - result = fiji_start_smu_in_non_protection_mode(smumgr); + result = fiji_start_smu_in_non_protection_mode(hwmgr); if (result) return result; } else { - result = fiji_start_smu_in_protection_mode(smumgr); + result = fiji_start_smu_in_protection_mode(hwmgr); if (result) return result; } - fiji_avfs_event_mgr(smumgr, true); + fiji_avfs_event_mgr(hwmgr, true); } /* To initialize all clock gating before RLC loaded and running.*/ - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(smumgr->device, + cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE); /* Setup SoftRegsStart here for register lookup in case * DummyBackEnd is used and ProcessFirmwareHeader is not executed */ - smu7_read_smc_sram_dword(smumgr, + smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header, SoftRegisters), &(priv->smu7_data.soft_regs_start), 0x40000); - result = smu7_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(hwmgr); return result; } -static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr) +static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr) { uint32_t efuse = 0; uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1; - if (cgs_is_virtualization_enabled(smumgr->device)) + if (cgs_is_virtualization_enabled(hwmgr->device)) return 0; - if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB, + if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB, mask, &efuse)) { if (efuse) return true; @@ -365,7 +365,7 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr) * @param smc_addr the address in the SMC RAM to access. * @param value to write to the SMC SRAM. 
*/ -static int fiji_smu_init(struct pp_smumgr *smumgr) +static int fiji_smu_init(struct pp_hwmgr *hwmgr) { int i; struct fiji_smumgr *fiji_priv = NULL; @@ -375,9 +375,9 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) if (fiji_priv == NULL) return -ENOMEM; - smumgr->backend = fiji_priv; + hwmgr->smu_backend = fiji_priv; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index 51adf04ab4b3..efb0fc033274 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c @@ -101,7 +101,7 @@ static const struct iceland_pt_defaults defaults_icelandpro = { static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct cgs_system_info sys_info = {0}; uint32_t dev_id; @@ -130,7 +130,7 @@ static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; @@ -144,7 +144,7 @@ static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); @@ -159,11 +159,11 @@ static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -184,7 +184,7 @@ static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. 
*/ for (i = 0; i < 8; i++) @@ -193,14 +193,9 @@ static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; @@ -219,7 +214,7 @@ static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) { int i; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd; uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd; @@ -245,7 +240,7 @@ static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) { int i; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint8_t *vid = smu_data->power_tune_table.VddCVid; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -264,12 +259,12 @@ static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint32_t pm_fuse_table_offset; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -317,19 +312,13 @@ static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed!", return -EINVAL); - /* DW17 */ - if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - /* DW18 */ if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE(false, "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -339,7 +328,7 @@ static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) return 0; } -static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, +static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, uint32_t clock, uint32_t *vol) { @@ -601,7 +590,7 @@ static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discret { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct smu7_dpm_table *dpm_table = 
&data->dpm_table; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint32_t i; /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */ @@ -749,7 +738,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level); /* populate graphics levels*/ - result = iceland_get_dependecy_volt_by_clk(hwmgr, + result = iceland_get_dependency_volt_by_clk(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc); PP_ASSERT_WITH_CODE((0 == result), @@ -816,7 +805,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, GraphicsLevel); @@ -892,7 +881,7 @@ int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1104,7 +1093,7 @@ static int iceland_populate_single_memory_level( uint32_t mclk_strobe_mode_threshold = 40000; if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) { - result = iceland_get_dependecy_volt_by_clk(hwmgr, + result = iceland_get_dependency_volt_by_clk(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); PP_ASSERT_WITH_CODE((0 == result), "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); @@ -1113,7 +1102,7 @@ static int iceland_populate_single_memory_level( if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) { memory_level->MinVddci = memory_level->MinVddc; } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { - result = iceland_get_dependecy_volt_by_clk(hwmgr, + result = iceland_get_dependency_volt_by_clk(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk, memory_clock, &memory_level->MinVddci); @@ -1218,7 +1207,7 @@ static int iceland_populate_single_memory_level( int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; @@ -1257,7 +1246,7 @@ int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1496,7 +1485,7 @@ static int 
iceland_populate_memory_timing_parameters( static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); int result = 0; SMU71_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; @@ -1518,7 +1507,7 @@ static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) if (0 == result) { result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU71_Discrete_MCArbDramTimingTable), @@ -1534,7 +1523,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, { int result = 0; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); table->GraphicsBootLevel = 0; table->MemoryBootLevel = 0; @@ -1572,10 +1561,10 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr, return result; } -static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr, +static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table) { - const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend; + const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend; uint32_t i, j; @@ -1612,13 +1601,12 @@ static void iceland_convert_mc_registers( } } -static int iceland_convert_mc_reg_table_entry_to_smc( - struct pp_smumgr *smumgr, +static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, SMU71_Discrete_MCRegisterSet *mc_reg_table_data ) { - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint32_t i = 0; for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { @@ -1648,7 +1636,7 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, for (i = 0; i < data->dpm_table.mclk_table.count; i++) { res = iceland_convert_mc_reg_table_entry_to_smc( - hwmgr->smumgr, + hwmgr, data->dpm_table.mclk_table.dpm_levels[i].value, &mc_regs->data[i] ); @@ -1662,8 +1650,7 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t address; int32_t result; @@ -1682,7 +1669,7 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]); - return smu7_copy_bytes_to_smc(hwmgr->smumgr, address, + return smu7_copy_bytes_to_smc(hwmgr, address, (uint8_t *)&smu_data->mc_regs.data[0], sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, SMC_RAM_END); @@ -1691,11 +1678,10 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct pp_smumgr *smumgr 
= hwmgr->smumgr; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters)); - result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize MCRegTable for the MC register addresses!", return result;); @@ -1703,14 +1689,14 @@ static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize MCRegTable for driver state!", return result;); - return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start, + return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start, (uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END); } static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); uint8_t count, level; count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); @@ -1739,7 +1725,7 @@ static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults; SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; @@ -1776,7 +1762,7 @@ static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); - dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient); def1 = defaults->bapmti_r; def2 = defaults->bapmti_rc; @@ -1827,7 +1813,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) { int result; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -1955,7 +1941,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start + + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController), @@ -1965,7 +1951,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to upload dpm data to SMC memory!", return result;); /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */ - result 
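The conversion running through these Iceland hunks is mechanical: helpers that used to receive a struct pp_smumgr now take the struct pp_hwmgr directly, and the per-ASIC private data moves from smumgr->backend to hwmgr->smu_backend. A minimal sketch of a converted call site, built only from identifiers visible in the surrounding hunks (the wrapper name below is illustrative, not part of the patch):

static int iceland_upload_arb_table_sketch(struct pp_hwmgr *hwmgr)
{
	struct iceland_smumgr *smu_data =
			(struct iceland_smumgr *)(hwmgr->smu_backend);
	SMU71_Discrete_MCArbDramTimingTable arb_regs;

	memset(&arb_regs, 0x00, sizeof(arb_regs));

	/* the SMC I/O helper is now handed the hwmgr itself */
	return smu7_copy_bytes_to_smc(hwmgr,
			smu_data->smu7_data.arb_table_start,
			(uint8_t *)&arb_regs,
			sizeof(SMU71_Discrete_MCArbDramTimingTable),
			SMC_RAM_END);
}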
= smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.ulv_setting_starts, (uint8_t *)&(smu_data->ulv_setting), sizeof(SMU71_Discrete_Ulv), @@ -1994,7 +1980,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr) */ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend); SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; @@ -2064,7 +2050,7 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) /* fan_table.FanControl_GL_Flag = 1; */ - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); + res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); return 0; } @@ -2084,7 +2070,7 @@ static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2101,7 +2087,7 @@ int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2182,13 +2168,13 @@ uint32_t iceland_get_mac_definition(uint32_t value) int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend); + struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2199,7 +2185,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2212,7 +2198,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2221,7 +2207,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) smu7_data->mc_reg_table_start = tmp; } - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2232,7 +2218,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, 
mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2244,7 +2230,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2255,7 +2241,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU71_FIRMWARE_HEADER_LOCATION + offsetof(SMU71_Firmware_Header, UlvSettings), &tmp, SMC_RAM_END); @@ -2522,7 +2508,7 @@ static int iceland_set_valid_flag(struct iceland_mc_reg_table *table) int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend); + struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend); pp_atomctrl_mc_reg_table *table; struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table; uint8_t module_index = iceland_get_memory_modile_index(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 0bf2def3b659..78aa1122eacc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -39,55 +39,55 @@ #define ICELAND_SMC_SIZE 0x20000 -static int iceland_start_smc(struct pp_smumgr *smumgr) +static int iceland_start_smc(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); return 0; } -static void iceland_reset_smc(struct pp_smumgr *smumgr) +static void iceland_reset_smc(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); } -static void iceland_stop_smc_clock(struct pp_smumgr *smumgr) +static void iceland_stop_smc_clock(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); } -static void iceland_start_smc_clock(struct pp_smumgr *smumgr) +static void iceland_start_smc_clock(struct pp_hwmgr *hwmgr) { - SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); } -static int iceland_smu_start_smc(struct pp_smumgr *smumgr) +static int iceland_smu_start_smc(struct pp_hwmgr *hwmgr) { /* set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); /* enable smc clock */ - iceland_start_smc_clock(smumgr); + iceland_start_smc_clock(hwmgr); /* de-assert reset */ - iceland_start_smc(smumgr); + iceland_start_smc(hwmgr); - SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, + PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; } -static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, +static int iceland_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, const uint8_t *src, uint32_t limit, uint32_t start_addr) { @@ -96,17 +96,17 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM 
area.", return -EINVAL); - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); while (byte_count >= 4) { data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data); src += 4; byte_count -= 4; } - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); @@ -114,16 +114,16 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr, } -static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) +static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr) { uint32_t val; struct cgs_firmware_info info = {0}; - if (smumgr == NULL || smumgr->device == NULL) + if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; /* load SMC firmware */ - cgs_get_firmware_info(smumgr->device, + cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); if (info.image_size & 3) { @@ -137,56 +137,56 @@ static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) } /* wait for smc boot up */ - SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* clear firmware interrupt enable flag */ - val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, + val = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_SYSCON_MISC_CNTL); - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_SYSCON_MISC_CNTL, val | 1); /* stop smc clock */ - iceland_stop_smc_clock(smumgr); + iceland_stop_smc_clock(hwmgr); /* reset smc */ - iceland_reset_smc(smumgr); - iceland_upload_smc_firmware_data(smumgr, info.image_size, + iceland_reset_smc(hwmgr); + iceland_upload_smc_firmware_data(hwmgr, info.image_size, (uint8_t *)info.kptr, ICELAND_SMC_SIZE, info.ucode_start_address); return 0; } -static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr, +static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr, uint32_t firmwareType) { return 0; } -static int iceland_start_smu(struct pp_smumgr *smumgr) +static int iceland_start_smu(struct pp_hwmgr *hwmgr) { int result; - result = iceland_smu_upload_firmware_image(smumgr); + result = iceland_smu_upload_firmware_image(hwmgr); if (result) return result; - result = iceland_smu_start_smc(smumgr); + result = iceland_smu_start_smc(hwmgr); if (result) return result; - if (!smu7_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(hwmgr)) { pr_info("smu not running, upload firmware again \n"); - result = iceland_smu_upload_firmware_image(smumgr); + result = iceland_smu_upload_firmware_image(hwmgr); if (result) return result; - result = iceland_smu_start_smc(smumgr); + result = iceland_smu_start_smc(hwmgr); if (result) return result; } - result = smu7_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(hwmgr); return result; } @@ -198,7 +198,7 @@ static int iceland_start_smu(struct pp_smumgr *smumgr) * @param smcAddress the address in the SMC RAM to access. 
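The upload helper above streams the firmware image through the SMC's indirect index/data pair: mmSMC_IND_INDEX_0 is written once with the start address, AUTO_INCREMENT_IND_0 is switched on, and every four source bytes are pushed to mmSMC_IND_DATA_0 with the most significant byte first. A sketch of just the packing loop; the shifts below are equivalent to the multiplications used in the hunk:

	while (byte_count >= 4) {
		/* SMC expects big-endian words: src[0] becomes the MSB */
		data = ((uint32_t)src[0] << 24) |
		       ((uint32_t)src[1] << 16) |
		       ((uint32_t)src[2] << 8) |
		        (uint32_t)src[3];
		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
		src += 4;
		byte_count -= 4;
	}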
* @param value to write to the SMC SRAM. */ -static int iceland_smu_init(struct pp_smumgr *smumgr) +static int iceland_smu_init(struct pp_hwmgr *hwmgr) { int i; struct iceland_smumgr *iceland_priv = NULL; @@ -208,9 +208,9 @@ static int iceland_smu_init(struct pp_smumgr *smumgr) if (iceland_priv == NULL) return -ENOMEM; - smumgr->backend = iceland_priv; + hwmgr->smu_backend = iceland_priv; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h index 8eae01b37c40..802472530d34 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h @@ -39,7 +39,7 @@ struct iceland_pt_defaults { uint8_t tdc_waterfall_ctl; uint8_t dte_ambient_temp_base; uint32_t display_cac; - uint32_t bamp_temp_gradient; + uint32_t bapm_temp_gradient; uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 99a00bd39256..c92ea38d2e15 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -148,7 +148,7 @@ static uint16_t scale_fan_gain_settings(uint16_t raw_setting) static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -196,7 +196,7 @@ static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmg static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; @@ -210,7 +210,7 @@ static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr) static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; @@ -227,11 +227,11 @@ static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, 
SMC_RAM_END)) @@ -252,7 +252,7 @@ static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_of static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr) { int i; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -263,7 +263,7 @@ static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* TO DO move to hwmgr */ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15)) @@ -279,7 +279,7 @@ static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -288,14 +288,9 @@ static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; @@ -315,12 +310,12 @@ static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t pm_fuse_table_offset; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -358,17 +353,12 @@ static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed!", return -EINVAL); - if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML Min and Max Vid Failed!", - return -EINVAL); - if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE(false, "Attempt to populate BapmVddCBaseLeakage Hi and Lo " "Sidd Failed!", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, (sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -494,7 +484,6 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *table_info = (struct 
phm_ppt_v1_information *)(hwmgr->pptable); - struct pp_smumgr *smumgr = hwmgr->smumgr; state->CcPwrDynRm = 0; state->CcPwrDynRm1 = 0; @@ -503,7 +492,7 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); - if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker) + if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) state->VddcPhase = data->vddc_phase_shed_control ^ 0x3; else state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; @@ -525,7 +514,7 @@ static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, struct SMU74_Discrete_DpmTable *table) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int i; @@ -556,8 +545,7 @@ static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr, static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr, SMU74_Discrete_DpmTable *table) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t i, ref_clk; struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; @@ -607,8 +595,7 @@ static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr, static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr, uint32_t clock, SMU_SclkSetting *sclk_setting) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); struct pp_atomctrl_clock_dividers_ai dividers; uint32_t ref_clock; @@ -751,9 +738,8 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, */ int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -838,7 +824,7 @@ int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) levels[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -880,7 +866,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, if (mclk_stutter_mode_threshold && (clock <= mclk_stutter_mode_threshold) && - (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, + (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)) mem_level->StutterEnable = true; @@ -900,9 +886,8 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, */ int 
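The PM-fuse hunk above shows the two-step SMC access idiom that repeats throughout this file after the conversion: read the table's offset out of the firmware header with smu7_read_smc_sram_dword(), then copy the host-side structure to that offset with smu7_copy_bytes_to_smc(), both now taking the hwmgr. A reduced sketch with the PP_ASSERT_WITH_CODE error reporting collapsed to plain returns (the "- 92" truncation is taken verbatim from the hunk):

	uint32_t pm_fuse_table_offset;

	if (smu7_read_smc_sram_dword(hwmgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, PmFuseTable),
			&pm_fuse_table_offset, SMC_RAM_END))
		return -EINVAL;

	if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
			(uint8_t *)&smu_data->power_tune_table,
			sizeof(struct SMU74_Discrete_PmFuses) - 92,
			SMC_RAM_END))
		return -EINVAL;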
polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; int result; /* populate MCLK dpm table to SMU7 */ @@ -943,7 +928,7 @@ int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); /* level count will send to smc once at init smc table and never change */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, (uint32_t)array_size, SMC_RAM_END); return result; @@ -1201,9 +1186,8 @@ static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct SMU74_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; int result = 0; @@ -1222,7 +1206,7 @@ static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) } result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, sizeof(SMU74_Discrete_MCArbDramTimingTable), @@ -1321,9 +1305,8 @@ static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr, static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint8_t count, level; @@ -1354,8 +1337,7 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) { uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = @@ -1438,7 +1420,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, struct SMU74_Discrete_DpmTable *table) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint16_t config; config = VR_MERGED_WITH_VDDC; @@ -1482,8 +1464,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct polaris10_smumgr *smu_data = (struct 
polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); int result = 0; @@ -1534,20 +1515,20 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); } - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), &tmp, SMC_RAM_END); - smu7_copy_bytes_to_smc(smumgr, + smu7_copy_bytes_to_smc(hwmgr, tmp, (uint8_t *)&AVFS_meanNsigma, sizeof(AVFS_meanNsigma_t), SMC_RAM_END); - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), &tmp, SMC_RAM_END); - smu7_copy_bytes_to_smc(smumgr, + smu7_copy_bytes_to_smc(hwmgr, tmp, (uint8_t *)&AVFS_SclkOffset, sizeof(AVFS_Sclk_Offset_t), @@ -1569,9 +1550,9 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) * @param hwmgr the address of the powerplay hardware manager. * @return always 0 */ -static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) +static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -1583,7 +1564,7 @@ static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. */ - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result) @@ -1592,13 +1573,13 @@ static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return smu7_write_smc_sram_dword(smumgr, + return smu7_write_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -1615,7 +1596,7 @@ static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct SMU74_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; unsigned min_level = 1; @@ -1658,9 +1639,9 @@ static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr) int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) { int result; - struct pp_smumgr *smumgr = hwmgr->smumgr; struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct 
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table); @@ -1852,7 +1833,7 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), @@ -1861,7 +1842,7 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to upload dpm data to SMC memory!", return result); - result = polaris10_init_arb_table_index(hwmgr->smumgr); + result = polaris10_init_arb_table_index(hwmgr); PP_ASSERT_WITH_CODE(0 == result, "Failed to upload arb data to SMC memory!", return result); @@ -1888,17 +1869,16 @@ static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { int ret; - struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) return 0; - ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); - ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? + ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ? 0 : -1; if (!ret) @@ -1919,7 +1899,7 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr) */ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; @@ -2000,20 +1980,20 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start, + res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanMinPwm, hwmgr->thermal_controller. advanceFanControlParameters.ucMinimumPWMLimit); if (!res && hwmgr->thermal_controller. advanceFanControlParameters.ulMinFanSCLKAcousticLimit) - res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + res = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetFanSclkTarget, hwmgr->thermal_controller. 
advanceFanControlParameters.ulMinFanSCLKAcousticLimit); @@ -2027,7 +2007,7 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2051,7 +2031,7 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDDPM) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); return 0; @@ -2059,7 +2039,7 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr) static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2083,7 +2063,7 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); return 0; @@ -2091,7 +2071,7 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr) static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; @@ -2110,7 +2090,7 @@ static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SAMUDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); return 0; @@ -2119,7 +2099,7 @@ static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr) static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; @@ -2157,7 +2137,7 @@ int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t 
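The UVD/VCE/SAMU update hunks above follow one runtime pattern: the boot level is patched into the soft-register area through the indirect SMC interface and, when the relevant platform cap is enabled, advertised with a SetEnabledMask message that now takes the hwmgr rather than a smumgr. The UVD case, condensed to the calls visible in the hunk:

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_UVDDPM) ||
	    phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_StablePState))
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_UVDDPM_SetEnabledMask,
				(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));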
low_sclk_interrupt_threshold = 0; @@ -2174,7 +2154,7 @@ int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2262,13 +2242,13 @@ uint32_t polaris10_get_mac_definition(uint32_t value) */ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) { - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t tmp; int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2278,7 +2258,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2290,7 +2270,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2298,7 +2278,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) if (!result) smu_data->smu7_data.mc_reg_table_start = tmp; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2308,7 +2288,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2318,7 +2298,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (0 != result); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -2342,7 +2322,7 @@ int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *) - (hwmgr->smumgr->backend); + (hwmgr->smu_backend); struct SMU74_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; uint32_t array = smu_data->smu7_data.dpm_table_start + @@ -2359,6 +2339,6 @@ int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, levels[i].DownHyst = request->down_hyst; } - return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, array_size, SMC_RAM_END); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 75f43dadc56b..22b8ecbf7fce 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -60,21 +60,21 @@ static const SMU74_Discrete_GraphicsLevel 
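polaris10_process_firmware_header() above harvests the SMC-resident offset of each sub-table (DpmTable, SoftRegisters, mcRegisterTable, FanTable, and so on) from SMU7_FIRMWARE_HEADER_LOCATION, accumulating most failures with "error |= (0 != result);" instead of returning early. The mcRegisterTable probe, reduced to its pattern as it appears in the hunk:

	result = smu7_read_smc_sram_dword(hwmgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, mcRegisterTable),
			&tmp, SMC_RAM_END);
	if (!result)
		smu_data->smu7_data.mc_reg_table_start = tmp;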
avfs_graphics_level_polaris10[8] = { static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = { 0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; -static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) +static int polaris10_setup_pwr_virus(struct pp_hwmgr *hwmgr) { int i; int result = -EINVAL; uint32_t reg, data; const PWR_Command_Table *pvirus = pwr_virus_table; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) { switch (pvirus->command) { case PwrCmdWrite: reg = pvirus->reg; data = pvirus->data; - cgs_write_register(smumgr->device, reg, data); + cgs_write_register(hwmgr->device, reg, data); break; case PwrCmdEnd: @@ -93,13 +93,13 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr) return result; } -static int polaris10_perform_btc(struct pp_smumgr *smumgr) +static int polaris10_perform_btc(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); if (0 != smu_data->avfs.avfs_btc_param) { - if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { + if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) { pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); result = -1; } @@ -107,16 +107,16 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr) if (smu_data->avfs.avfs_btc_param > 1) { /* Soft-Reset to reset the engine before loading uCode */ /* halt */ - cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000); + cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000); /* reset everything */ - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff); - cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff); + cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0); } return result; } -static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) +static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr) { uint32_t vr_config; uint32_t dpm_table_start; @@ -127,7 +127,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_size = sizeof(avfs_graphics_level_polaris10); u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE); - PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable), &dpm_table_start, 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table", @@ -138,14 +138,14 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_address, (uint8_t *)&vr_config, sizeof(uint32_t), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC", return -1); graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel); - PP_ASSERT_WITH_CODE(0 == 
smu7_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address, (uint8_t *)(&avfs_graphics_level_polaris10), graphics_level_size, 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!", @@ -153,7 +153,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address, (uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!", return -1); @@ -162,7 +162,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd); - PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address, + PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address, (uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000), "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!", return -1); @@ -172,9 +172,9 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) static int -polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) +polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); switch (smu_data->avfs.avfs_btc_status) { case AVFS_BTC_COMPLETED_PREVIOUSLY: @@ -183,20 +183,20 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */ smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED; - PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr), "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU", return -EINVAL); if (smu_data->avfs.avfs_btc_param > 1) { pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting."); smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL; - PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(hwmgr), "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ", return -EINVAL); } smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED; - PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr), + PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr), "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. 
AVFS Disabled", return -EINVAL); smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS; @@ -215,146 +215,146 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) return 0; } -static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr) +static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* Wait for smc boot up */ - /* SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */ + /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */ /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result != 0) return result; /* Clear status */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); /* Call Test SMU message with 0x20000 offset to trigger SMU start */ - smu7_send_msg_to_smc_offset(smumgr); + smu7_send_msg_to_smc_offset(hwmgr); /* Wait done bit to be set */ /* Check pass/failed indicator */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); - if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) +static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /* Clear firmware interrupt enable flag */ - /* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + /* 
PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int polaris10_start_smu(struct pp_smumgr *smumgr) +static int polaris10_start_smu(struct pp_hwmgr *hwmgr) { int result = 0; - struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); bool SMU_VFT_INTACT; /* Only start SMC if SMC RAM is not running */ - if (!smu7_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(hwmgr)) { SMU_VFT_INTACT = false; - smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); - smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); + smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); + smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); /* Check if SMU is running in protected mode */ if (smu_data->protected_mode == 0) { - result = polaris10_start_smu_in_non_protection_mode(smumgr); + result = polaris10_start_smu_in_non_protection_mode(hwmgr); } else { - result = polaris10_start_smu_in_protection_mode(smumgr); + result = polaris10_start_smu_in_protection_mode(hwmgr); /* If failed, try with different security Key. 
*/ if (result != 0) { smu_data->smu7_data.security_hard_key ^= 1; - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); - result = polaris10_start_smu_in_protection_mode(smumgr); + cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); + result = polaris10_start_smu_in_protection_mode(hwmgr); } } if (result != 0) PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result); - polaris10_avfs_event_mgr(smumgr, true); + polaris10_avfs_event_mgr(hwmgr, true); } else SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */ - polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT); + polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT); /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */ - smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), + smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters), &(smu_data->smu7_data.soft_regs_start), 0x40000); - result = smu7_request_smu_load_fw(smumgr); + result = smu7_request_smu_load_fw(hwmgr); return result; } -static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) +static bool polaris10_is_hw_avfs_present(struct pp_hwmgr *hwmgr) { uint32_t efuse; - efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); efuse &= 0x00000001; if (efuse) return true; @@ -362,7 +362,7 @@ static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) return false; } -static int polaris10_smu_init(struct pp_smumgr *smumgr) +static int polaris10_smu_init(struct pp_hwmgr *hwmgr) { struct polaris10_smumgr *smu_data; int i; @@ -371,9 +371,9 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) if (smu_data == NULL) return -ENOMEM; - smumgr->backend = smu_data; + hwmgr->smu_backend = smu_data; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c index ce0a30388ea1..b98ade676d12 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c @@ -48,20 +48,20 @@ #define smnMP1_FIRMWARE_FLAGS 0x3010028 -bool rv_is_smc_ram_running(struct pp_smumgr *smumgr) +bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr) { uint32_t mp1_fw_flags, reg; reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); - cgs_write_register(smumgr->device, reg, + cgs_write_register(hwmgr->device, reg, (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); - mp1_fw_flags = cgs_read_register(smumgr->device, reg); + mp1_fw_flags = cgs_read_register(hwmgr->device, reg); if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) return true; @@ -69,97 +69,97 @@ bool rv_is_smc_ram_running(struct pp_smumgr *smumgr) return false; } -static uint32_t rv_wait_for_response(struct pp_smumgr *smumgr) +static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr) { uint32_t reg; - if (!rv_is_smc_ram_running(smumgr)) + if (!rv_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - smum_wait_for_register_unequal(smumgr, reg, + 
phm_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); - return cgs_read_register(smumgr->device, reg); + return cgs_read_register(hwmgr->device, reg); } -int rv_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, +int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - if (!rv_is_smc_ram_running(smumgr)) + if (!rv_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(smumgr->device, reg, msg); + cgs_write_register(hwmgr->device, reg, msg); return 0; } -int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg) +int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg) { uint32_t reg; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - *arg = cgs_read_register(smumgr->device, reg); + *arg = cgs_read_register(hwmgr->device, reg); return 0; } -int rv_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - rv_wait_for_response(smumgr); + rv_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); - rv_send_msg_to_smc_without_waiting(smumgr, msg); + rv_send_msg_to_smc_without_waiting(hwmgr, msg); - if (rv_wait_for_response(smumgr) == 0) + if (rv_wait_for_response(hwmgr) == 0) printk("Failed to send Message %x.\n", msg); return 0; } -int rv_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { uint32_t reg; - rv_wait_for_response(smumgr); + rv_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(smumgr->device, reg, parameter); + cgs_write_register(hwmgr->device, reg, parameter); - rv_send_msg_to_smc_without_waiting(smumgr, msg); + rv_send_msg_to_smc_without_waiting(hwmgr, msg); - if (rv_wait_for_response(smumgr) == 0) + if (rv_wait_for_response(hwmgr) == 0) printk("Failed to send Message %x.\n", msg); return 0; } -int rv_copy_table_from_smc(struct pp_smumgr *smumgr, +int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct rv_smumgr *priv = - (struct rv_smumgr *)(smumgr->backend); + (struct rv_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL;); @@ -167,16 +167,16 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr, "Invalid SMU Table version!", return -EINVAL;); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, 
priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", @@ -188,11 +188,11 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr, return 0; } -int rv_copy_table_to_smc(struct pp_smumgr *smumgr, +int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct rv_smumgr *priv = - (struct rv_smumgr *)(smumgr->backend); + (struct rv_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL;); @@ -204,17 +204,17 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableDram2Smu, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", @@ -223,15 +223,15 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr, return 0; } -static int rv_verify_smc_interface(struct pp_smumgr *smumgr) +static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr) { uint32_t smc_driver_if_version; - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion), "Attempt to get SMC IF Version Number Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr, &smc_driver_if_version), "Attempt to read SMC IF Version Number Failed!", return -EINVAL); @@ -243,9 +243,9 @@ static int rv_verify_smc_interface(struct pp_smumgr *smumgr) } /* sdma is disabled by default in vbios, need to re-enable in driver */ -static int rv_smc_enable_sdma(struct pp_smumgr *smumgr) +static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma), "Attempt to power up sdma Failed!", return -EINVAL); @@ -253,9 +253,9 @@ static int rv_smc_enable_sdma(struct pp_smumgr *smumgr) return 0; } -static int rv_smc_disable_sdma(struct pp_smumgr *smumgr) +static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma), "Attempt to power down sdma Failed!", return -EINVAL); @@ -264,9 +264,9 @@ static int rv_smc_disable_sdma(struct pp_smumgr *smumgr) } /* vcn is disabled by default in vbios, need to re-enable in driver */ -static int rv_smc_enable_vcn(struct pp_smumgr *smumgr) +static int rv_smc_enable_vcn(struct 
pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerUpVcn, 0), "Attempt to power up vcn Failed!", return -EINVAL); @@ -274,9 +274,9 @@ static int rv_smc_enable_vcn(struct pp_smumgr *smumgr) return 0; } -static int rv_smc_disable_vcn(struct pp_smumgr *smumgr) +static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerDownVcn, 0), "Attempt to power down vcn Failed!", return -EINVAL); @@ -284,38 +284,38 @@ static int rv_smc_disable_vcn(struct pp_smumgr *smumgr) return 0; } -static int rv_smu_fini(struct pp_smumgr *smumgr) +static int rv_smu_fini(struct pp_hwmgr *hwmgr) { struct rv_smumgr *priv = - (struct rv_smumgr *)(smumgr->backend); + (struct rv_smumgr *)(hwmgr->smu_backend); if (priv) { - rv_smc_disable_sdma(smumgr); - rv_smc_disable_vcn(smumgr); - cgs_free_gpu_mem(smumgr->device, + rv_smc_disable_sdma(hwmgr); + rv_smc_disable_vcn(hwmgr); + cgs_free_gpu_mem(hwmgr->device, priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, priv->smu_tables.entry[CLOCKTABLE].handle); - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; } return 0; } -static int rv_start_smu(struct pp_smumgr *smumgr) +static int rv_start_smu(struct pp_hwmgr *hwmgr) { - if (rv_verify_smc_interface(smumgr)) + if (rv_verify_smc_interface(hwmgr)) return -EINVAL; - if (rv_smc_enable_sdma(smumgr)) + if (rv_smc_enable_sdma(hwmgr)) return -EINVAL; - if (rv_smc_enable_vcn(smumgr)) + if (rv_smc_enable_vcn(hwmgr)) return -EINVAL; return 0; } -static int rv_smu_init(struct pp_smumgr *smumgr) +static int rv_smu_init(struct pp_hwmgr *hwmgr) { struct rv_smumgr *priv; uint64_t mc_addr; @@ -327,10 +327,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr) if (!priv) return -ENOMEM; - smumgr->backend = priv; + hwmgr->smu_backend = priv; /* allocate space for watermarks table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(Watermarks_t), CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -340,8 +340,8 @@ static int rv_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[rv_smu_init] Out of memory for wmtable.", - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; return -EINVAL); priv->smu_tables.entry[WMTABLE].version = 0x01; @@ -355,7 +355,7 @@ static int rv_smu_init(struct pp_smumgr *smumgr) priv->smu_tables.entry[WMTABLE].handle = handle; /* allocate space for watermarks table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(DpmClocks_t), CGS_GPU_MEM_TYPE__GART_CACHEABLE, PAGE_SIZE, @@ -365,10 +365,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[rv_smu_init] Out of memory for CLOCKTABLE.", - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; return -EINVAL); priv->smu_tables.entry[CLOCKTABLE].version = 0x01; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h index 262c8ded87c0..58888400f1b8 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h +++ 
b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h @@ -51,11 +51,11 @@ struct rv_smumgr { struct smu_table_array smu_tables; }; -int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg); -bool rv_is_smc_ram_running(struct pp_smumgr *smumgr); -int rv_copy_table_from_smc(struct pp_smumgr *smumgr, +int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg); +bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr); +int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); -int rv_copy_table_to_smc(struct pp_smumgr *smumgr, +int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index c49a6f22002f..2ae05bbdb974 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -34,18 +34,18 @@ #define SMU7_SMC_SIZE 0x20000 -static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit) +static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit) { PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL); PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL); - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */ + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */ return 0; } -int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) +int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit) { uint32_t data; uint32_t addr; @@ -59,7 +59,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres addr = smc_start_address; while (byte_count >= 4) { - smu7_read_smc_sram_dword(smumgr, addr, &data, limit); + smu7_read_smc_sram_dword(hwmgr, addr, &data, limit); *dest = PP_SMC_TO_HOST_UL(data); @@ -69,7 +69,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres } if (byte_count) { - smu7_read_smc_sram_dword(smumgr, addr, &data, limit); + smu7_read_smc_sram_dword(hwmgr, addr, &data, limit); *pdata = PP_SMC_TO_HOST_UL(data); /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */ dest_byte = (uint8_t *)dest; @@ -81,7 +81,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres } -int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, +int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) { int result; @@ -99,12 +99,12 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, /* Bytes are written into the SMC addres space with the MSB first. 
*/ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3]; - result = smu7_set_smc_sram_address(smumgr, addr, limit); + result = smu7_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data); src += 4; byte_count -= 4; @@ -115,13 +115,13 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, data = 0; - result = smu7_set_smc_sram_address(smumgr, addr, limit); + result = smu7_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); + original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11); extra_shift = 8 * (4 - byte_count); @@ -135,53 +135,53 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, data |= (original_data & ~((~0UL) << extra_shift)); - result = smu7_set_smc_sram_address(smumgr, addr, limit); + result = smu7_set_smc_sram_address(hwmgr, addr, limit); if (0 != result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data); } return 0; } -int smu7_program_jump_on_start(struct pp_smumgr *smumgr) +int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr) { static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 }; - smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1); + smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1); return 0; } -bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr) +bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr) { - return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) - && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); + return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) + && (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); } -int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int ret; - if (!smu7_is_smc_ram_running(smumgr)) + if (!smu7_is_smc_ram_running(hwmgr)) return -EINVAL; - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) pr_info("\n failed to send pre message %x ret is %d \n", msg, ret); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) pr_info("\n failed to send message %x ret is %d \n", msg, ret); @@ -189,53 +189,53 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) return 0; } -int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg) +int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); return 0; } -int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr 
*smumgr, uint16_t msg, uint32_t parameter) +int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (!smu7_is_smc_ram_running(smumgr)) { + if (!smu7_is_smc_ram_running(hwmgr)) { return -EINVAL; } - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); - return smu7_send_msg_to_smc(smumgr, msg); + return smu7_send_msg_to_smc(hwmgr, msg); } -int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) +int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); - return smu7_send_msg_to_smc_without_waiting(smumgr, msg); + return smu7_send_msg_to_smc_without_waiting(hwmgr, msg); } -int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr) +int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr) { - cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000); + cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000); - cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); + cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test); - SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) + if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP)) pr_info("Failed to send Message.\n"); return 0; } -int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr) +int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr) { - if (!smu7_is_smc_ram_running(smumgr)) + if (!smu7_is_smc_ram_running(hwmgr)) return -EINVAL; - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); return 0; } @@ -289,29 +289,29 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) } -int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) +int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit) { int result; - result = smu7_set_smc_sram_address(smumgr, smc_addr, limit); + result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit); if (result) return result; - *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11); + *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11); return 0; } -int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit) +int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit) { int result; - result = smu7_set_smc_sram_address(smumgr, smc_addr, limit); + result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit); if (result) return result; - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value); return 0; } @@ -354,14 +354,14 @@ static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type) return result; } -static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, +static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr, uint32_t fw_type, struct 
SMU_Entry *entry) { int result = 0; struct cgs_firmware_info info = {0}; - result = cgs_get_firmware_info(smumgr->device, + result = cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(fw_type), &info); @@ -374,7 +374,7 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, entry->meta_data_addr_low = 0; /* digest need be excluded out */ - if (cgs_is_virtualization_enabled(smumgr->device)) + if (cgs_is_virtualization_enabled(hwmgr->device)) info.image_size -= 20; entry->data_size_byte = info.image_size; entry->num_register_entries = 0; @@ -389,30 +389,30 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr, return 0; } -int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) +int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t fw_to_load; int result = 0; struct SMU_DRAMData_TOC *toc; - if (!smumgr->reload_fw) { + if (!hwmgr->reload_fw) { pr_info("skip reloading...\n"); return 0; } if (smu_data->soft_regs_start) - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, - smu_data->soft_regs_start + smum_get_offsetof(smumgr, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + smu_data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, UcodeLoadStatus), 0x0); - if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ - if (!cgs_is_virtualization_enabled(smumgr->device)) { - smu7_send_msg_to_smc_with_parameter(smumgr, + if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ + if (!cgs_is_virtualization_enabled(hwmgr->device)) { + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high); - smu7_send_msg_to_smc_with_parameter(smumgr, + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low); } @@ -439,80 +439,79 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) toc->num_entries = 0; toc->structure_version = 1; - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - 
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - if (cgs_is_virtualization_enabled(smumgr->device)) - PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, + if (cgs_is_virtualization_enabled(hwmgr->device)) + PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); - smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); - if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load)) + if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load)) pr_err("Fail to Request SMU Load uCode"); return result; } /* Check if the FW has been loaded, SMU will not return if loading has not finished. 
*/ -int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type) +int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type) { - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type); uint32_t ret; - ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11, - smu_data->soft_regs_start + smum_get_offsetof(smumgr, + ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11, + smu_data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, UcodeLoadStatus), fw_mask, fw_mask); - return ret; } -int smu7_reload_firmware(struct pp_smumgr *smumgr) +int smu7_reload_firmware(struct pp_hwmgr *hwmgr) { - return smumgr->smumgr_funcs->start_smu(smumgr); + return hwmgr->smumgr_funcs->start_smu(hwmgr); } -static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit) +static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit) { uint32_t byte_count = length; PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL); - cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); + cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1); for (; byte_count >= 4; byte_count -= 4) - cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++); + cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++); - SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); + PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be dividable by 4.", return -EINVAL); @@ -520,41 +519,41 @@ static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t leng } -int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr) +int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr) { int result = 0; - struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend); + struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); struct cgs_firmware_info info = {0}; if (smu_data->security_hard_key == 1) - cgs_get_firmware_info(smumgr->device, + cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); else - cgs_get_firmware_info(smumgr->device, + cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); - smumgr->is_kicker = info.is_kicker; + hwmgr->is_kicker = info.is_kicker; - result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); + result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); return result; } -int smu7_init(struct pp_smumgr *smumgr) +int smu7_init(struct pp_hwmgr *hwmgr) { struct smu7_smumgr *smu_data; uint8_t *internal_buf; uint64_t mc_addr = 0; /* Allocate memory for backend private data */ - smu_data = (struct smu7_smumgr *)(smumgr->backend); + smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); smu_data->header_buffer.data_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; /* Allocate FW image data structure and header buffer and * send the header buffer address to SMU */ - 
smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, smu_data->header_buffer.data_size, CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -568,16 +567,16 @@ int smu7_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE((NULL != smu_data->header), "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)smu_data->header_buffer.handle); return -EINVAL); - if (cgs_is_virtualization_enabled(smumgr->device)) + if (cgs_is_virtualization_enabled(hwmgr->device)) return 0; smu_data->smu_buffer.data_size = 200*4096; - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, smu_data->smu_buffer.data_size, CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -591,12 +590,12 @@ int smu7_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE((NULL != internal_buf), "Out of memory.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)smu_data->smu_buffer.handle); return -EINVAL); - if (smum_is_hw_avfs_present(smumgr)) + if (smum_is_hw_avfs_present(hwmgr)) smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; else smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; @@ -605,12 +604,10 @@ int smu7_init(struct pp_smumgr *smumgr) } -int smu7_smu_fini(struct pp_smumgr *smumgr) +int smu7_smu_fini(struct pp_hwmgr *hwmgr) { - if (smumgr->backend) { - kfree(smumgr->backend); - smumgr->backend = NULL; - } - cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; + cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index ee5e32d2921e..0b63c5c1043c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -60,32 +60,32 @@ struct smu7_smumgr { }; -int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, +int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit); -int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, +int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit); -int smu7_program_jump_on_start(struct pp_smumgr *smumgr); -bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr); -int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg); -int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg); -int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, +int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr); +bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr); +int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); +int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg); +int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, +int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); -int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr); -int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr); +int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr); +int 
smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr); enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type); -int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, +int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit); -int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, +int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit); -int smu7_request_smu_load_fw(struct pp_smumgr *smumgr); -int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type); -int smu7_reload_firmware(struct pp_smumgr *smumgr); -int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr); -int smu7_init(struct pp_smumgr *smumgr); -int smu7_smu_fini(struct pp_smumgr *smumgr); +int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr); +int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type); +int smu7_reload_firmware(struct pp_hwmgr *hwmgr); +int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr); +int smu7_init(struct pp_hwmgr *hwmgr); +int smu7_smu_fini(struct pp_hwmgr *hwmgr); #endif
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 3bdf6478de7f..867388456530 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -27,7 +27,6 @@ #include <linux/slab.h> #include <linux/types.h> #include <drm/amdgpu_drm.h> -#include "pp_instance.h" #include "smumgr.h" #include "cgs_common.h" @@ -46,88 +45,18 @@ MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); -int smum_early_init(struct pp_instance *handle) +int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr; - - if (handle == NULL) - return -EINVAL; - - smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL); - if (smumgr == NULL) - return -ENOMEM; - - smumgr->device = handle->device; - smumgr->chip_family = handle->chip_family; - smumgr->chip_id = handle->chip_id; - smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; - smumgr->reload_fw = 1; - handle->smu_mgr = smumgr; - - switch (smumgr->chip_family) { - case AMDGPU_FAMILY_CZ: - smumgr->smumgr_funcs = &cz_smu_funcs; - break; - case AMDGPU_FAMILY_VI: - switch (smumgr->chip_id) { - case CHIP_TOPAZ: - smumgr->smumgr_funcs = &iceland_smu_funcs; - break; - case CHIP_TONGA: - smumgr->smumgr_funcs = &tonga_smu_funcs; - break; - case CHIP_FIJI: - smumgr->smumgr_funcs = &fiji_smu_funcs; - break; - case CHIP_POLARIS11: - case CHIP_POLARIS10: - case CHIP_POLARIS12: - smumgr->smumgr_funcs = &polaris10_smu_funcs; - break; - default: - return -EINVAL; - } - break; - case AMDGPU_FAMILY_AI: - switch (smumgr->chip_id) { - case CHIP_VEGA10: - smumgr->smumgr_funcs = &vega10_smu_funcs; - break; - default: - return -EINVAL; - } - break; - case AMDGPU_FAMILY_RV: - switch (smumgr->chip_id) { - case CHIP_RAVEN: - smumgr->smumgr_funcs = &rv_smu_funcs; - break; - default: - return -EINVAL; - } - break; - default: - kfree(smumgr); - return -EINVAL; - } - - return 0; -} - -int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) -{ - if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable) - return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr); + if (NULL != hwmgr->smumgr_funcs->thermal_avfs_enable) + return hwmgr->smumgr_funcs->thermal_avfs_enable(hwmgr); return 0; } -int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, - void *input, void *output, void *storage, int result) +int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table) - return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr); + if (NULL != hwmgr->smumgr_funcs->thermal_setup_fan_table) + return hwmgr->smumgr_funcs->thermal_setup_fan_table(hwmgr); return 0; } @@ -135,8 +64,8 @@ int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold) - return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr); + if (NULL != hwmgr->smumgr_funcs->update_sclk_threshold) + return hwmgr->smumgr_funcs->update_sclk_threshold(hwmgr); return 0; } @@ -144,163 +73,75 @@ int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr) int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) { - if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table) - return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type); + if (NULL != 
hwmgr->smumgr_funcs->update_smc_table) + return hwmgr->smumgr_funcs->update_smc_table(hwmgr, type); return 0; } -uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member) +uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, uint32_t type, uint32_t member) { - if (NULL != smumgr->smumgr_funcs->get_offsetof) - return smumgr->smumgr_funcs->get_offsetof(type, member); + if (NULL != hwmgr->smumgr_funcs->get_offsetof) + return hwmgr->smumgr_funcs->get_offsetof(type, member); return 0; } int smum_process_firmware_header(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header) - return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr); + if (NULL != hwmgr->smumgr_funcs->process_firmware_header) + return hwmgr->smumgr_funcs->process_firmware_header(hwmgr); return 0; } -int smum_get_argument(struct pp_smumgr *smumgr) +int smum_get_argument(struct pp_hwmgr *hwmgr) { - if (NULL != smumgr->smumgr_funcs->get_argument) - return smumgr->smumgr_funcs->get_argument(smumgr); + if (NULL != hwmgr->smumgr_funcs->get_argument) + return hwmgr->smumgr_funcs->get_argument(hwmgr); return 0; } -uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value) +uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value) { - if (NULL != smumgr->smumgr_funcs->get_mac_definition) - return smumgr->smumgr_funcs->get_mac_definition(value); + if (NULL != hwmgr->smumgr_funcs->get_mac_definition) + return hwmgr->smumgr_funcs->get_mac_definition(value); return 0; } -int smum_download_powerplay_table(struct pp_smumgr *smumgr, - void **table) +int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table) { - if (NULL != smumgr->smumgr_funcs->download_pptable_settings) - return smumgr->smumgr_funcs->download_pptable_settings(smumgr, + if (NULL != hwmgr->smumgr_funcs->download_pptable_settings) + return hwmgr->smumgr_funcs->download_pptable_settings(hwmgr, table); return 0; } -int smum_upload_powerplay_table(struct pp_smumgr *smumgr) +int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr) { - if (NULL != smumgr->smumgr_funcs->upload_pptable_settings) - return smumgr->smumgr_funcs->upload_pptable_settings(smumgr); + if (NULL != hwmgr->smumgr_funcs->upload_pptable_settings) + return hwmgr->smumgr_funcs->upload_pptable_settings(hwmgr); return 0; } -int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { - if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL) + if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL) return -EINVAL; - return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg); + return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg); } -int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (smumgr == NULL || - smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL) - return -EINVAL; - return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter( - smumgr, msg, parameter); -} - -/* - * Returns once the part of the register indicated by the mask has - * reached the given value. 
- */ -int smum_wait_on_register(struct pp_smumgr *smumgr, - uint32_t index, - uint32_t value, uint32_t mask) -{ - uint32_t i; - uint32_t cur_value; - - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - for (i = 0; i < smumgr->usec_timeout; i++) { - cur_value = cgs_read_register(smumgr->device, index); - if ((cur_value & mask) == (value & mask)) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == smumgr->usec_timeout) - return -1; - - return 0; -} - -int smum_wait_for_register_unequal(struct pp_smumgr *smumgr, - uint32_t index, - uint32_t value, uint32_t mask) -{ - uint32_t i; - uint32_t cur_value; - - if (smumgr == NULL) + if (hwmgr == NULL || + hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL) return -EINVAL; - - for (i = 0; i < smumgr->usec_timeout; i++) { - cur_value = cgs_read_register(smumgr->device, - index); - if ((cur_value & mask) != (value & mask)) - break; - udelay(1); - } - - /* timeout means wrong logic */ - if (i == smumgr->usec_timeout) - return -1; - - return 0; -} - - -/* - * Returns once the part of the register indicated by the mask - * has reached the given value.The indirect space is described by - * giving the memory-mapped index of the indirect index register. - */ -int smum_wait_on_indirect_register(struct pp_smumgr *smumgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask) -{ - if (smumgr == NULL || smumgr->device == NULL) - return -EINVAL; - - cgs_write_register(smumgr->device, indirect_port, index); - return smum_wait_on_register(smumgr, indirect_port + 1, - mask, value); -} - -void smum_wait_for_indirect_register_unequal( - struct pp_smumgr *smumgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask) -{ - if (smumgr == NULL || smumgr->device == NULL) - return; - cgs_write_register(smumgr->device, indirect_port, index); - smum_wait_for_register_unequal(smumgr, indirect_port + 1, - value, mask); + return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter( + hwmgr, msg, parameter); } int smu_allocate_memory(void *device, uint32_t size, @@ -316,7 +157,7 @@ int smu_allocate_memory(void *device, uint32_t size, return -EINVAL; ret = cgs_alloc_gpu_mem(device, type, size, byte_align, - 0, 0, (cgs_handle_t *)handle); + (cgs_handle_t *)handle); if (ret) return -ENOMEM; @@ -356,24 +197,24 @@ int smu_free_memory(void *device, void *handle) int smum_init_smc_table(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table) - return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr); + if (NULL != hwmgr->smumgr_funcs->init_smc_table) + return hwmgr->smumgr_funcs->init_smc_table(hwmgr); return 0; } int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels) - return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr); + if (NULL != hwmgr->smumgr_funcs->populate_all_graphic_levels) + return hwmgr->smumgr_funcs->populate_all_graphic_levels(hwmgr); return 0; } int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels) - return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr); + if (NULL != hwmgr->smumgr_funcs->populate_all_memory_levels) + return hwmgr->smumgr_funcs->populate_all_memory_levels(hwmgr); return 0; } @@ -381,16 +222,16 @@ int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr) /*this interface is needed by island ci/vi */ int smum_initialize_mc_reg_table(struct 
pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table) - return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr); + if (NULL != hwmgr->smumgr_funcs->initialize_mc_reg_table) + return hwmgr->smumgr_funcs->initialize_mc_reg_table(hwmgr); return 0; } bool smum_is_dpm_running(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running) - return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr); + if (NULL != hwmgr->smumgr_funcs->is_dpm_running) + return hwmgr->smumgr_funcs->is_dpm_running(hwmgr); return true; } @@ -398,17 +239,17 @@ bool smum_is_dpm_running(struct pp_hwmgr *hwmgr) int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { - if (hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels) - return hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels( + if (hwmgr->smumgr_funcs->populate_requested_graphic_levels) + return hwmgr->smumgr_funcs->populate_requested_graphic_levels( hwmgr, request); return 0; } -bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr) +bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr) { - if (smumgr->smumgr_funcs->is_hw_avfs_present) - return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr); + if (hwmgr->smumgr_funcs->is_hw_avfs_present) + return hwmgr->smumgr_funcs->is_hw_avfs_present(hwmgr); return false; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c index 65d3a4893958..1f720ccdaf99 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c @@ -97,7 +97,7 @@ static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = { */ -static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, +static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table, uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) { @@ -406,7 +406,7 @@ static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_ { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t i; /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
*/ @@ -539,7 +539,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level); /* populate graphics levels*/ - result = tonga_get_dependecy_volt_by_clk(hwmgr, + result = tonga_get_dependency_volt_by_clk(hwmgr, pptable_info->vdd_dep_on_sclk, engine_clock, &graphic_level->MinVoltage, &mvdd); PP_ASSERT_WITH_CODE((!result), @@ -598,7 +598,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct smu7_dpm_table *dpm_table = &data->dpm_table; struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table; @@ -690,7 +690,7 @@ int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; } /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -895,7 +895,7 @@ static int tonga_populate_single_memory_level( uint32_t mclk_strobe_mode_threshold = 40000; if (NULL != pptable_info->vdd_dep_on_mclk) { - result = tonga_get_dependecy_volt_by_clk(hwmgr, + result = tonga_get_dependency_volt_by_clk(hwmgr, pptable_info->vdd_dep_on_mclk, memory_clock, &memory_level->MinVoltage, &mvdd); @@ -1002,7 +1002,7 @@ int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); struct smu7_dpm_table *dpm_table = &data->dpm_table; int result; @@ -1048,7 +1048,7 @@ int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr) smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; /* level count will send to smc once at init smc table and never change*/ - result = smu7_copy_bytes_to_smc(hwmgr->smumgr, + result = smu7_copy_bytes_to_smc(hwmgr, level_array_address, (uint8_t *)levels, (uint32_t)level_array_size, SMC_RAM_END); @@ -1090,7 +1090,7 @@ static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, { int result = 0; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct pp_atomctrl_clock_dividers_vi dividers; @@ -1454,7 +1454,7 @@ static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); int result = 0; SMU72_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; @@ -1475,7 +1475,7 @@ static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) if (!result) { result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.arb_table_start, (uint8_t *)&arb_regs, 
sizeof(SMU72_Discrete_MCArbDramTimingTable), @@ -1492,7 +1492,7 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr, int result = 0; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); table->GraphicsBootLevel = 0; table->MemoryBootLevel = 0; @@ -1543,7 +1543,7 @@ static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) volt_with_cks, value; uint16_t clock_freq_u16; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = @@ -1782,9 +1782,9 @@ static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr, * @param hwmgr the address of the powerplay hardware manager. * @return always 0 */ -static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) +static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; @@ -1797,7 +1797,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) * In reality this field should not be in that structure * but in a soft register. */ - result = smu7_read_smc_sram_dword(smumgr, + result = smu7_read_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END); if (result != 0) @@ -1806,7 +1806,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) tmp &= 0x00FFFFFF; tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24; - return smu7_write_smc_sram_dword(smumgr, + return smu7_write_smc_sram_dword(hwmgr, smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END); } @@ -1814,7 +1814,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr) static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table); struct phm_ppt_v1_information *table_info = @@ -1838,7 +1838,7 @@ static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; dpm_table->BAPM_TEMP_GRADIENT = - PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient); pdef1 = defaults->bapmti_r; pdef2 = defaults->bapmti_rc; @@ -1861,7 +1861,7 @@ static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; @@ -1876,7 +1876,7 @@ static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) { uint16_t tdc_limit; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; struct phm_ppt_v1_information *table_info = (struct 
phm_ppt_v1_information *)(hwmgr->pptable); @@ -1897,11 +1897,11 @@ static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults; uint32_t temp; - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, fuse_table_offset + offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl), (uint32_t *)&temp, SMC_RAM_END)) @@ -1919,7 +1919,7 @@ static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) { int i; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -1930,7 +1930,7 @@ static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); if ((hwmgr->thermal_controller.advanceFanControlParameters. usFanOutputSensitivity & (1 << 15)) || @@ -1949,7 +1949,7 @@ static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) { int i; struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); /* Currently not used. Set all to zero. */ for (i = 0; i < 16; i++) @@ -1958,15 +1958,10 @@ static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) return 0; } -static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) -{ - return 0; -} - static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; @@ -1987,12 +1982,12 @@ static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t pm_fuse_table_offset; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerContainment)) { - if (smu7_read_smc_sram_dword(hwmgr->smumgr, + if (smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, PmFuseTable), &pm_fuse_table_offset, SMC_RAM_END)) @@ -2035,13 +2030,6 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Attempt to populate GnbLPML Failed !", return -EINVAL); - /* DW19 */ - if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) - PP_ASSERT_WITH_CODE(false, - "Attempt to populate GnbLPML " - "Min and Max Vid Failed !", - return -EINVAL); - /* DW20 */ if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr)) PP_ASSERT_WITH_CODE( @@ -2050,7 +2038,7 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) "Hi and Lo Sidd Failed !", return -EINVAL); - if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, (uint8_t *)&smu_data->power_tune_table, sizeof(struct SMU72_Discrete_PmFuses), 
SMC_RAM_END)) PP_ASSERT_WITH_CODE(false, @@ -2060,10 +2048,10 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) return 0; } -static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr, +static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU72_Discrete_MCRegisters *mc_reg_table) { - const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend; + const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend; uint32_t i, j; @@ -2104,12 +2092,12 @@ static void tonga_convert_mc_registers( } static int tonga_convert_mc_reg_table_entry_to_smc( - struct pp_smumgr *smumgr, + struct pp_hwmgr *hwmgr, const uint32_t memory_clock, SMU72_Discrete_MCRegisterSet *mc_reg_table_data ) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t i = 0; for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) { @@ -2139,7 +2127,7 @@ static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, for (i = 0; i < data->dpm_table.mclk_table.count; i++) { res = tonga_convert_mc_reg_table_entry_to_smc( - hwmgr->smumgr, + hwmgr, data->dpm_table.mclk_table.dpm_levels[i].value, &mc_regs->data[i] ); @@ -2153,8 +2141,7 @@ static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) { - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t address; int32_t result; @@ -2175,7 +2162,7 @@ static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) (uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]); return smu7_copy_bytes_to_smc( - hwmgr->smumgr, address, + hwmgr, address, (uint8_t *)&smu_data->mc_regs.data[0], sizeof(SMU72_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, @@ -2185,11 +2172,10 @@ static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct pp_smumgr *smumgr = hwmgr->smumgr; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters)); - result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs)); + result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs)); PP_ASSERT_WITH_CODE(!result, "Failed to initialize MCRegTable for the MC register addresses !", return result;); @@ -2199,13 +2185,13 @@ static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) "Failed to initialize MCRegTable for driver state !", return result;); - return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start, + return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start, (uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END); } static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2221,7 +2207,7 @@ static void 
tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend); struct SMU72_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; unsigned min_level = 1; @@ -2267,7 +2253,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) int result; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2483,7 +2469,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags), (uint8_t *)&(table->SystemFlags), sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController), @@ -2492,7 +2478,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(!result, "Failed to upload dpm data to SMC memory !", return result;); - result = tonga_init_arb_table_index(hwmgr->smumgr); + result = tonga_init_arb_table_index(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to upload arb data to SMC memory !", return result); @@ -2521,7 +2507,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; uint32_t duty100; uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; @@ -2600,7 +2586,7 @@ int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) fan_table.FanControl_GL_Flag = 1; - res = smu7_copy_bytes_to_smc(hwmgr->smumgr, + res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), @@ -2625,7 +2611,7 @@ int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); int result = 0; uint32_t low_sclk_interrupt_threshold = 0; @@ -2642,7 +2628,7 @@ int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr) CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); result = smu7_copy_bytes_to_smc( - hwmgr->smumgr, + hwmgr, smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold), @@ -2728,7 +2714,7 @@ uint32_t tonga_get_mac_definition(uint32_t value) static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2753,7 +2739,7 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_UVDDPM) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + 
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); return 0; @@ -2762,7 +2748,7 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr) static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *smu_data = - (struct tonga_smumgr *)(hwmgr->smumgr->backend); + (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -2784,7 +2770,7 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); return 0; @@ -2792,7 +2778,7 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr) static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) { - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t mm_boot_level_offset, mm_boot_level_value; smu_data->smc_state_table.SamuBootLevel = 0; @@ -2810,7 +2796,7 @@ static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SAMUDPM_SetEnabledMask, (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); return 0; @@ -2844,13 +2830,13 @@ int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); uint32_t tmp; int result; bool error = false; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, DpmTable), &tmp, SMC_RAM_END); @@ -2860,7 +2846,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); @@ -2873,7 +2859,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, mcRegisterTable), &tmp, SMC_RAM_END); @@ -2881,7 +2867,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) if (!result) smu_data->smu7_data.mc_reg_table_start = tmp; - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, FanTable), &tmp, SMC_RAM_END); @@ -2891,7 +2877,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, 
mcArbDramTimingTable), &tmp, SMC_RAM_END); @@ -2901,7 +2887,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) error |= (result != 0); - result = smu7_read_smc_sram_dword(hwmgr->smumgr, + result = smu7_read_smc_sram_dword(hwmgr, SMU72_FIRMWARE_HEADER_LOCATION + offsetof(SMU72_Firmware_Header, Version), &tmp, SMC_RAM_END); @@ -3170,7 +3156,7 @@ static int tonga_set_valid_flag(struct tonga_mc_reg_table *table) int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) { int result; - struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend); + struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend); pp_atomctrl_mc_reg_table *table; struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table; uint8_t module_index = tonga_get_memory_modile_index(hwmgr); @@ -3253,7 +3239,7 @@ int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, struct amd_pp_profile *request) { struct tonga_smumgr *smu_data = (struct tonga_smumgr *) - (hwmgr->smumgr->backend); + (hwmgr->smu_backend); struct SMU72_Discrete_GraphicsLevel *levels = smu_data->smc_state_table.GraphicsLevel; uint32_t array = smu_data->smu7_data.dpm_table_start + @@ -3270,6 +3256,6 @@ int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr, levels[i].DownHyst = request->down_hyst; } - return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels, + return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, array_size, SMC_RAM_END); } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h index 962860f13f24..9d6a78a65976 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h @@ -40,7 +40,7 @@ struct tonga_pt_defaults { uint8_t tdc_waterfall_ctl; uint8_t dte_ambient_temp_base; uint32_t display_cac; - uint32_t bamp_temp_gradient; + uint32_t bapm_temp_gradient; uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; }; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index c35f4c35c9ca..d22cf218cf18 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -37,125 +37,125 @@ #include "smu7_smumgr.h" -static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr) +static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr) { int result; /* Assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result) return result; /* Clear status */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); /* Enable clock */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /* De-assert reset */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Set SMU Auto Start */ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + 
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_INPUT_DATA, AUTO_START, 1); /* Clear firmware interrupt enable flag */ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); /** * Call Test SMU message with 0x20000 offset to trigger SMU start */ - smu7_send_msg_to_smc_offset(smumgr); + smu7_send_msg_to_smc_offset(hwmgr); /* Wait for done bit to be set */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); /* Check pass/failed indicator */ - if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, + if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) { pr_err("SMU Firmware start failed\n"); return -EINVAL; } /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return 0; } -static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr) +static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr) { int result = 0; /* wait for smc boot up */ - SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); /*Clear firmware interrupt enable flag*/ - cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1); - result = smu7_upload_smu_firmware_image(smumgr); + result = smu7_upload_smu_firmware_image(hwmgr); if (result != 0) return result; /* Set smc instruct start point at 0x0 */ - smu7_program_jump_on_start(smumgr); + smu7_program_jump_on_start(hwmgr); - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); /*De-assert reset*/ - SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0); /* Wait for firmware to initialize */ - SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); return result; } -static int tonga_start_smu(struct pp_smumgr *smumgr) +static int tonga_start_smu(struct pp_hwmgr *hwmgr) { int result; /* Only start SMC if SMC RAM is not running */ - if (!(smu7_is_smc_ram_running(smumgr) || - cgs_is_virtualization_enabled(smumgr->device))) { + if (!(smu7_is_smc_ram_running(hwmgr) || + cgs_is_virtualization_enabled(hwmgr->device))) { /*Check if SMU is running in protected mode*/ - if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { - result = tonga_start_in_non_protection_mode(smumgr); + result = tonga_start_in_non_protection_mode(hwmgr); if (result) return result; } else { - result = tonga_start_in_protection_mode(smumgr); + result = tonga_start_in_protection_mode(hwmgr); if (result) return result; } } - result = smu7_request_smu_load_fw(smumgr); + 
result = smu7_request_smu_load_fw(hwmgr); return result; } @@ -167,7 +167,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr) * @param smcAddress the address in the SMC RAM to access. * @param value to write to the SMC SRAM. */ -static int tonga_smu_init(struct pp_smumgr *smumgr) +static int tonga_smu_init(struct pp_hwmgr *hwmgr) { struct tonga_smumgr *tonga_priv = NULL; int i; @@ -176,9 +176,9 @@ static int tonga_smu_init(struct pp_smumgr *smumgr) if (tonga_priv == NULL) return -ENOMEM; - smumgr->backend = tonga_priv; + hwmgr->smu_backend = tonga_priv; - if (smu7_init(smumgr)) + if (smu7_init(hwmgr)) return -EINVAL; for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 408514c965a0..2f979fb86824 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -53,20 +53,20 @@ #define smnMP0_FW_INTF 0x3010104 #define smnMP1_PUB_CTRL 0x3010b14 -static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr) +static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr) { uint32_t mp1_fw_flags, reg; reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); - cgs_write_register(smumgr->device, reg, + cgs_write_register(hwmgr->device, reg, (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); reg = soc15_get_register_offset(NBIF_HWID, 0, mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); - mp1_fw_flags = cgs_read_register(smumgr->device, reg); + mp1_fw_flags = cgs_read_register(hwmgr->device, reg); if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) return true; @@ -80,20 +80,20 @@ static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr) * @param smumgr the address of the powerplay hardware manager. * @return TRUE SMC has responded, FALSE otherwise. */ -static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr) +static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - smum_wait_for_register_unequal(smumgr, reg, + phm_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); - return cgs_read_register(smumgr->device, reg); + return cgs_read_register(hwmgr->device, reg); } /* @@ -102,43 +102,43 @@ static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr) * @param msg the message to send. * @return Always return 0. */ -int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, +int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(smumgr->device, reg, msg); + cgs_write_register(hwmgr->device, reg, msg); return 0; } /* * Send a message to the SMC, and wait for its response. - * @param smumgr the address of the powerplay hardware manager. + * @param hwmgr the address of the powerplay hardware manager. * @param msg the message to send. * @return Always return 0. 
*/ -int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; - vega10_wait_for_response(smumgr); + vega10_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); - vega10_send_msg_to_smc_without_waiting(smumgr, msg); + vega10_send_msg_to_smc_without_waiting(hwmgr, msg); - if (vega10_wait_for_response(smumgr) != 1) + if (vega10_wait_for_response(hwmgr) != 1) pr_err("Failed to send message: 0x%x\n", msg); return 0; @@ -146,32 +146,32 @@ int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) /* * Send a message to the SMC with parameter - * @param smumgr: the address of the powerplay hardware manager. + * @param hwmgr: the address of the powerplay hardware manager. * @param msg: the message to send. * @param parameter: the parameter to send * @return Always return 0. */ -int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, +int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { uint32_t reg; - if (!vega10_is_smc_ram_running(smumgr)) + if (!vega10_is_smc_ram_running(hwmgr)) return -EINVAL; - vega10_wait_for_response(smumgr); + vega10_wait_for_response(hwmgr); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(smumgr->device, reg, 0); + cgs_write_register(hwmgr->device, reg, 0); reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(smumgr->device, reg, parameter); + cgs_write_register(hwmgr->device, reg, parameter); - vega10_send_msg_to_smc_without_waiting(smumgr, msg); + vega10_send_msg_to_smc_without_waiting(hwmgr, msg); - if (vega10_wait_for_response(smumgr) != 1) + if (vega10_wait_for_response(hwmgr) != 1) pr_err("Failed to send message: 0x%x\n", msg); return 0; @@ -180,51 +180,51 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, /* * Send a message to the SMC with parameter, do not wait for response - * @param smumgr: the address of the powerplay hardware manager. + * @param hwmgr: the address of the powerplay hardware manager. * @param msg: the message to send. * @param parameter: the parameter to send * @return The response that came from the SMC. */ int vega10_send_msg_to_smc_with_parameter_without_waiting( - struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) + struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { uint32_t reg; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(smumgr->device, reg, parameter); + cgs_write_register(hwmgr->device, reg, parameter); - return vega10_send_msg_to_smc_without_waiting(smumgr, msg); + return vega10_send_msg_to_smc_without_waiting(hwmgr, msg); } /* * Retrieve an argument from SMC. - * @param smumgr the address of the powerplay hardware manager. + * @param hwmgr the address of the powerplay hardware manager. * @param arg pointer to store the argument from SMC. * @return Always return 0. 
*/ -int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg) +int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg) { uint32_t reg; reg = soc15_get_register_offset(MP1_HWID, 0, mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - *arg = cgs_read_register(smumgr->device, reg); + *arg = cgs_read_register(hwmgr->device, reg); return 0; } /* * Copy table from SMC into driver FB - * @param smumgr the address of the SMC manager + * @param hwmgr the address of the HW manager * @param table_id the driver's table ID to copy from */ -int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, +int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); @@ -232,16 +232,16 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, "Invalid SMU Table version!", return -EINVAL); PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, "Invalid SMU Table Length!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_TransferTableSmu2Dram, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", @@ -255,14 +255,14 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, /* * Copy table from Driver FB into SMC - * @param smumgr the address of the SMC manager + * @param hwmgr the address of the HW manager * @param table_id the table to copy from */ -int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, +int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); @@ -274,17 +274,17 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, memcpy(priv->smu_tables.entry[table_id].table, table, priv->smu_tables.entry[table_id].size); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrHigh, priv->smu_tables.entry[table_id].table_addr_high) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetDriverDramAddrLow, priv->smu_tables.entry[table_id].table_addr_low) == 0, "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", return -EINVAL); - PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr, 
PPSMC_MSG_TransferTableDram2Smu, priv->smu_tables.entry[table_id].table_id) == 0, "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", @@ -293,87 +293,87 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, return 0; } -int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table) +int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table) { PP_ASSERT_WITH_CODE(avfs_table, "No access to SMC AVFS Table", return -EINVAL); - return vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE); + return vega10_copy_table_from_smc(hwmgr, avfs_table, AVFSTABLE); } -int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table) +int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table) { PP_ASSERT_WITH_CODE(avfs_table, "No access to SMC AVFS Table", return -EINVAL); - return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE); + return vega10_copy_table_to_smc(hwmgr, avfs_table, AVFSTABLE); } -int vega10_enable_smc_features(struct pp_smumgr *smumgr, +int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, bool enable, uint32_t feature_mask) { int msg = enable ? PPSMC_MSG_EnableSmuFeatures : PPSMC_MSG_DisableSmuFeatures; - return vega10_send_msg_to_smc_with_parameter(smumgr, + return vega10_send_msg_to_smc_with_parameter(hwmgr, msg, feature_mask); } -int vega10_get_smc_features(struct pp_smumgr *smumgr, +int vega10_get_smc_features(struct pp_hwmgr *hwmgr, uint32_t *features_enabled) { if (features_enabled == NULL) return -EINVAL; - if (!vega10_send_msg_to_smc(smumgr, + if (!vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures)) { - vega10_read_arg_from_smc(smumgr, features_enabled); + vega10_read_arg_from_smc(hwmgr, features_enabled); return 0; } return -EINVAL; } -int vega10_set_tools_address(struct pp_smumgr *smumgr) +int vega10_set_tools_address(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high || priv->smu_tables.entry[TOOLSTABLE].table_addr_low) { - if (!vega10_send_msg_to_smc_with_parameter(smumgr, + if (!vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrHigh, priv->smu_tables.entry[TOOLSTABLE].table_addr_high)) - vega10_send_msg_to_smc_with_parameter(smumgr, + vega10_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetToolsDramAddrLow, priv->smu_tables.entry[TOOLSTABLE].table_addr_low); } return 0; } -static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) +static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr) { uint32_t smc_driver_if_version; struct cgs_system_info sys_info = {0}; uint32_t dev_id; uint32_t rev_id; - PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr, + PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion), "Attempt to get SMC IF Version Number Failed!", return -EINVAL); - vega10_read_arg_from_smc(smumgr, &smc_driver_if_version); + vega10_read_arg_from_smc(hwmgr, &smc_driver_if_version); sys_info.size = sizeof(struct cgs_system_info); sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; - cgs_query_system_info(smumgr->device, &sys_info); + cgs_query_system_info(hwmgr->device, &sys_info); dev_id = (uint32_t)sys_info.value; sys_info.size = sizeof(struct cgs_system_info); sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV; - cgs_query_system_info(smumgr->device, &sys_info); + cgs_query_system_info(hwmgr->device, &sys_info); rev_id = (uint32_t)sys_info.value; if (!((dev_id == 0x687f) && @@ -392,7 +392,7 @@ 
static int vega10_verify_smc_interface(struct pp_smumgr *smumgr) return 0; } -static int vega10_smu_init(struct pp_smumgr *smumgr) +static int vega10_smu_init(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv; uint64_t mc_addr; @@ -401,7 +401,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) int ret; struct cgs_firmware_info info = {0}; - ret = cgs_get_firmware_info(smumgr->device, + ret = cgs_get_firmware_info(hwmgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); if (ret || !info.kptr) @@ -412,10 +412,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) if (!priv) return -ENOMEM; - smumgr->backend = priv; + hwmgr->smu_backend = priv; /* allocate space for pptable */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(PPTable_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -425,8 +425,8 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for pptable.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -441,7 +441,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) priv->smu_tables.entry[PPTABLE].handle = handle; /* allocate space for watermarks table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(Watermarks_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -451,10 +451,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for wmtable.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -469,7 +469,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) priv->smu_tables.entry[WMTABLE].handle = handle; /* allocate space for AVFS table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(AvfsTable_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -479,12 +479,12 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for avfs table.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -500,7 +500,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) tools_size = 0x19000; if (tools_size) { - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, tools_size, CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -522,7 +522,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) } /* allocate space for AVFS Fuse table */ - smu_allocate_memory(smumgr->device, + smu_allocate_memory(hwmgr->device, sizeof(AvfsFuseOverride_t), CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, PAGE_SIZE, @@ -532,16 +532,16 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) PP_ASSERT_WITH_CODE(kaddr, "[vega10_smu_init] Out of memory for avfs fuse table.", - kfree(smumgr->backend); - cgs_free_gpu_mem(smumgr->device, + kfree(hwmgr->smu_backend); + 
cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)handle); return -EINVAL); @@ -558,36 +558,36 @@ static int vega10_smu_init(struct pp_smumgr *smumgr) return 0; } -static int vega10_smu_fini(struct pp_smumgr *smumgr) +static int vega10_smu_fini(struct pp_hwmgr *hwmgr) { struct vega10_smumgr *priv = - (struct vega10_smumgr *)(smumgr->backend); + (struct vega10_smumgr *)(hwmgr->smu_backend); if (priv) { - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); if (priv->smu_tables.entry[TOOLSTABLE].table) - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); - cgs_free_gpu_mem(smumgr->device, + cgs_free_gpu_mem(hwmgr->device, (cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle); - kfree(smumgr->backend); - smumgr->backend = NULL; + kfree(hwmgr->smu_backend); + hwmgr->smu_backend = NULL; } return 0; } -static int vega10_start_smu(struct pp_smumgr *smumgr) +static int vega10_start_smu(struct pp_hwmgr *hwmgr) { - PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr), + PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr), "Failed to verify SMC interface!", return -EINVAL); - vega10_set_tools_address(smumgr); + vega10_set_tools_address(hwmgr); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h index 821425c1e4e0..0695455b21b2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h @@ -52,19 +52,19 @@ struct vega10_smumgr { struct smu_table_array smu_tables; }; -int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg); -int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, +int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg); +int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); -int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, +int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id); -int vega10_enable_smc_features(struct pp_smumgr *smumgr, +int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, bool enable, uint32_t feature_mask); -int vega10_get_smc_features(struct pp_smumgr *smumgr, +int vega10_get_smc_features(struct pp_hwmgr *hwmgr, uint32_t *features_enabled); -int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table); -int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table); +int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table); +int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table); -int vega10_set_tools_address(struct pp_smumgr *smumgr); +int vega10_set_tools_address(struct 
pp_hwmgr *hwmgr); #endif
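
The change repeated across the hunks above is mechanical: helpers that took a struct pp_smumgr * and reached their private data through smumgr->backend (or hwmgr->smumgr->backend) now take the struct pp_hwmgr * directly and reach the same data through hwmgr->smu_backend. A stand-alone sketch of that before/after shape, using simplified stand-in structs rather than the real powerplay definitions:

    /* Simplified model of the interface change; the real structs carry far more state. */
    #include <stdio.h>

    struct tonga_smumgr { unsigned int arb_table_start; };  /* stand-in for the SMU backend data */

    /* Old shape: the hwmgr pointed at a separate pp_smumgr that owned the backend. */
    struct pp_smumgr_old { void *backend; };
    struct pp_hwmgr_old  { struct pp_smumgr_old *smumgr; };

    /* New shape: the backend hangs directly off the hwmgr. */
    struct pp_hwmgr_new  { void *smu_backend; };

    static unsigned int old_helper(struct pp_smumgr_old *smumgr)
    {
        struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend;
        return smu_data->arb_table_start;
    }

    static unsigned int new_helper(struct pp_hwmgr_new *hwmgr)
    {
        struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend;
        return smu_data->arb_table_start;
    }

    int main(void)
    {
        struct tonga_smumgr data = { .arb_table_start = 0x1234 };
        struct pp_smumgr_old smumgr = { .backend = &data };
        struct pp_hwmgr_old  hw_old = { .smumgr = &smumgr };
        struct pp_hwmgr_new  hw_new = { .smu_backend = &data };

        /* Same value reached through one fewer indirection after the change. */
        printf("old: %#x new: %#x\n", old_helper(hw_old.smumgr), new_helper(&hw_new));
        return 0;
    }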
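
vega10_send_msg_to_smc_with_parameter() above spells out the mailbox handshake: wait until MP1_SMN_C2PMSG_90 holds the previous response, clear it, write the argument to C2PMSG_82 and the message ID to C2PMSG_66, then poll C2PMSG_90 again and treat 1 as success. The user-space model below mimics that sequence with stub register accessors; the register indices, helper names and message values are invented for the illustration and are not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    /* Stub mailbox registers standing in for MP1_SMN_C2PMSG_66/82/90. */
    enum { REG_MSG = 0, REG_ARG = 1, REG_RESP = 2 };
    static uint32_t vega10_reg[3] = { 0, 0, 1 };       /* RESP starts at 1: SMC idle */

    static void reg_write(int reg, uint32_t v) { vega10_reg[reg] = v; }
    static uint32_t reg_read(int reg)          { return vega10_reg[reg]; }

    /* Pretend firmware: acknowledge whatever message has been posted. */
    static void fake_smc_service(void)
    {
        if (reg_read(REG_RESP) == 0 && reg_read(REG_MSG) != 0)
            reg_write(REG_RESP, 1);                    /* 1 == success, as checked above */
    }

    static int send_msg_to_smc_with_parameter(uint16_t msg, uint32_t parameter)
    {
        while (reg_read(REG_RESP) == 0)                /* wait out any earlier message */
            fake_smc_service();

        reg_write(REG_RESP, 0);                        /* clear the response register */
        reg_write(REG_ARG, parameter);                 /* argument goes in C2PMSG_82 */
        reg_write(REG_MSG, msg);                       /* message ID goes in C2PMSG_66 */

        while (reg_read(REG_RESP) == 0)                /* poll C2PMSG_90 for the reply */
            fake_smc_service();

        return reg_read(REG_RESP) == 1 ? 0 : -1;
    }

    int main(void)
    {
        /* 0x5A / 0xCAFE are arbitrary demo values, not real PPSMC message IDs. */
        printf("send: %d, arg readback: %#x\n",
               send_msg_to_smc_with_parameter(0x5A, 0xCAFE), reg_read(REG_ARG));
        return 0;
    }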
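
vega10_copy_table_to_smc() and vega10_copy_table_from_smc() layer a fixed message sequence on top of that handshake: publish the shared buffer's DRAM address as two 32-bit halves, then ask the firmware to move the table in the wanted direction, with memcpy() bridging the shared buffer and the caller's copy. A compressed sketch of the driver-to-SMC direction, with symbolic message IDs standing in for the real PPSMC_MSG_* values:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Symbolic message IDs for the sketch only; the real values live in ppsmc.h. */
    enum { MSG_SET_DRAM_ADDR_HIGH, MSG_SET_DRAM_ADDR_LOW, MSG_TRANSFER_DRAM_TO_SMU };

    static int send_msg(int msg, uint32_t param)
    {
        printf("msg %d param %#x\n", msg, param);      /* stand-in for the C2PMSG handshake */
        return 0;
    }

    struct smu_table { uint64_t dram_addr; uint8_t *shared_buf; size_t size; };

    /* Mirrors the shape of vega10_copy_table_to_smc(): stage, publish address, transfer. */
    static int copy_table_to_smc(struct smu_table *t, const uint8_t *table, uint32_t table_id)
    {
        memcpy(t->shared_buf, table, t->size);                   /* stage in the shared buffer */
        if (send_msg(MSG_SET_DRAM_ADDR_HIGH, t->dram_addr >> 32))
            return -1;
        if (send_msg(MSG_SET_DRAM_ADDR_LOW, (uint32_t)t->dram_addr))
            return -1;
        return send_msg(MSG_TRANSFER_DRAM_TO_SMU, table_id);     /* firmware pulls the data */
    }

    int main(void)
    {
        uint8_t shared[16], src[16] = { 1, 2, 3 };
        struct smu_table t = { .dram_addr = 0x10000ULL, .shared_buf = shared, .size = sizeof(shared) };
        return copy_table_to_smc(&t, src, 0);
    }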
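
tonga_init_arb_table_index() patches a single dword in SMC SRAM: it preserves the low 24 bits of the word at arb_table_start and places MC_CG_ARB_FREQ_F1 in the top byte, the slot the firmware reads as the current MC arbitration set. The bit manipulation in isolation, with an arbitrary readback value:

    #include <stdint.h>
    #include <stdio.h>

    #define MC_CG_ARB_FREQ_F1 0x0b

    int main(void)
    {
        uint32_t tmp = 0x12345678;                    /* pretend readback from arb_table_start */

        tmp &= 0x00FFFFFF;                            /* keep the existing low 24 bits */
        tmp |= (uint32_t)MC_CG_ARB_FREQ_F1 << 24;     /* arb set index lives in the top byte */

        printf("%#010x\n", tmp);                      /* prints 0x0b345678 */
        return 0;
    }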