author     Linus Torvalds <torvalds@linux-foundation.org>   2023-03-10 08:57:46 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-03-10 08:57:46 -0800
commit     b0d14d2aaf7d4b36b44f5a09955ebdf9eef4b0f8 (patch)
tree       284c76bc78101e1c9be66537aa29afa36850ecd9 /drivers
parent     388a810192fd383acce6933e7f272dd6a6802bb0 (diff)
parent     519b23310aa100073f0b58c39df120a486ed7f8e (diff)
Merge tag 'drm-fixes-2023-03-10' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
"Weekly fixes.
msm and amdgpu are the vast majority of these, otherwise some
straggler misc from last week for nouveau and cirrus, and a mailmap
update for a drm developer.
mailmap:
- add an entry
nouveau:
- fix system shutdown regression
- build warning fix
cirrus:
- NULL ptr deref fix
msm:
- fix invalid ptr free in syncobj cleanup
- sync GMU removal in teardown
- a5xx preemption fixes
- fix runpm imbalance
- DPU hw fixes
- stack corruption fix
- clear DSPP reservation
amdgpu:
- Misc display fixes
- UMC 8.10 fixes
- Driver unload fixes
- NBIO 7.3.0 fix
- Error checking fixes for soc15, nv, soc21 read register interface
- Fix video cap query for VCN 4.0.4
amdkfd:
- Fix return check in doorbell handling"
* tag 'drm-fixes-2023-03-10' of git://anongit.freedesktop.org/drm/drm: (42 commits)
drm/amdgpu/soc21: Add video cap query support for VCN_4_0_4
drm/amdgpu: fix error checking in amdgpu_read_mm_registers for nv
drm/amdgpu: fix error checking in amdgpu_read_mm_registers for soc21
drm/amdgpu: fix error checking in amdgpu_read_mm_registers for soc15
drm/amdgpu: Fix the warning info when removing amdgpu device
drm/amdgpu: fix return value check in kfd
drm/amd: Fix initialization mistake for NBIO 7.3.0
drm/amdgpu: Fix call trace warning and hang when removing amdgpu device
mailmap: add mailmap entries for Faith.
drm/msm: DEVFREQ_GOV_SIMPLE_ONDEMAND is no longer needed
drm/amd/display: Update clock table to include highest clock setting
drm/amd/pm: Enable ecc_info table support for smu v13_0_10
drm/amdgpu: Support umc node harvest config on umc v8_10
drm/connector: print max_requested_bpc in state debugfs
drm/display: Don't block HDR_OUTPUT_METADATA on unknown EOTF
drm/msm/dpu: clear DSPP reservations in rm release
drm/msm/disp/dpu: fix sc7280_pp base offset
drm/msm/dpu: fix stack smashing in dpu_hw_ctl_setup_blendstage
drm/msm/dpu: don't use DPU_CLK_CTRL_CURSORn for DMA SSPP clocks
drm/msm/dpu: fix clocks settings for msm8998 SSPP blocks
...
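
The three amdgpu_read_mm_registers fixes listed above (soc15, nv, soc21) all correct the same check in the allowed-read-register loop: the old code folded the "is this IP block's offset table present?" test and the offset comparison into a single && condition, so an entry whose reg_offset table was NULL matched any requested offset and was read anyway. Below is a minimal, self-contained sketch of the corrected logic; the struct and helper are hypothetical stand-ins for the kernel's adev->reg_offset[hwip][inst][seg] walk, not the actual driver code.

    #include <stdbool.h>
    #include <stdint.h>

    struct allowed_reg {
            const uint32_t *seg_base;   /* NULL when the IP block is absent */
            uint32_t reg_offset;
    };

    /*
     * Old check: "if (seg_base && req != *seg_base + reg_offset) continue;"
     * With a NULL seg_base the whole condition was false, so the entry was
     * treated as a match for every requested offset and read anyway.  The
     * fix skips entries whose offset table was never initialised before
     * comparing offsets.
     */
    static bool reg_is_allowed(const struct allowed_reg *en, uint32_t req)
    {
            if (!en->seg_base)
                    return false;
            return req == *en->seg_base + en->reg_offset;
    }
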
Diffstat (limited to 'drivers')
36 files changed, 277 insertions(+), 191 deletions(-)
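
The first hunk below (amdgpu_discovery.c) records which UMC nodes the harvest table marks as fused off and derives adev->umc.active_mask from the total discovered node count; amdgpu_umc.h then switches LOOP_UMC_NODE_INST to for_each_set_bit over that mask so RAS queries skip harvested nodes. A rough standalone model of the mask computation follows, with a hypothetical helper name and a plain loop standing in for the kernel's for_each_set_bit:

    #include <stdint.h>
    #include <stdio.h>

    /* active = all discovered UMC nodes minus the ones the harvest table lists */
    static unsigned long umc_active_mask(unsigned int node_inst_num,
                                         unsigned long harvest_config)
    {
            return ((1UL << node_inst_num) - 1) & ~harvest_config;
    }

    int main(void)
    {
            /* example: 4 UMC nodes discovered, instance 2 harvested */
            unsigned long mask = umc_active_mask(4, 1UL << 2);
            unsigned int node;

            for (node = 0; node < 4; node++)    /* LOOP_UMC_NODE_INST analogue */
                    if (mask & (1UL << node))
                            printf("query ECC counters on UMC node %u\n", node);
            return 0;
    }
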
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index b719852daa07..1a3cb53d2e0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -543,6 +543,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, struct harvest_table *harvest_info; u16 offset; int i; + uint32_t umc_harvest_config = 0; bhdr = (struct binary_header *)adev->mman.discovery_bin; offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset); @@ -570,12 +571,17 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; break; case UMC_HWID: + umc_harvest_config |= + 1 << (le16_to_cpu(harvest_info->list[i].number_instance)); (*umc_harvest_count)++; break; default: break; } } + + adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) & + ~umc_harvest_config; } /* ================================================== */ @@ -1156,8 +1162,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) AMDGPU_MAX_SDMA_INSTANCES); } - if (le16_to_cpu(ip->hw_id) == UMC_HWID) + if (le16_to_cpu(ip->hw_id) == UMC_HWID) { adev->gmc.num_umc++; + adev->umc.node_inst_num++; + } for (k = 0; k < num_base_address; k++) { /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e3e1ed4314dd..6c7d672412b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1315,7 +1315,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM || !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) || - adev->in_suspend || adev->shutdown) + adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev))) return; if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv))) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 28fe6d941054..3f5d13035aff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -602,27 +602,14 @@ psp_cmd_submit_buf(struct psp_context *psp, struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) { int ret; - int index, idx; + int index; int timeout = 20000; bool ras_intr = false; bool skip_unsupport = false; - bool dev_entered; if (psp->adev->no_hw_access) return 0; - dev_entered = drm_dev_enter(adev_to_drm(psp->adev), &idx); - /* - * We allow sending PSP messages LOAD_ASD and UNLOAD_TA without acquiring - * a lock in drm_dev_enter during driver unload because we must call - * drm_dev_unplug as the beginning of unload driver sequence . It is very - * crucial that userspace can't access device instances anymore. 
- */ - if (!dev_entered) - WARN_ON(psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_LOAD_ASD && - psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_UNLOAD_TA && - psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_INVOKE_CMD); - memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); @@ -686,8 +673,6 @@ psp_cmd_submit_buf(struct psp_context *psp, } exit: - if (dev_entered) - drm_dev_exit(idx); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index f2bf979af588..36e19336f3b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -42,7 +42,7 @@ #define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst)) #define LOOP_UMC_NODE_INST(node_inst) \ - for ((node_inst) = 0; (node_inst) < adev->umc.node_inst_num; (node_inst)++) + for_each_set_bit((node_inst), &(adev->umc.active_mask), adev->umc.node_inst_num) #define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \ LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst)) @@ -69,7 +69,7 @@ struct amdgpu_umc { /* number of umc instance with memory map register access */ uint32_t umc_inst_num; - /*number of umc node instance with memory map register access*/ + /* Total number of umc node instance including harvest one */ uint32_t node_inst_num; /* UMC regiser per channel offset */ @@ -82,6 +82,9 @@ struct amdgpu_umc { const struct amdgpu_umc_funcs *funcs; struct amdgpu_umc_ras *ras; + + /* active mask for umc node instance */ + unsigned long active_mask; }; int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 85e0afc3d4f7..af7b3ba1ca00 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -567,7 +567,6 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev) case IP_VERSION(8, 10, 0): adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM; - adev->umc.node_inst_num = adev->gmc.num_umc; adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev); adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET; adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c index 4b0d563c6522..4ef1fa4603c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -382,11 +382,6 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data); break; - case IP_VERSION(7, 5, 1): - data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2); - data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK; - WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data); - fallthrough; default: def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL)); data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, @@ -399,6 +394,15 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) break; } + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(7, 3, 0): + case IP_VERSION(7, 5, 1): + data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2); + data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK; + WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data); + break; + } 
+ if (amdgpu_sriov_vf(adev)) adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index d972025f0d20..855d390c41de 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -444,9 +444,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) { en = &nv_allowed_read_registers[i]; - if (adev->reg_offset[en->hwip][en->inst] && - reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] - + en->reg_offset)) + if (!adev->reg_offset[en->hwip][en->inst]) + continue; + else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + + en->reg_offset)) continue; *value = nv_get_register_value(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 7cd17dda32ce..2eddd7f6cd41 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -439,8 +439,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) { en = &soc15_allowed_read_registers[i]; - if (adev->reg_offset[en->hwip][en->inst] && - reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + if (!adev->reg_offset[en->hwip][en->inst]) + continue; + else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset)) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 620f7409825d..061793d390cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -111,6 +111,7 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 0, 2): + case IP_VERSION(4, 0, 4): if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { if (encode) *codecs = &vcn_4_0_0_video_codecs_encode_vcn1; @@ -291,9 +292,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) { en = &soc21_allowed_read_registers[i]; - if (adev->reg_offset[en->hwip][en->inst] && - reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] - + en->reg_offset)) + if (!adev->reg_offset[en->hwip][en->inst]) + continue; + else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + + en->reg_offset)) continue; *value = soc21_get_register_value(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h index 25eaf4af5fcf..c6dfd433fec7 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h @@ -31,9 +31,9 @@ /* number of umc instance with memory map register access */ #define UMC_V8_10_UMC_INSTANCE_NUM 2 -/* Total channel instances for all umc nodes */ +/* Total channel instances for all available umc nodes */ #define UMC_V8_10_TOTAL_CHANNEL_NUM(adev) \ - (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->umc.node_inst_num) + (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->gmc.num_umc) /* UMC regiser per channel offset */ #define UMC_V8_10_PER_CHANNEL_OFFSET 0x400 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index cbef2e147da5..38c9e1ca6691 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c 
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd) if (!pdd->doorbell_index) { int r = kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index); - if (r) + if (r < 0) return 0; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 24715ca2fa94..01383aac6b41 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -529,6 +529,19 @@ static struct clk_bw_params vg_bw_params = { }; +static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks) +{ + uint32_t max = 0; + int i; + + for (i = 0; i < num_clocks; ++i) { + if (clocks[i] > max) + max = clocks[i]; + } + + return max; +} + static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table, unsigned int voltage) { @@ -572,12 +585,16 @@ static void vg_clk_mgr_helper_populate_bw_params( bw_params->clk_table.num_entries = j + 1; - for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) { + for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) { bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk; bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk; bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage; bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage); } + bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk; + bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk; + bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage; + bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS); bw_params->vram_type = bios_info->memory_type; bw_params->num_channels = bios_info->ma_channel_number; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 923a9fb3c887..27448ffe60a4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -46,6 +46,7 @@ #include "asic_reg/mp/mp_13_0_0_sh_mask.h" #include "smu_cmn.h" #include "amdgpu_ras.h" +#include "umc_v8_10.h" /* * DO NOT use these for err/warn/info/debug messages. 
@@ -90,6 +91,12 @@ #define DEBUGSMC_MSG_Mode1Reset 2 +/* + * SMU_v13_0_10 supports ECCTABLE since version 80.34.0, + * use this to check ECCTABLE feature whether support + */ +#define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200 + static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), @@ -229,6 +236,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = { TAB_MAP(ACTIVITY_MONITOR_COEFF), [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, TAB_MAP(I2C_COMMANDS), + TAB_MAP(ECCINFO), }; static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -462,6 +470,8 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu) AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); if (!smu_table->metrics_table) @@ -477,8 +487,14 @@ static int smu_v13_0_0_tables_init(struct smu_context *smu) if (!smu_table->watermarks_table) goto err2_out; + smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); + if (!smu_table->ecc_table) + goto err3_out; + return 0; +err3_out: + kfree(smu_table->watermarks_table); err2_out: kfree(smu_table->gpu_metrics_table); err1_out: @@ -2036,6 +2052,64 @@ static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu, return ret; } +static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t if_version = 0xff, smu_version = 0xff; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); + if (ret) + return -EOPNOTSUPP; + + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) && + (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION)) + return ret; + else + return -EOPNOTSUPP; +} + +static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu, + void *table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct amdgpu_device *adev = smu->adev; + EccInfoTable_t *ecc_table = NULL; + struct ecc_info_per_ch *ecc_info_per_channel = NULL; + int i, ret = 0; + struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table; + + ret = smu_v13_0_0_check_ecc_table_support(smu); + if (ret) + return ret; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ECCINFO, + 0, + smu_table->ecc_table, + false); + if (ret) { + dev_info(adev->dev, "Failed to export SMU ecc table!\n"); + return ret; + } + + ecc_table = (EccInfoTable_t *)smu_table->ecc_table; + + for (i = 0; i < UMC_V8_10_TOTAL_CHANNEL_NUM(adev); i++) { + ecc_info_per_channel = &(eccinfo->ecc[i]); + ecc_info_per_channel->ce_count_lo_chip = + ecc_table->EccInfo[i].ce_count_lo_chip; + ecc_info_per_channel->ce_count_hi_chip = + ecc_table->EccInfo[i].ce_count_hi_chip; + ecc_info_per_channel->mca_umc_status = + ecc_table->EccInfo[i].mca_umc_status; + ecc_info_per_channel->mca_umc_addr = + ecc_table->EccInfo[i].mca_umc_addr; + } + + return ret; +} + static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, @@ -2111,6 +2185,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .send_hbm_bad_pages_num = 
smu_v13_0_0_smu_send_bad_mem_page_num, .send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag, .gpo_control = smu_v13_0_gpo_control, + .get_ecc_info = smu_v13_0_0_get_ecc_info, }; void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c index 0264abe55278..faf5e9efa7d3 100644 --- a/drivers/gpu/drm/display/drm_hdmi_helper.c +++ b/drivers/gpu/drm/display/drm_hdmi_helper.c @@ -44,10 +44,8 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, /* Sink EOTF is Bit map while infoframe is absolute values */ if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf, - connector->hdr_sink_metadata.hdmi_type1.eotf)) { - DRM_DEBUG_KMS("EOTF Not Supported\n"); - return -EINVAL; - } + connector->hdr_sink_metadata.hdmi_type1.eotf)) + DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf); err = hdmi_drm_infoframe_init(frame); if (err < 0) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 5457c02ca1ab..fed41800fea7 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1070,6 +1070,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p, drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware); + drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc); if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) if (state->writeback_job && state->writeback_job->fb) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 871870ddf7ec..949b18a29a55 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -23,7 +23,6 @@ config DRM_MSM select SHMEM select TMPFS select QCOM_SCM - select DEVFREQ_GOV_SIMPLE_ONDEMAND select WANT_DEV_COREDUMP select SND_SOC_HDMI_CODEC if SND_SOC select SYNC_FILE diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index d09221f97f71..a1e006ec5dce 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -151,8 +151,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_RING(ring, 1); /* Enable local preemption for finegrain preemption */ - OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); - OUT_RING(ring, 0x02); + OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1); + OUT_RING(ring, 0x1); /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */ OUT_PKT7(ring, CP_YIELD_ENABLE, 1); @@ -806,7 +806,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); /* Set the highest bank bit */ - if (adreno_is_a540(adreno_gpu)) + if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu)) regbit = 2; else regbit = 1; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index 7658e89844b4..f58dd564d122 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -63,7 +63,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) struct msm_ringbuffer *ring = gpu->rb[i]; spin_lock_irqsave(&ring->preempt_lock, flags); - empty = (get_wptr(ring) == ring->memptrs->rptr); + empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); spin_unlock_irqrestore(&ring->preempt_lock, flags); if (!empty) @@ -207,6 +207,7 @@ void 
a5xx_preempt_hw_init(struct msm_gpu *gpu) a5xx_gpu->preempt[i]->wptr = 0; a5xx_gpu->preempt[i]->rptr = 0; a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova; + a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]); } /* Write a 0 to signal that we aren't switching pagetables */ @@ -257,7 +258,6 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, ptr->data = 0; ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE; - ptr->rptr_addr = shadowptr(a5xx_gpu, ring); ptr->counter = counters_iova; return 0; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index f3c9600221d4..7f5bc73b2040 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -974,7 +974,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) int status, ret; if (WARN(!gmu->initialized, "The GMU is not set up yet\n")) - return 0; + return -EINVAL; gmu->hung = false; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index aae60cbd9164..6faea5049f76 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1746,7 +1746,9 @@ static void a6xx_destroy(struct msm_gpu *gpu) a6xx_llc_slices_destroy(a6xx_gpu); + mutex_lock(&a6xx_gpu->gmu.lock); a6xx_gmu_remove(a6xx_gpu); + mutex_unlock(&a6xx_gpu->gmu.lock); adreno_gpu_cleanup(adreno_gpu); diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 36f062c7582f..c5c4c93b3689 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -558,7 +558,8 @@ static void adreno_unbind(struct device *dev, struct device *master, struct msm_drm_private *priv = dev_get_drvdata(master); struct msm_gpu *gpu = dev_to_gpu(dev); - WARN_ON_ONCE(adreno_system_suspend(dev)); + if (pm_runtime_enabled(dev)) + WARN_ON_ONCE(adreno_system_suspend(dev)); gpu->funcs->destroy(gpu); priv->gpu_pdev = NULL; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index cf053e8f081e..497c9e1673ab 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -12,11 +12,15 @@ #include "dpu_hw_catalog.h" #include "dpu_kms.h" -#define VIG_MASK \ +#define VIG_BASE_MASK \ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\ - BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\ + BIT(DPU_SSPP_CDP) |\ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT)) +#define VIG_MASK \ + (VIG_BASE_MASK | \ + BIT(DPU_SSPP_CSC_10BIT)) + #define VIG_MSM8998_MASK \ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3)) @@ -26,10 +30,7 @@ #define VIG_SC7180_MASK \ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4)) -#define VIG_SM8250_MASK \ - (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE)) - -#define VIG_QCM2290_MASK (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL)) +#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL)) #define DMA_MSM8998_MASK \ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\ @@ -51,7 +52,7 @@ (DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR)) #define MIXER_MSM8998_MASK \ - (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER)) + (BIT(DPU_MIXER_SOURCESPLIT)) #define MIXER_SDM845_MASK \ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA)) @@ -314,10 +315,9 @@ static const struct dpu_caps msm8998_dpu_caps = { }; static const struct dpu_caps qcm2290_dpu_caps = { - .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_width = 
DEFAULT_DPU_LINE_WIDTH, .max_mixer_blendstages = 0x4, .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, - .ubwc_version = DPU_HW_UBWC_VER_20, .has_dim_layer = true, .has_idle_pc = true, .max_linewidth = 2160, @@ -353,9 +353,9 @@ static const struct dpu_caps sc7180_dpu_caps = { }; static const struct dpu_caps sm6115_dpu_caps = { - .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .max_mixer_width = DEFAULT_DPU_LINE_WIDTH, .max_mixer_blendstages = 0x4, - .qseed_type = DPU_SSPP_SCALER_QSEED3LITE, + .qseed_type = DPU_SSPP_SCALER_QSEED4, .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ .ubwc_version = DPU_HW_UBWC_VER_10, .has_dim_layer = true, @@ -399,7 +399,7 @@ static const struct dpu_caps sc8180x_dpu_caps = { static const struct dpu_caps sc8280xp_dpu_caps = { .max_mixer_width = 2560, .max_mixer_blendstages = 11, - .qseed_type = DPU_SSPP_SCALER_QSEED3LITE, + .qseed_type = DPU_SSPP_SCALER_QSEED4, .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ .ubwc_version = DPU_HW_UBWC_VER_40, .has_src_split = true, @@ -413,7 +413,7 @@ static const struct dpu_caps sc8280xp_dpu_caps = { static const struct dpu_caps sm8250_dpu_caps = { .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .max_mixer_blendstages = 0xb, - .qseed_type = DPU_SSPP_SCALER_QSEED3LITE, + .qseed_type = DPU_SSPP_SCALER_QSEED4, .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ .ubwc_version = DPU_HW_UBWC_VER_40, .has_src_split = true, @@ -427,7 +427,7 @@ static const struct dpu_caps sm8250_dpu_caps = { static const struct dpu_caps sm8350_dpu_caps = { .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .max_mixer_blendstages = 0xb, - .qseed_type = DPU_SSPP_SCALER_QSEED3LITE, + .qseed_type = DPU_SSPP_SCALER_QSEED4, .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ .ubwc_version = DPU_HW_UBWC_VER_40, .has_src_split = true, @@ -455,7 +455,7 @@ static const struct dpu_caps sm8450_dpu_caps = { static const struct dpu_caps sm8550_dpu_caps = { .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .max_mixer_blendstages = 0xb, - .qseed_type = DPU_SSPP_SCALER_QSEED3LITE, + .qseed_type = DPU_SSPP_SCALER_QSEED4, .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */ .ubwc_version = DPU_HW_UBWC_VER_40, .has_src_split = true, @@ -525,9 +525,9 @@ static const struct dpu_mdp_cfg sdm845_mdp[] = { .reg_off = 0x2AC, .bit_off = 8}, .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2B4, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2BC, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2C4, .bit_off = 8}, }, }; @@ -542,9 +542,9 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = { .reg_off = 0x2AC, .bit_off = 0}, .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2AC, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2B4, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2C4, .bit_off = 8}, .clk_ctrls[DPU_CLK_CTRL_WB2] = { .reg_off = 0x3B8, .bit_off = 24}, @@ -569,9 +569,9 @@ static const struct dpu_mdp_cfg sc8180x_mdp[] = { .reg_off = 0x2AC, .bit_off = 8}, .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2B4, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2BC, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2C4, .bit_off = 8}, }, }; @@ -609,9 +609,9 @@ static const struct dpu_mdp_cfg sm8250_mdp[] = { .reg_off = 0x2AC, .bit_off = 8}, 
.clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2B4, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2BC, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2C4, .bit_off = 8}, .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2BC, .bit_off = 20}, @@ -638,9 +638,9 @@ static const struct dpu_mdp_cfg sm8350_mdp[] = { .reg_off = 0x2ac, .bit_off = 8}, .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8}, .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20}, @@ -666,9 +666,9 @@ static const struct dpu_mdp_cfg sm8450_mdp[] = { .reg_off = 0x2AC, .bit_off = 8}, .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2B4, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2BC, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2C4, .bit_off = 8}, .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2BC, .bit_off = 20}, @@ -685,9 +685,9 @@ static const struct dpu_mdp_cfg sc7280_mdp[] = { .reg_off = 0x2AC, .bit_off = 0}, .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2AC, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2B4, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2C4, .bit_off = 8}, }, }; @@ -696,7 +696,7 @@ static const struct dpu_mdp_cfg sc8280xp_mdp[] = { { .name = "top_0", .id = MDP_TOP, .base = 0x0, .len = 0x494, - .features = 0, + .features = BIT(DPU_MDP_PERIPH_0_REMOVED), .highest_bank_bit = 2, .ubwc_swizzle = 6, .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0}, @@ -705,8 +705,8 @@ static const struct dpu_mdp_cfg sc8280xp_mdp[] = { .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0}, .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8}, .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x2bc, .bit_off = 8}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x2c4, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8}, + .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8}, .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20}, }, }; @@ -734,9 +734,9 @@ static const struct dpu_mdp_cfg sm8550_mdp[] = { .reg_off = 0x28330, .bit_off = 0}, .clk_ctrls[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2a330, .bit_off = 0}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { + .clk_ctrls[DPU_CLK_CTRL_DMA4] = { .reg_off = 0x2c330, .bit_off = 0}, - .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { + .clk_ctrls[DPU_CLK_CTRL_DMA5] = { .reg_off = 0x2e330, .bit_off = 0}, .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20}, @@ -828,19 +828,19 @@ static const struct dpu_ctl_cfg sdm845_ctl[] = { static const struct dpu_ctl_cfg sc7180_ctl[] = { { .name = "ctl_0", .id = CTL_0, - .base = 0x1000, .len = 0xE4, + .base = 0x1000, .len = 0x1dc, .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, - .base = 0x1200, .len = 0xE4, + .base = 0x1200, .len = 0x1dc, .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { 
.name = "ctl_2", .id = CTL_2, - .base = 0x1400, .len = 0xE4, + .base = 0x1400, .len = 0x1dc, .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), }, @@ -1190,9 +1190,9 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_MSM8998_MASK, sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_MSM8998_MASK, - sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2), SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_MSM8998_MASK, - sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3), }; static const struct dpu_sspp_cfg sdm845_sspp[] = { @@ -1209,9 +1209,9 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = { SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2), SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3), }; static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 = @@ -1226,57 +1226,57 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = { SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2), }; static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 = - _VIG_SBLK("0", 2, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("0", 2, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_cfg sm6115_sspp[] = { - SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK, + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK, sm6115_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), }; static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 = - _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 = - _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 = - _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 = - _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_cfg sm8250_sspp[] = { - SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK, + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK, sm8250_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), - SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK, + SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK, sm8250_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1), - SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK, + SSPP_BLK("sspp_2", 
SSPP_VIG2, 0x8000, VIG_SC7180_MASK, sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2), - SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK, + SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK, sm8250_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3), SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2), SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3), }; static const struct dpu_sspp_sub_blks sm8450_vig_sblk_0 = - _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sm8450_vig_sblk_1 = - _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sm8450_vig_sblk_2 = - _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sm8450_vig_sblk_3 = - _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_cfg sm8450_sspp[] = { SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK, @@ -1292,21 +1292,21 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = { SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2), SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3), }; static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 = - _VIG_SBLK("0", 7, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("0", 7, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 = - _VIG_SBLK("1", 8, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("1", 8, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 = - _VIG_SBLK("2", 9, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("2", 9, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 = - _VIG_SBLK("3", 10, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("3", 10, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK("12", 5); -static const struct dpu_sspp_sub_blks sd8550_dma_sblk_5 = _DMA_SBLK("13", 6); +static const struct dpu_sspp_sub_blks sm8550_dma_sblk_5 = _DMA_SBLK("13", 6); static const struct dpu_sspp_cfg sm8550_sspp[] = { SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK, @@ -1326,9 +1326,9 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = { SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_SDM845_MASK, sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3), SSPP_BLK("sspp_12", SSPP_DMA4, 0x2c000, DMA_CURSOR_SDM845_MASK, - sm8550_dma_sblk_4, 14, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + sm8550_dma_sblk_4, 14, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA4), SSPP_BLK("sspp_13", SSPP_DMA5, 0x2e000, DMA_CURSOR_SDM845_MASK, - sd8550_dma_sblk_5, 
15, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), + sm8550_dma_sblk_5, 15, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA5), }; static const struct dpu_sspp_cfg sc7280_sspp[] = { @@ -1337,37 +1337,37 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = { SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2), }; static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_0 = - _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_1 = - _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_2 = - _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_3 = - _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE); + _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4); static const struct dpu_sspp_cfg sc8280xp_sspp[] = { - SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK, + SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK, sc8280xp_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0), - SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK, + SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK, sc8280xp_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1), - SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK, + SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK, sc8280xp_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2), - SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK, + SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK, sc8280xp_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3), SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK, sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0), SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK, sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2), SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK, - sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3), }; #define _VIG_SBLK_NOSCALE(num, sdma_pri) \ @@ -1517,7 +1517,7 @@ static const struct dpu_lm_cfg sc7280_lm[] = { /* QCM2290 */ static const struct dpu_lm_sub_blks qcm2290_lm_sblk = { - .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH, + .maxwidth = DEFAULT_DPU_LINE_WIDTH, .maxblendstages = 4, /* excluding base layer */ .blendstage_base = { /* offsets relative to mixer base */ 0x20, 0x38, 0x50, 0x68 @@ -1714,7 +1714,7 @@ static const struct dpu_pingpong_cfg sm8350_pp[] = { }; static const struct dpu_pingpong_cfg sc7280_pp[] = { - PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk, -1, -1), + PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1), PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1), PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1), PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, 
-1), @@ -2841,8 +2841,6 @@ static const struct dpu_mdss_cfg qcm2290_dpu_cfg = { .intf = qcm2290_intf, .vbif_count = ARRAY_SIZE(sdm845_vbif), .vbif = sdm845_vbif, - .reg_dma_count = 1, - .dma_cfg = &sdm845_regdma, .perf = &qcm2290_perf_data, .mdss_irqs = IRQ_SC7180_MASK, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index ddab9caebb18..e6590302b3bf 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -515,6 +515,8 @@ enum dpu_clk_ctrl_type { DPU_CLK_CTRL_DMA1, DPU_CLK_CTRL_DMA2, DPU_CLK_CTRL_DMA3, + DPU_CLK_CTRL_DMA4, + DPU_CLK_CTRL_DMA5, DPU_CLK_CTRL_CURSOR0, DPU_CLK_CTRL_CURSOR1, DPU_CLK_CTRL_INLINE_ROT0_SSPP, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index b88a2f3724e6..6c53ea560ffa 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -446,7 +446,9 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx, * CTL_LAYER has 3-bit field (and extra bits in EXT register), * all EXT registers has 4-bit fields. */ - if (cfg->idx == 0) { + if (cfg->idx == -1) { + continue; + } else if (cfg->idx == 0) { mixercfg[0] |= mix << cfg->shift; mixercfg[1] |= ext << cfg->ext_shift; } else { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 396429e63756..66c1b70d244f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -577,6 +577,8 @@ void dpu_rm_release(struct dpu_global_state *global_state, ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id); _dpu_rm_clear_mapping(global_state->dsc_to_enc_id, ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->dspp_to_enc_id, + ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id); } int dpu_rm_reserve( diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index be4bf77103cd..ac8ed731f76d 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -637,8 +637,8 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, int ret = 0; uint32_t i, j; - post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps), - GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps), + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (!post_deps) return ERR_PTR(-ENOMEM); @@ -653,7 +653,6 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, } post_deps[i].point = syncobj_desc.point; - post_deps[i].chain = NULL; if (syncobj_desc.flags) { ret = -EINVAL; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h index 591c852f326b..76a6ae5d5652 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -35,8 +35,9 @@ struct nv50_wndw { int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *, enum drm_plane_type, const char *name, int index, - const u32 *format, enum nv50_disp_interlock_type, - u32 interlock_data, u32 heads, struct nv50_wndw **); + const u32 *format, u32 heads, + enum nv50_disp_interlock_type, u32 interlock_data, + struct nv50_wndw **); void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock, struct nv50_wndw_atom *); void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush, diff --git 
a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index c5a4f49ee206..01a22a13b452 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -2,6 +2,7 @@ #ifndef __NVKM_FB_H__ #define __NVKM_FB_H__ #include <core/subdev.h> +#include <core/falcon.h> #include <core/mm.h> /* memory type/access flags, do not match hardware values */ @@ -33,7 +34,7 @@ struct nvkm_fb { const struct nvkm_fb_func *func; struct nvkm_subdev subdev; - struct nvkm_blob vpr_scrubber; + struct nvkm_falcon_fw vpr_scrubber; struct { struct page *flush_page; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index bac7dcc4c2c1..0955340cc421 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c @@ -143,6 +143,10 @@ nvkm_fb_mem_unlock(struct nvkm_fb *fb) if (!fb->func->vpr.scrub_required) return 0; + ret = nvkm_subdev_oneinit(subdev); + if (ret) + return ret; + if (!fb->func->vpr.scrub_required(fb)) { nvkm_debug(subdev, "VPR not locked\n"); return 0; @@ -150,7 +154,7 @@ nvkm_fb_mem_unlock(struct nvkm_fb *fb) nvkm_debug(subdev, "VPR locked, running scrubber binary\n"); - if (!fb->vpr_scrubber.size) { + if (!fb->vpr_scrubber.fw.img) { nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n"); return 0; } @@ -229,7 +233,7 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev) nvkm_ram_del(&fb->ram); - nvkm_blob_dtor(&fb->vpr_scrubber); + nvkm_falcon_fw_dtor(&fb->vpr_scrubber); if (fb->sysmem.flush_page) { dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c index 5098f219e3e6..a7456e786463 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c @@ -37,5 +37,5 @@ ga100_fb = { int ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gp102_fb_new_(&ga100_fb, device, type, inst, pfb); + return gf100_fb_new_(&ga100_fb, device, type, inst, pfb); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c index 5a21b0ae4595..dd476e079fe1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c @@ -25,25 +25,20 @@ #include <engine/nvdec.h> static int -ga102_fb_vpr_scrub(struct nvkm_fb *fb) +ga102_fb_oneinit(struct nvkm_fb *fb) { - struct nvkm_falcon_fw fw = {}; - int ret; + struct nvkm_subdev *subdev = &fb->subdev; - ret = nvkm_falcon_fw_ctor_hs_v2(&ga102_flcn_fw, "mem-unlock", &fb->subdev, "nvdec/scrubber", - 0, &fb->subdev.device->nvdec[0]->falcon, &fw); - if (ret) - return ret; + nvkm_falcon_fw_ctor_hs_v2(&ga102_flcn_fw, "mem-unlock", subdev, "nvdec/scrubber", + 0, &subdev->device->nvdec[0]->falcon, &fb->vpr_scrubber); - ret = nvkm_falcon_fw_boot(&fw, &fb->subdev, true, NULL, NULL, 0, 0); - nvkm_falcon_fw_dtor(&fw); - return ret; + return gf100_fb_oneinit(fb); } static const struct nvkm_fb_func ga102_fb = { .dtor = gf100_fb_dtor, - .oneinit = gf100_fb_oneinit, + .oneinit = ga102_fb_oneinit, .init = gm200_fb_init, .init_page = gv100_fb_init_page, .init_unkn = gp100_fb_init_unkn, @@ -51,13 +46,13 @@ ga102_fb = { .ram_new = ga102_ram_new, .default_bigpage = 16, .vpr.scrub_required = tu102_fb_vpr_scrub_required, - .vpr.scrub = ga102_fb_vpr_scrub, + .vpr.scrub = gp102_fb_vpr_scrub, }; int 
ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gp102_fb_new_(&ga102_fb, device, type, inst, pfb); + return gf100_fb_new_(&ga102_fb, device, type, inst, pfb); } MODULE_FIRMWARE("nvidia/ga102/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c index 2658481d575b..14d942e8b857 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c @@ -29,18 +29,7 @@ int gp102_fb_vpr_scrub(struct nvkm_fb *fb) { - struct nvkm_subdev *subdev = &fb->subdev; - struct nvkm_falcon_fw fw = {}; - int ret; - - ret = nvkm_falcon_fw_ctor_hs(&gm200_flcn_fw, "mem-unlock", subdev, NULL, - "nvdec/scrubber", 0, &subdev->device->nvdec[0]->falcon, &fw); - if (ret) - return ret; - - ret = nvkm_falcon_fw_boot(&fw, subdev, true, NULL, NULL, 0, 0x00000000); - nvkm_falcon_fw_dtor(&fw); - return ret; + return nvkm_falcon_fw_boot(&fb->vpr_scrubber, &fb->subdev, true, NULL, NULL, 0, 0x00000000); } bool @@ -51,10 +40,21 @@ gp102_fb_vpr_scrub_required(struct nvkm_fb *fb) return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0; } +int +gp102_fb_oneinit(struct nvkm_fb *fb) +{ + struct nvkm_subdev *subdev = &fb->subdev; + + nvkm_falcon_fw_ctor_hs(&gm200_flcn_fw, "mem-unlock", subdev, NULL, "nvdec/scrubber", + 0, &subdev->device->nvdec[0]->falcon, &fb->vpr_scrubber); + + return gf100_fb_oneinit(fb); +} + static const struct nvkm_fb_func gp102_fb = { .dtor = gf100_fb_dtor, - .oneinit = gf100_fb_oneinit, + .oneinit = gp102_fb_oneinit, .init = gm200_fb_init, .init_remapper = gp100_fb_init_remapper, .init_page = gm200_fb_init_page, @@ -65,22 +65,9 @@ gp102_fb = { }; int -gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device, - enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) -{ - int ret = gf100_fb_new_(func, device, type, inst, pfb); - if (ret) - return ret; - - nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0, - &(*pfb)->vpr_scrubber); - return 0; -} - -int gp102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gp102_fb_new_(&gp102_fb, device, type, inst, pfb); + return gf100_fb_new_(&gp102_fb, device, type, inst, pfb); } MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c index 0e3c0a8f5d71..4d8a286a7a34 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c @@ -31,7 +31,7 @@ gv100_fb_init_page(struct nvkm_fb *fb) static const struct nvkm_fb_func gv100_fb = { .dtor = gf100_fb_dtor, - .oneinit = gf100_fb_oneinit, + .oneinit = gp102_fb_oneinit, .init = gm200_fb_init, .init_page = gv100_fb_init_page, .init_unkn = gp100_fb_init_unkn, @@ -45,7 +45,7 @@ gv100_fb = { int gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gp102_fb_new_(&gv100_fb, device, type, inst, pfb); + return gf100_fb_new_(&gv100_fb, device, type, inst, pfb); } MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index f517751f94ac..726c30c8bf95 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -83,8 +83,7 @@ int gm200_fb_init_page(struct nvkm_fb *); void gp100_fb_init_remapper(struct 
nvkm_fb *); void gp100_fb_init_unkn(struct nvkm_fb *); -int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int, - struct nvkm_fb **); +int gp102_fb_oneinit(struct nvkm_fb *); bool gp102_fb_vpr_scrub_required(struct nvkm_fb *); int gp102_fb_vpr_scrub(struct nvkm_fb *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c index be82af0364ee..b8803c124c3b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c @@ -31,7 +31,7 @@ tu102_fb_vpr_scrub_required(struct nvkm_fb *fb) static const struct nvkm_fb_func tu102_fb = { .dtor = gf100_fb_dtor, - .oneinit = gf100_fb_oneinit, + .oneinit = gp102_fb_oneinit, .init = gm200_fb_init, .init_page = gv100_fb_init_page, .init_unkn = gp100_fb_init_unkn, @@ -45,7 +45,7 @@ tu102_fb = { int tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb) { - return gp102_fb_new_(&tu102_fb, device, type, inst, pfb); + return gf100_fb_new_(&tu102_fb, device, type, inst, pfb); } MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin"); diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index cf35b6090503..accfa52e78c5 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -455,7 +455,7 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe, if (state->fb && cirrus->cpp != cirrus_cpp(state->fb)) cirrus_mode_set(cirrus, &crtc->mode, state->fb); - if (drm_atomic_helper_damage_merged(old_state, state, &rect)) + if (state->fb && drm_atomic_helper_damage_merged(old_state, state, &rect)) cirrus_fb_blit_rect(state->fb, &shadow_plane_state->data[0], &rect); } |
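
Among the msm changes above, the "fix invalid ptr free in syncobj cleanup" hunk in msm_gem_submit.c replaces kmalloc_array() with kcalloc() so every post_deps entry starts out zeroed; when parsing fails part-way through, the error path frees each entry's chain pointer, and entries that were never reached must hold NULL rather than uninitialised memory. A minimal sketch of that allocate-zeroed-then-unwind pattern, using plain libc calls and made-up types in place of the real drm_syncobj/dma_fence_chain handling:

    #include <stdlib.h>

    struct post_dep {
            void *syncobj;
            void *chain;    /* stays NULL for entries never parsed */
    };

    static struct post_dep *parse_post_deps(unsigned int n)
    {
            /* calloc (kcalloc in the kernel) zeroes every entry up front,
             * so the unwind below never frees an uninitialised pointer. */
            struct post_dep *deps = calloc(n, sizeof(*deps));
            unsigned int i;

            if (!deps)
                    return NULL;

            for (i = 0; i < n; i++) {
                    deps[i].chain = malloc(16);     /* stand-in for chain alloc */
                    if (!deps[i].chain)
                            goto fail;
            }
            return deps;

    fail:
            for (i = 0; i < n; i++)         /* safe: untouched chains are NULL */
                    free(deps[i].chain);
            free(deps);
            return NULL;
    }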