author    Maxime Ripard <maxime@cerno.tech>  2022-10-18 15:00:03 +0200
committer Maxime Ripard <maxime@cerno.tech>  2022-10-18 15:00:03 +0200
commit    a140a6a2d5ec0329ad05cd3532a91ad0ce58dceb (patch)
tree      b2d44a1da423c53bd6c3ab3facd45ff5f2087ffd /drivers/gpu/drm
parent    28743e25fa1c867675bd8ff976eb92d4251f13a1 (diff)
parent    9abf2313adc1ca1b6180c508c25f22f9395cc780 (diff)
Merge drm/drm-next into drm-misc-next
Let's kick off this release cycle.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- drivers/gpu/drm/Kconfig | 12
-rw-r--r-- drivers/gpu/drm/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 30
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 1216
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 139
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 48
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 140
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 48
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 47
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 48
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 272
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 156
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 168
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 100
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 44
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 282
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 310
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 206
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 139
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 60
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 84
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 95
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 25
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc21.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 129
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 90
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 63
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 46
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 771
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 21
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 47
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 74
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 92
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 93
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 107
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 91
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 36
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 89
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 268
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 125
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 466
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 92
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 261
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h | 23
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 177
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 271
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 46
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 66
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h | 220
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 66
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c | 53
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h | 70
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 27
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c | 61
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c | 59
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 460
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c | 184
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h | 270
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 303
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c | 98
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_helpers.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/Makefile | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c | 22
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 118
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 120
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 445
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 443
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 357
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 283
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 386
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h | 52
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h | 99
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/link_hwss.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 138
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/include/ddc_service_types.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/include/link_service_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 6
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h | 100
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h | 1459
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h | 4
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h | 8
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h | 3
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 58
-rw-r--r-- drivers/gpu/drm/amd/include/mes_v11_api_def.h | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c | 25
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 11
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 160
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 46
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 142
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 13
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/chrontel-ch7033.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/cros-ec-anx7688.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/ite-it6505.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/ite-it66121.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt8912b.c | 3
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9211.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 8
-rw-r--r-- drivers/gpu/drm/bridge/nxp-ptn3460.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/parade-ps8622.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/sii9234.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/sil-sii8620.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/tc358767.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/tc358768.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/tc358775.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/ti-dlpc3433.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi83.c | 4
-rw-r--r-- drivers/gpu/drm/bridge/ti-tfp410.c | 4
-rw-r--r-- drivers/gpu/drm/display/drm_dp_helper.c | 13
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 13
-rw-r--r-- drivers/gpu/drm/drm_debugfs.c | 4
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 24
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 187
-rw-r--r-- drivers/gpu/drm/drm_internal.h | 4
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 20
-rw-r--r-- drivers/gpu/drm/drm_print.c | 48
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dma.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/gem.c | 4
-rw-r--r-- drivers/gpu/drm/gma500/gma_display.c | 11
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 12
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_proto.c | 2
-rw-r--r-- drivers/gpu/drm/i2c/ch7006_drv.c | 4
-rw-r--r-- drivers/gpu/drm/i2c/sil164_drv.c | 7
-rw-r--r-- drivers/gpu/drm/i2c/tda9950.c | 4
-rw-r--r-- drivers/gpu/drm/i2c/tda998x_drv.c | 3
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 4
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_dp.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/hsw_ips.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_plane.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c | 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_audio.c | 102
-rw-r--r-- drivers/gpu/drm/i915/display/intel_backlight.c | 77
-rw-r--r-- drivers/gpu/drm/i915/display/intel_backlight_regs.h | 124
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c | 392
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c | 152
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c | 293
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c | 35
-rw-r--r-- drivers/gpu/drm/i915/display/intel_connector.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c | 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cursor.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 89
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 390
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h | 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_core.h | 418
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 147
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c | 153
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_map.c | 119
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_well.c | 129
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_well.h | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.c | 142
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc_regs.h | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c | 147
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux.c | 57
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 105
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpio_phy.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll.c | 111
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 326
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.h | 28
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo_dev.h | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_pin.c | 62
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c | 36
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fdi.c | 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.c | 56
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.h | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.c | 290
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus_regs.h | 81
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c | 139
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp_regs.h | 270
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c | 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c | 116
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lpe_audio.c | 47
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_setup.c | 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_verify.c | 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_opregion.c | 42
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.c | 45
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panel.h | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_refclk.c | 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_refclk.h | 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane_initial.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pps.c | 51
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c | 121
-rw-r--r-- drivers/gpu/drm/i915/display/intel_quirks.c | 25
-rw-r--r-- drivers/gpu/drm/i915/display/intel_quirks.h | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c | 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_phy.c | 1116
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tv.c | 23
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 346
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c | 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/skl_universal_plane.c | 6
-rw-r--r-- drivers/gpu/drm/i915/display/skl_watermark.c | 3574
-rw-r--r-- drivers/gpu/drm/i915/display/skl_watermark.h | 80
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 141
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi_regs.h | 188
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_lmem.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 23
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 39
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 62
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 43
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 18
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/gen8_engine_cs.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 58
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 156
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_regs.h | 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_types.h | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 21
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 24
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gsc.c | 106
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gsc.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.c | 114
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt.h | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c | 174
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_irq.c | 35
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_regs.h | 10
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_types.h | 36
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gtt.h | 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c | 29
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_mocs.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ppgtt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_region_lmem.c | 17
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c | 26
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sa_media.c | 47
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sa_media.h | 15
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 115
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 24
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 25
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 30
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c | 24
-rw-r--r-- drivers/gpu/drm/i915/gvt/cfg_space.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gvt/edid.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 46
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 205
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 212
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_driver.c | 147
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 528
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 53
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.h | 43
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_getparam.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 98
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 12
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.c | 307
-rw-r--r-- drivers/gpu/drm/i915/i915_pci.h | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 573
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_sw_fence.h | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.c | 97
-rw-r--r-- drivers/gpu/drm/i915/intel_device_info.h | 96
-rw-r--r-- drivers/gpu/drm/i915/intel_dram.c | 41
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.c | 9
-rw-r--r-- drivers/gpu/drm/i915/intel_pch.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_pci_config.h | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 3707
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.h | 65
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 94
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.h | 40
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp.c | 4
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c | 4
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_irq.c | 14
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_selftest.c | 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 9
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dpi.c | 1
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 5
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dsi.c | 24
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi.c | 1
-rw-r--r-- drivers/gpu/drm/meson/meson_plane.c | 2
-rw-r--r-- drivers/gpu/drm/meson/meson_viu.c | 2
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.c | 6
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx.xml.h | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 83
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 45
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 50
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 1
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 37
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 9
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 78
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 35
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c | 74
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 6
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 3
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 27
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 94
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 22
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 65
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 9
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_catalog.c | 2
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.c | 150
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.h | 1
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_link.c | 5
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.c | 37
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.h | 31
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.c | 172
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.h | 3
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_host.c | 299
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_manager.c | 288
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 162
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 5
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 185
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 87
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 14
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 145
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 102
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 188
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c | 25
-rw-r--r-- drivers/gpu/drm/msm/msm_debugfs.c | 8
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 35
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 88
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 179
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.h | 123
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_prime.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_shrinker.c | 164
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 78
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 24
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.h | 14
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_devfreq.c | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_trace.h | 36
-rw-r--r-- drivers/gpu/drm/msm/msm_io_utils.c | 22
-rw-r--r-- drivers/gpu/drm/msm/msm_iommu.c | 101
-rw-r--r-- drivers/gpu/drm/msm/msm_rd.c | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_submitqueue.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dmem.c | 108
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 13
-rw-r--r-- drivers/gpu/drm/panel/panel-edp.c | 3
-rw-r--r-- drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_devfreq.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 5
-rw-r--r-- drivers/gpu/drm/rcar-du/Makefile | 7
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 30
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 4
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 3
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 9
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 4
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 16
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 26
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_writeback.c | 4
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_lvds.c | 8
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_lvds.h | 10
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c | 69
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h | 31
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.c | 5
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 4
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 7
-rw-r--r-- drivers/gpu/drm/solomon/ssd130x-i2c.c | 4
-rw-r--r-- drivers/gpu/drm/tests/drm_buddy_test.c | 2
-rw-r--r-- drivers/gpu/drm/tests/drm_mm_test.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 13
593 files changed, 21856 insertions, 16314 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 198ba846d34b..34f5a092c99e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -51,6 +51,18 @@ config DRM_DEBUG_MM
If in doubt, say "N".
+config DRM_USE_DYNAMIC_DEBUG
+ bool "use dynamic debug to implement drm.debug"
+ default y
+ depends on DRM
+ depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
+ depends on JUMP_LABEL
+ help
+ Use dynamic-debug to avoid drm_debug_enabled() runtime overheads.
+ Due to callsite counts in DRM drivers (~4k in amdgpu) and 56
+ bytes per callsite, the .data costs can be substantial, and
+ are therefore configurable.
+
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
depends on DRM && KUNIT
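The .data cost called out in the help text above comes from dynamic debug emitting one static descriptor per debug callsite. A hedged sketch of that mechanism follows (field layout abridged for illustration; the real definition is struct _ddebug in include/linux/dynamic_debug.h):

/*
 * Illustrative only: every pr_debug()/drm_dbg() callsite expands to a
 * static descriptor roughly like this, collected in a dedicated
 * section and toggled at runtime through the dynamic_debug control
 * interface.
 */
struct ddebug_sketch {
	const char *modname;	/* e.g. "amdgpu" */
	const char *function;	/* enclosing function name */
	const char *filename;
	const char *format;	/* the printk format string */
	unsigned int lineno:18;
	unsigned int flags:8;	/* enable bits flipped at runtime */
};
/* At ~56 bytes per descriptor and ~4k callsites in amdgpu alone, the
 * per-module .data growth is why the option above is user-selectable. */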
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6ad98d3ceff7..6e55c47288e4 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -3,6 +3,8 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
+
drm-y := \
drm_aperture.o \
drm_atomic.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 79bb6fd83094..ae9371b172e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -885,6 +885,7 @@ struct amdgpu_device {
u64 fence_context;
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
+ struct dma_fence __rcu *gang_submit;
bool ib_pool_ready;
struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
@@ -1294,6 +1295,8 @@ u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
u32 reg);
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
u32 reg, u32 v);
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+ struct dma_fence *gang);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
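The gang_submit member added above is an RCU-protected fence pointer, so readers must take their reference under RCU rather than dereferencing it directly. A hedged sketch of a safe reader, assuming only the declarations in this header (the helper name is invented for illustration):

/* Illustrative only: take a reference on the currently running gang.
 * dma_fence_get_rcu_safe() retries until it holds a reference on a
 * pointer that was not replaced while we looked at it. */
static struct dma_fence *sketch_peek_gang(struct amdgpu_device *adev)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&adev->gang_submit);
	rcu_read_unlock();

	return fence;	/* caller drops it with dma_fence_put() */
}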
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 55402d238919..b14800ac179e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
@@ -849,6 +850,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
if (amdgpu_device_has_dc_support(adev)) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;
+
if (dm->backlight_dev[0])
atif->bd = dm->backlight_dev[0];
#endif
@@ -863,6 +865,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
enc->enc_priv) {
struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+
if (dig->bl_dev) {
atif->bd = dig->bl_dev;
break;
@@ -919,9 +922,9 @@ static bool amdgpu_atif_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATIF", &atif_handle);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status))
return false;
- }
+
amdgpu_acpi_priv.atif.handle = atif_handle;
acpi_get_name(amdgpu_acpi_priv.atif.handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
@@ -954,9 +957,9 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATCS", &atcs_handle);
- if (ACPI_FAILURE(status)) {
+ if (ACPI_FAILURE(status))
return false;
- }
+
amdgpu_acpi_priv.atcs.handle = atcs_handle;
acpi_get_name(amdgpu_acpi_priv.atcs.handle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("Found ATCS handle %s\n", acpi_method_name);
@@ -1050,6 +1053,10 @@ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 091415a4abf0..03bbfaa51cbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
@@ -74,9 +75,6 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
adev->kfd.dev = kgd2kfd_probe(adev, vf);
-
- if (adev->kfd.dev)
- amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -130,6 +128,7 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work)
kfd.reset_work);
struct amdgpu_reset_context reset_context;
+
memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
@@ -199,6 +198,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
adev_to_drm(adev), &gpu_resources);
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
+
INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
}
}
@@ -208,6 +209,7 @@ void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
if (adev->kfd.dev) {
kgd2kfd_device_exit(adev->kfd.dev);
adev->kfd.dev = NULL;
+ amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
}
}
@@ -684,6 +686,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
ib->length_dw = ib_len;
/* This works for NO_HWS. TODO: need to handle without knowing VMID */
job->vmid = vmid;
+ job->num_ibs = 1;
ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
@@ -753,11 +756,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
{
struct ras_err_data err_data = {0, 0, 0, NULL};
- /* CPU MCA will handle page retirement if connected_to_cpu is 1 */
- if (!adev->gmc.xgmi.connected_to_cpu)
- amdgpu_umc_poison_handler(adev, &err_data, reset);
- else if (reset)
- amdgpu_amdkfd_gpu_reset(adev);
+ amdgpu_umc_poison_handler(adev, &err_data, reset);
}
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 2170db83e41d..978d3970b5cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014-2018 Advanced Micro Devices, Inc.
*
@@ -297,7 +298,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
*/
replacement = dma_fence_get_stub();
dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
- replacement, DMA_RESV_USAGE_READ);
+ replacement, DMA_RESV_USAGE_BOOKKEEP);
dma_fence_put(replacement);
return 0;
}
@@ -1390,8 +1391,9 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
- amdgpu_bo_fence(vm->root.bo,
- &vm->process_info->eviction_fence->base, true);
+ dma_resv_add_fence(vm->root.bo->tbo.base.resv,
+ &vm->process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
amdgpu_bo_unreserve(vm->root.bo);
/* Update process info */
@@ -1612,6 +1614,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t available;
+
spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
- adev->kfd.vram_used_aligned
@@ -1987,9 +1990,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
}
if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
- amdgpu_bo_fence(bo,
- &avm->process_info->eviction_fence->base,
- true);
+ dma_resv_add_fence(bo->tbo.base.resv,
+ &avm->process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
ret = unreserve_bo_and_vms(&ctx, false, false);
goto out;
@@ -2216,7 +2219,7 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
{
if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
- mb();
+ mb(); /* make sure read happened */
atomic_set(&adev->gmc.vm_fault_info_updated, 0);
}
return 0;
@@ -2758,15 +2761,18 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (mem->bo->tbo.pin_count)
continue;
- amdgpu_bo_fence(mem->bo,
- &process_info->eviction_fence->base, true);
+ dma_resv_add_fence(mem->bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
}
/* Attach eviction fence to PD / PT BOs */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
struct amdgpu_bo *bo = peer_vm->root.bo;
- amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
+ dma_resv_add_fence(bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
}
validate_map_fail:
@@ -2820,7 +2826,9 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
- amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
+ dma_resv_add_fence(gws_bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
amdgpu_bo_unreserve(gws_bo);
mutex_unlock(&(*mem)->process_info->lock);
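The conversions throughout this file replace amdgpu_bo_fence() with explicit dma_resv_add_fence() calls at DMA_RESV_USAGE_BOOKKEEP, the weakest usage class: the eviction fence stays tracked in the reservation object but no longer acts as an implicit read dependency for other users of the BO. A hedged sketch of the pattern, assuming the reservation object is already locked (the helper name is invented for illustration):

/* Illustrative only: attach a fence for bookkeeping. The resv lock
 * must be held, and a fence slot must be reserved before adding. */
static int sketch_add_bookkeep_fence(struct dma_resv *resv,
				     struct dma_fence *fence)
{
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r)
		return r;

	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_BOOKKEEP);
	return 0;
}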
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index b7933c2ce765..491d4846fc02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1674,10 +1674,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
adev->mode_info.dither_property,
AMDGPU_FMT_DITHER_DISABLE);
- if (amdgpu_audio != 0)
+ if (amdgpu_audio != 0) {
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
+ }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1799,6 +1801,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1852,6 +1855,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1902,6 +1906,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b7bae833c804..1bbd39b3b0fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -39,9 +39,82 @@
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
-static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
- struct drm_amdgpu_cs_chunk_fence *data,
- uint32_t *offset)
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
+ struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_cs *cs)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+
+ if (cs->in.num_chunks == 0)
+ return -EINVAL;
+
+ memset(p, 0, sizeof(*p));
+ p->adev = adev;
+ p->filp = filp;
+
+ p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
+ if (!p->ctx)
+ return -EINVAL;
+
+ if (atomic_read(&p->ctx->guilty)) {
+ amdgpu_ctx_put(p->ctx);
+ return -ECANCELED;
+ }
+ return 0;
+}
+
+static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib)
+{
+ struct drm_sched_entity *entity;
+ unsigned int i;
+ int r;
+
+ r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
+ chunk_ib->ip_instance,
+ chunk_ib->ring, &entity);
+ if (r)
+ return r;
+
+ /*
+ * Abort if there is no run queue associated with this entity.
+ * Possibly because of disabled HW IP.
+ */
+ if (entity->rq == NULL)
+ return -EINVAL;
+
+ /* Check if we can add this IB to some existing job */
+ for (i = 0; i < p->gang_size; ++i)
+ if (p->entities[i] == entity)
+ return i;
+
+ /* If not increase the gang size if possible */
+ if (i == AMDGPU_CS_GANG_SIZE)
+ return -EINVAL;
+
+ p->entities[i] = entity;
+ p->gang_size = i + 1;
+ return i;
+}
+
+static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib,
+ unsigned int *num_ibs)
+{
+ int r;
+
+ r = amdgpu_cs_job_idx(p, chunk_ib);
+ if (r < 0)
+ return r;
+
+ ++(num_ibs[r]);
+ return 0;
+}
+
+static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_fence *data,
+ uint32_t *offset)
{
struct drm_gem_object *gobj;
struct amdgpu_bo *bo;
@@ -80,11 +153,11 @@ error_unref:
return r;
}
-static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
- struct drm_amdgpu_bo_list_in *data)
+static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_bo_list_in *data)
{
+ struct drm_amdgpu_bo_list_entry *info;
int r;
- struct drm_amdgpu_bo_list_entry *info = NULL;
r = amdgpu_bo_create_list_entry_array(data, &info);
if (r)
@@ -104,38 +177,25 @@ error_free:
return r;
}
-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
+/* Copy the data from userspace and go over it the first time */
+static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
uint64_t *chunk_array_user;
uint64_t *chunk_array;
- unsigned size, num_ibs = 0;
uint32_t uf_offset = 0;
- int i;
+ unsigned int size;
int ret;
+ int i;
- if (cs->in.num_chunks == 0)
- return -EINVAL;
-
- chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
+ GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;
- p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
- if (!p->ctx) {
- ret = -EINVAL;
- goto free_chunk;
- }
-
- mutex_lock(&p->ctx->lock);
-
- /* skip guilty context job */
- if (atomic_read(&p->ctx->guilty) == 1) {
- ret = -ECANCELED;
- goto free_chunk;
- }
-
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
@@ -170,7 +230,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
size = p->chunks[i].length_dw;
cdata = u64_to_user_ptr(user_chunk.chunk_data);
- p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+ p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
+ GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
ret = -ENOMEM;
i--;
@@ -182,36 +243,35 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
goto free_partial_kdata;
}
+ /* Assume the worst on the following checks */
+ ret = -EINVAL;
switch (p->chunks[i].chunk_id) {
case AMDGPU_CHUNK_ID_IB:
- ++num_ibs;
+ if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
+ goto free_partial_kdata;
+
+ ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
+ if (ret)
+ goto free_partial_kdata;
break;
case AMDGPU_CHUNK_ID_FENCE:
- size = sizeof(struct drm_amdgpu_cs_chunk_fence);
- if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
- ret = -EINVAL;
+ if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
goto free_partial_kdata;
- }
- ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
- &uf_offset);
+ ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
+ &uf_offset);
if (ret)
goto free_partial_kdata;
-
break;
case AMDGPU_CHUNK_ID_BO_HANDLES:
- size = sizeof(struct drm_amdgpu_bo_list_in);
- if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
- ret = -EINVAL;
+ if (size < sizeof(struct drm_amdgpu_bo_list_in))
goto free_partial_kdata;
- }
- ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+ ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
if (ret)
goto free_partial_kdata;
-
break;
case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -223,22 +283,32 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
break;
default:
- ret = -EINVAL;
goto free_partial_kdata;
}
}
- ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
- if (ret)
- goto free_all_kdata;
+ if (!p->gang_size)
+ return -EINVAL;
+
+ for (i = 0; i < p->gang_size; ++i) {
+ ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
+ if (ret)
+ goto free_all_kdata;
+
+ ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
+ &fpriv->vm);
+ if (ret)
+ goto free_all_kdata;
+ }
+ p->gang_leader = p->jobs[p->gang_size - 1];
- if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
+ if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
ret = -ECANCELED;
goto free_all_kdata;
}
if (p->uf_entry.tv.bo)
- p->job->uf_addr = uf_offset;
+ p->gang_leader->uf_addr = uf_offset;
kvfree(chunk_array);
/* Use this opportunity to fill in task info for the vm */
@@ -260,6 +330,297 @@ free_chunk:
return ret;
}
+static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk,
+ unsigned int *ce_preempt,
+ unsigned int *de_preempt)
+{
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_ring *ring;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ int r;
+
+ r = amdgpu_cs_job_idx(p, chunk_ib);
+ if (r < 0)
+ return r;
+
+ job = p->jobs[r];
+ ring = amdgpu_job_ring(job);
+ ib = &job->ibs[job->num_ibs++];
+
+ /* MM engine doesn't support user fences */
+ if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
+ return -EINVAL;
+
+ if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
+ chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
+ (*ce_preempt)++;
+ else
+ (*de_preempt)++;
+
+ /* Each GFX command submit allows only 1 IB max
+ * preemptible for CE & DE */
+ if (*ce_preempt > 1 || *de_preempt > 1)
+ return -EINVAL;
+ }
+
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+ job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+
+ r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
+ chunk_ib->ib_bytes : 0,
+ AMDGPU_IB_POOL_DELAYED, ib);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+
+ ib->gpu_addr = chunk_ib->va_start;
+ ib->length_dw = chunk_ib->ib_bytes / 4;
+ ib->flags = chunk_ib->flags;
+ return 0;
+}
+
+static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ unsigned num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_dep);
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_ctx *ctx;
+ struct drm_sched_entity *entity;
+ struct dma_fence *fence;
+
+ ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
+ if (ctx == NULL)
+ return -EINVAL;
+
+ r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
+ deps[i].ip_instance,
+ deps[i].ring, &entity);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return r;
+ }
+
+ fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+ amdgpu_ctx_put(ctx);
+
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
+
+ if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+ struct drm_sched_fence *s_fence;
+ struct dma_fence *old = fence;
+
+ s_fence = to_drm_sched_fence(fence);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(old);
+ }
+
+ r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
+ uint32_t handle, u64 point,
+ u64 flags)
+{
+ struct dma_fence *fence;
+ int r;
+
+ r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
+ if (r) {
+ DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ handle, point, r);
+ return r;
+ }
+
+ r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
+ dma_fence_put(fence);
+
+ return r;
+}
+
+static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
+ unsigned num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
+ unsigned num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
+ syncobj_deps[i].point,
+ syncobj_deps[i].flags);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
+ unsigned num_deps;
+ int i;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+
+ if (p->post_deps)
+ return -EINVAL;
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+
+ for (i = 0; i < num_deps; ++i) {
+ p->post_deps[i].syncobj =
+ drm_syncobj_find(p->filp, deps[i].handle);
+ if (!p->post_deps[i].syncobj)
+ return -EINVAL;
+ p->post_deps[i].chain = NULL;
+ p->post_deps[i].point = 0;
+ p->num_post_deps++;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
+ unsigned num_deps;
+ int i;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
+ if (p->post_deps)
+ return -EINVAL;
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
+
+ dep->chain = NULL;
+ if (syncobj_deps[i].point) {
+ dep->chain = dma_fence_chain_alloc();
+ if (!dep->chain)
+ return -ENOMEM;
+ }
+
+ dep->syncobj = drm_syncobj_find(p->filp,
+ syncobj_deps[i].handle);
+ if (!dep->syncobj) {
+ dma_fence_chain_free(dep->chain);
+ return -EINVAL;
+ }
+ dep->point = syncobj_deps[i].point;
+ p->num_post_deps++;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
+{
+ unsigned int ce_preempt = 0, de_preempt = 0;
+ int i, r;
+
+ for (i = 0; i < p->nchunks; ++i) {
+ struct amdgpu_cs_chunk *chunk;
+
+ chunk = &p->chunks[i];
+
+ switch (chunk->chunk_id) {
+ case AMDGPU_CHUNK_ID_IB:
+ r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
+ r = amdgpu_cs_p2_dependencies(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
+ r = amdgpu_cs_p2_syncobj_in(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
+ r = amdgpu_cs_p2_syncobj_out(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+ r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
+ if (r)
+ return r;
+ break;
+ }
+ }
+
+ return 0;
+}
+
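
A minimal userspace-side sketch of two of the chunk types handled above; syncobj_in, syncobj_out and timeline_point are assumed to be handles/values the application already owns (placeholders, not part of this patch):

        struct drm_amdgpu_cs_chunk_sem wait = { .handle = syncobj_in };
        struct drm_amdgpu_cs_chunk_syncobj signal = {
                .handle = syncobj_out,
                .flags  = 0,
                .point  = timeline_point,
        };
        struct drm_amdgpu_cs_chunk chunks[] = {
                {
                        .chunk_id   = AMDGPU_CHUNK_ID_SYNCOBJ_IN,
                        .length_dw  = sizeof(wait) / 4,
                        .chunk_data = (uintptr_t)&wait,
                }, {
                        .chunk_id   = AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL,
                        .length_dw  = sizeof(signal) / 4,
                        .chunk_data = (uintptr_t)&signal,
                },
        };

Note how length_dw is sized so that the num_deps computation in the handlers above recovers the element count.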
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
@@ -495,9 +856,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- struct amdgpu_bo *gds;
- struct amdgpu_bo *gws;
- struct amdgpu_bo *oa;
+ unsigned int i;
int r;
INIT_LIST_HEAD(&p->validated);
@@ -581,16 +940,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
e->bo_va = amdgpu_vm_bo_find(vm, bo);
}
- /* Move fence waiting after getting reservation lock of
- * PD root. Then there is no need on a ctx mutex lock.
- */
- r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
- goto error_validate;
- }
-
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
@@ -611,197 +960,139 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (r)
goto error_validate;
- amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
- p->bytes_moved_vis);
+ if (p->uf_entry.tv.bo) {
+ struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
- gds = p->bo_list->gds_obj;
- gws = p->bo_list->gws_obj;
- oa = p->bo_list->oa_obj;
+ r = amdgpu_ttm_alloc_gart(&uf->tbo);
+ if (r)
+ goto error_validate;
- if (gds) {
- p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
- p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
- }
- if (gws) {
- p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
- p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
- }
- if (oa) {
- p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
- p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+ p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
}
- if (!r && p->uf_entry.tv.bo) {
- struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
+ amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+ p->bytes_moved_vis);
- r = amdgpu_ttm_alloc_gart(&uf->tbo);
- p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
- }
+ for (i = 0; i < p->gang_size; ++i)
+ amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
+ p->bo_list->gws_obj,
+ p->bo_list->oa_obj);
+ return 0;
error_validate:
- if (r)
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out_free_user_pages:
- if (r) {
- amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- if (!e->user_pages)
- continue;
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
- kvfree(e->user_pages);
- e->user_pages = NULL;
- }
- mutex_unlock(&p->bo_list->bo_list_mutex);
+ if (!e->user_pages)
+ continue;
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+ kvfree(e->user_pages);
+ e->user_pages = NULL;
}
return r;
}
-static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
- struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_bo_list_entry *e;
- int r;
+ int i, j;
- list_for_each_entry(e, &p->validated, tv.head) {
- struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- struct dma_resv *resv = bo->tbo.base.resv;
- enum amdgpu_sync_mode sync_mode;
+ if (!trace_amdgpu_cs_enabled())
+ return;
- sync_mode = amdgpu_bo_explicit_sync(bo) ?
- AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
- &fpriv->vm);
- if (r)
- return r;
+ for (i = 0; i < p->gang_size; ++i) {
+ struct amdgpu_job *job = p->jobs[i];
+
+ for (j = 0; j < job->num_ibs; ++j)
+ trace_amdgpu_cs(p, job, &job->ibs[j]);
}
- return 0;
}
-/**
- * amdgpu_cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- * @backoff: indicator to backoff the reservation
- *
- * If error is set then unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
- bool backoff)
+static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
{
- unsigned i;
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
+ unsigned int i;
+ int r;
- if (error && backoff) {
- ttm_eu_backoff_reservation(&parser->ticket,
- &parser->validated);
- mutex_unlock(&parser->bo_list->bo_list_mutex);
- }
+ /* Only for UVD/VCE VM emulation */
+ if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
+ return 0;
- for (i = 0; i < parser->num_post_deps; i++) {
- drm_syncobj_put(parser->post_deps[i].syncobj);
- kfree(parser->post_deps[i].chain);
- }
- kfree(parser->post_deps);
+ for (i = 0; i < job->num_ibs; ++i) {
+ struct amdgpu_ib *ib = &job->ibs[i];
+ struct amdgpu_bo_va_mapping *m;
+ struct amdgpu_bo *aobj;
+ uint64_t va_start;
+ uint8_t *kptr;
- dma_fence_put(parser->fence);
+ va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
+ r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
+ if (r) {
+ DRM_ERROR("IB va_start is invalid\n");
+ return r;
+ }
- if (parser->ctx) {
- mutex_unlock(&parser->ctx->lock);
- amdgpu_ctx_put(parser->ctx);
+ if ((va_start + ib->length_dw * 4) >
+ (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ return -EINVAL;
+ }
+
+ /* the IB should be reserved at this point */
+ r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+ if (r)
+ return r;
+
+ kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
+
+ if (ring->funcs->parse_cs) {
+ memcpy(ib->ptr, kptr, ib->length_dw * 4);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, job, ib);
+ if (r)
+ return r;
+ } else {
+ ib->ptr = (uint32_t *)kptr;
+ r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
+ amdgpu_bo_kunmap(aobj);
+ if (r)
+ return r;
+ }
}
- if (parser->bo_list)
- amdgpu_bo_list_put(parser->bo_list);
- for (i = 0; i < parser->nchunks; i++)
- kvfree(parser->chunks[i].kdata);
- kvfree(parser->chunks);
- if (parser->job)
- amdgpu_job_free(parser->job);
- if (parser->uf_entry.tv.bo) {
- struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
+ return 0;
+}
- amdgpu_bo_unref(&uf);
+static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
+{
+ unsigned int i;
+ int r;
+
+ for (i = 0; i < p->gang_size; ++i) {
+ r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
+ if (r)
+ return r;
}
+ return 0;
}
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_job *job = p->gang_leader;
struct amdgpu_device *adev = p->adev;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
+ unsigned int i;
int r;
- /* Only for UVD/VCE VM emulation */
- if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
- unsigned i, j;
-
- for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
- struct drm_amdgpu_cs_chunk_ib *chunk_ib;
- struct amdgpu_bo_va_mapping *m;
- struct amdgpu_bo *aobj = NULL;
- struct amdgpu_cs_chunk *chunk;
- uint64_t offset, va_start;
- struct amdgpu_ib *ib;
- uint8_t *kptr;
-
- chunk = &p->chunks[i];
- ib = &p->job->ibs[j];
- chunk_ib = chunk->kdata;
-
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
- continue;
-
- va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
- r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
- if (r) {
- DRM_ERROR("IB va_start is invalid\n");
- return r;
- }
-
- if ((va_start + chunk_ib->ib_bytes) >
- (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
- return -EINVAL;
- }
-
- /* the IB should be reserved at this point */
- r = amdgpu_bo_kmap(aobj, (void **)&kptr);
- if (r) {
- return r;
- }
-
- offset = m->start * AMDGPU_GPU_PAGE_SIZE;
- kptr += va_start - offset;
-
- if (ring->funcs->parse_cs) {
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
-
- r = amdgpu_ring_parse_cs(ring, p, p->job, ib);
- if (r)
- return r;
- } else {
- ib->ptr = (uint32_t *)kptr;
- r = amdgpu_ring_patch_cs_in_place(ring, p, p->job, ib);
- amdgpu_bo_kunmap(aobj);
- if (r)
- return r;
- }
-
- j++;
- }
- }
-
- if (!p->job->vm)
- return amdgpu_cs_sync_rings(p);
-
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -810,18 +1101,18 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update);
if (r)
return r;
- if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
+ if (fpriv->csa_va) {
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -840,7 +1131,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -853,11 +1144,18 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(&p->job->sync, vm->last_update);
+ r = amdgpu_sync_fence(&job->sync, vm->last_update);
if (r)
return r;
- p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
+ for (i = 0; i < p->gang_size; ++i) {
+ job = p->jobs[i];
+
+ if (!job->vm)
+ continue;
+
+ job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
+ }
if (amdgpu_vm_debug) {
/* Invalidate all BOs to test for userspace bugs */
@@ -872,331 +1170,40 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
}
- return amdgpu_cs_sync_rings(p);
-}
-
-static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *parser)
-{
- struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
- struct amdgpu_vm *vm = &fpriv->vm;
- int r, ce_preempt = 0, de_preempt = 0;
- struct amdgpu_ring *ring;
- int i, j;
-
- for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
- struct amdgpu_cs_chunk *chunk;
- struct amdgpu_ib *ib;
- struct drm_amdgpu_cs_chunk_ib *chunk_ib;
- struct drm_sched_entity *entity;
-
- chunk = &parser->chunks[i];
- ib = &parser->job->ibs[j];
- chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
-
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
- continue;
-
- if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
- (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
- if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
- ce_preempt++;
- else
- de_preempt++;
- }
-
- /* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
- if (ce_preempt > 1 || de_preempt > 1)
- return -EINVAL;
- }
-
- r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
- chunk_ib->ip_instance, chunk_ib->ring,
- &entity);
- if (r)
- return r;
-
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
- parser->job->preamble_status |=
- AMDGPU_PREAMBLE_IB_PRESENT;
-
- if (parser->entity && parser->entity != entity)
- return -EINVAL;
-
- /* Return if there is no run queue associated with this entity.
- * Possibly because of disabled HW IP*/
- if (entity->rq == NULL)
- return -EINVAL;
-
- parser->entity = entity;
-
- ring = to_amdgpu_ring(entity->rq->sched);
- r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
- chunk_ib->ib_bytes : 0,
- AMDGPU_IB_POOL_DELAYED, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
-
- ib->gpu_addr = chunk_ib->va_start;
- ib->length_dw = chunk_ib->ib_bytes / 4;
- ib->flags = chunk_ib->flags;
-
- j++;
- }
-
- /* MM engine doesn't support user fences */
- ring = to_amdgpu_ring(parser->entity->rq->sched);
- if (parser->job->uf_addr && ring->funcs->no_user_fence)
- return -EINVAL;
-
return 0;
}
-static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
+static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- unsigned num_deps;
- int i, r;
- struct drm_amdgpu_cs_chunk_dep *deps;
-
- deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_dep);
-
- for (i = 0; i < num_deps; ++i) {
- struct amdgpu_ctx *ctx;
- struct drm_sched_entity *entity;
- struct dma_fence *fence;
-
- ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
- if (ctx == NULL)
- return -EINVAL;
-
- r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
- deps[i].ip_instance,
- deps[i].ring, &entity);
- if (r) {
- amdgpu_ctx_put(ctx);
- return r;
- }
-
- fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
- amdgpu_ctx_put(ctx);
-
- if (IS_ERR(fence))
- return PTR_ERR(fence);
- else if (!fence)
- continue;
-
- if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
- struct drm_sched_fence *s_fence;
- struct dma_fence *old = fence;
-
- s_fence = to_drm_sched_fence(fence);
- fence = dma_fence_get(&s_fence->scheduled);
- dma_fence_put(old);
- }
-
- r = amdgpu_sync_fence(&p->job->sync, fence);
- dma_fence_put(fence);
- if (r)
- return r;
- }
- return 0;
-}
-
-static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
- uint32_t handle, u64 point,
- u64 flags)
-{
- struct dma_fence *fence;
+ struct amdgpu_job *leader = p->gang_leader;
+ struct amdgpu_bo_list_entry *e;
+ unsigned int i;
int r;
- r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
- if (r) {
- DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
- handle, point, r);
- return r;
- }
-
- r = amdgpu_sync_fence(&p->job->sync, fence);
- dma_fence_put(fence);
-
- return r;
-}
-
-static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_sem *deps;
- unsigned num_deps;
- int i, r;
+ list_for_each_entry(e, &p->validated, tv.head) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+ struct dma_resv *resv = bo->tbo.base.resv;
+ enum amdgpu_sync_mode sync_mode;
- deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_sem);
- for (i = 0; i < num_deps; ++i) {
- r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
- 0, 0);
+ sync_mode = amdgpu_bo_explicit_sync(bo) ?
+ AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+ r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode,
+ &fpriv->vm);
if (r)
return r;
}
- return 0;
-}
-
-
-static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
- unsigned num_deps;
- int i, r;
-
- syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_syncobj);
- for (i = 0; i < num_deps; ++i) {
- r = amdgpu_syncobj_lookup_and_add_to_sync(p,
- syncobj_deps[i].handle,
- syncobj_deps[i].point,
- syncobj_deps[i].flags);
+ for (i = 0; i < p->gang_size - 1; ++i) {
+ r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
if (r)
return r;
}
- return 0;
-}
-
-static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_sem *deps;
- unsigned num_deps;
- int i;
-
- deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_sem);
-
- if (p->post_deps)
- return -EINVAL;
-
- p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
- GFP_KERNEL);
- p->num_post_deps = 0;
-
- if (!p->post_deps)
- return -ENOMEM;
-
-
- for (i = 0; i < num_deps; ++i) {
- p->post_deps[i].syncobj =
- drm_syncobj_find(p->filp, deps[i].handle);
- if (!p->post_deps[i].syncobj)
- return -EINVAL;
- p->post_deps[i].chain = NULL;
- p->post_deps[i].point = 0;
- p->num_post_deps++;
- }
-
- return 0;
-}
-
-
-static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
- struct amdgpu_cs_chunk *chunk)
-{
- struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
- unsigned num_deps;
- int i;
-
- syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_syncobj);
-
- if (p->post_deps)
- return -EINVAL;
-
- p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
- GFP_KERNEL);
- p->num_post_deps = 0;
-
- if (!p->post_deps)
- return -ENOMEM;
-
- for (i = 0; i < num_deps; ++i) {
- struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
-
- dep->chain = NULL;
- if (syncobj_deps[i].point) {
- dep->chain = dma_fence_chain_alloc();
- if (!dep->chain)
- return -ENOMEM;
- }
-
- dep->syncobj = drm_syncobj_find(p->filp,
- syncobj_deps[i].handle);
- if (!dep->syncobj) {
- dma_fence_chain_free(dep->chain);
- return -EINVAL;
- }
- dep->point = syncobj_deps[i].point;
- p->num_post_deps++;
- }
-
- return 0;
-}
-
-static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *p)
-{
- int i, r;
-
- /* TODO: Investigate why we still need the context lock */
- mutex_unlock(&p->ctx->lock);
-
- for (i = 0; i < p->nchunks; ++i) {
- struct amdgpu_cs_chunk *chunk;
-
- chunk = &p->chunks[i];
+ r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_size - 1]);
+ if (r && r != -ERESTARTSYS)
+ DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
- switch (chunk->chunk_id) {
- case AMDGPU_CHUNK_ID_DEPENDENCIES:
- case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
- r = amdgpu_cs_process_fence_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
- r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
- r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
- r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
- if (r)
- goto out;
- break;
- case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
- r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
- if (r)
- goto out;
- break;
- }
- }
-
-out:
- mutex_lock(&p->ctx->lock);
return r;
}
@@ -1221,20 +1228,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct drm_sched_entity *entity = p->entity;
+ struct amdgpu_job *leader = p->gang_leader;
struct amdgpu_bo_list_entry *e;
- struct amdgpu_job *job;
+ unsigned int i;
uint64_t seq;
int r;
- job = p->job;
- p->job = NULL;
+ for (i = 0; i < p->gang_size; ++i)
+ drm_sched_job_arm(&p->jobs[i]->base);
- r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
- if (r)
- goto error_unlock;
+ for (i = 0; i < (p->gang_size - 1); ++i) {
+ struct dma_fence *fence;
- drm_sched_job_arm(&job->base);
+ fence = &p->jobs[i]->base.s_fence->scheduled;
+ r = amdgpu_sync_fence(&leader->sync, fence);
+ if (r)
+ goto error_cleanup;
+ }
+
+ if (p->gang_size > 1) {
+ for (i = 0; i < p->gang_size; ++i)
+ amdgpu_job_set_gang_leader(p->jobs[i], leader);
+ }
/* No memory allocation is allowed while holding the notifier lock.
* The lock is held until amdgpu_cs_submit is finished and fence is
@@ -1245,6 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
* -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
*/
+ r = 0;
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
@@ -1252,67 +1268,96 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
if (r) {
r = -EAGAIN;
- goto error_abort;
+ goto error_unlock;
}
- p->fence = dma_fence_get(&job->base.s_fence->finished);
+ p->fence = dma_fence_get(&leader->base.s_fence->finished);
+ list_for_each_entry(e, &p->validated, tv.head) {
+ /* Everybody except for the gang leader uses READ */
+ for (i = 0; i < (p->gang_size - 1); ++i) {
+ dma_resv_add_fence(e->tv.bo->base.resv,
+ &p->jobs[i]->base.s_fence->finished,
+ DMA_RESV_USAGE_READ);
+ }
- seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
+ /* The gang leader is remembered as writer */
+ e->tv.num_shared = 0;
+ }
+
+ seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_size - 1],
+ p->fence);
amdgpu_cs_post_dependencies(p);
- if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+ if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
!p->ctx->preamble_presented) {
- job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+ leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
p->ctx->preamble_presented = true;
}
cs->out.handle = seq;
- job->uf_sequence = seq;
-
- amdgpu_job_free_resources(job);
+ leader->uf_sequence = seq;
- trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
- drm_sched_entity_push_job(&job->base);
+ for (i = 0; i < p->gang_size; ++i) {
+ amdgpu_job_free_resources(p->jobs[i]);
+ trace_amdgpu_cs_ioctl(p->jobs[i]);
+ drm_sched_entity_push_job(&p->jobs[i]->base);
+ p->jobs[i] = NULL;
+ }
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
-
- /* Make sure all BOs are remembered as writers */
- amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->tv.num_shared = 0;
-
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
+
mutex_unlock(&p->adev->notifier_lock);
mutex_unlock(&p->bo_list->bo_list_mutex);
-
return 0;
-error_abort:
- drm_sched_job_cleanup(&job->base);
+error_unlock:
mutex_unlock(&p->adev->notifier_lock);
-error_unlock:
- amdgpu_job_free(job);
+error_cleanup:
+ for (i = 0; i < p->gang_size; ++i)
+ drm_sched_job_cleanup(&p->jobs[i]->base);
return r;
}
-static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
+/* Cleanup the parser structure */
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
- int i;
+ unsigned i;
- if (!trace_amdgpu_cs_enabled())
- return;
+ for (i = 0; i < parser->num_post_deps; i++) {
+ drm_syncobj_put(parser->post_deps[i].syncobj);
+ kfree(parser->post_deps[i].chain);
+ }
+ kfree(parser->post_deps);
+
+ dma_fence_put(parser->fence);
+
+ if (parser->ctx)
+ amdgpu_ctx_put(parser->ctx);
+ if (parser->bo_list)
+ amdgpu_bo_list_put(parser->bo_list);
+
+ for (i = 0; i < parser->nchunks; i++)
+ kvfree(parser->chunks[i].kdata);
+ kvfree(parser->chunks);
+ for (i = 0; i < parser->gang_size; ++i) {
+ if (parser->jobs[i])
+ amdgpu_job_free(parser->jobs[i]);
+ }
+ if (parser->uf_entry.tv.bo) {
+ struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
- for (i = 0; i < parser->job->num_ibs; i++)
- trace_amdgpu_cs(parser, i);
+ amdgpu_bo_unref(&uf);
+ }
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- union drm_amdgpu_cs *cs = data;
- struct amdgpu_cs_parser parser = {};
- bool reserved_buffers = false;
+ struct amdgpu_cs_parser parser;
int r;
if (amdgpu_ras_intr_triggered())
@@ -1321,25 +1366,20 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!adev->accel_working)
return -EBUSY;
- parser.adev = adev;
- parser.filp = filp;
-
- r = amdgpu_cs_parser_init(&parser, data);
+ r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
if (printk_ratelimit())
DRM_ERROR("Failed to initialize parser %d!\n", r);
- goto out;
+ return r;
}
- r = amdgpu_cs_ib_fill(adev, &parser);
+ r = amdgpu_cs_pass1(&parser, data);
if (r)
- goto out;
+ goto error_fini;
- r = amdgpu_cs_dependencies(adev, &parser);
- if (r) {
- DRM_ERROR("Failed in the dependencies handling %d!\n", r);
- goto out;
- }
+ r = amdgpu_cs_pass2(&parser);
+ if (r)
+ goto error_fini;
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
@@ -1347,22 +1387,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
- goto out;
+ goto error_fini;
}
- reserved_buffers = true;
+ r = amdgpu_cs_patch_jobs(&parser);
+ if (r)
+ goto error_backoff;
+
+ r = amdgpu_cs_vm_handling(&parser);
+ if (r)
+ goto error_backoff;
+
+ r = amdgpu_cs_sync_rings(&parser);
+ if (r)
+ goto error_backoff;
trace_amdgpu_cs_ibs(&parser);
- r = amdgpu_cs_vm_handling(&parser);
+ r = amdgpu_cs_submit(&parser, data);
if (r)
- goto out;
+ goto error_backoff;
- r = amdgpu_cs_submit(&parser, cs);
+ amdgpu_cs_parser_fini(&parser);
+ return 0;
-out:
- amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
+error_backoff:
+ ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
+ mutex_unlock(&parser.bo_list->bo_list_mutex);
+error_fini:
+ amdgpu_cs_parser_fini(&parser);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index 30ecc4917f81..cbaa19b2b8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -27,6 +27,8 @@
#include "amdgpu_bo_list.h"
#include "amdgpu_ring.h"
+#define AMDGPU_CS_GANG_SIZE 4
+
struct amdgpu_bo_va_mapping;
struct amdgpu_cs_chunk {
@@ -50,9 +52,11 @@ struct amdgpu_cs_parser {
unsigned nchunks;
struct amdgpu_cs_chunk *chunks;
- /* scheduler job object */
- struct amdgpu_job *job;
- struct drm_sched_entity *entity;
+ /* scheduler job objects */
+ unsigned int gang_size;
+ struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE];
+ struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE];
+ struct amdgpu_job *gang_leader;
/* buffer objects */
struct ww_acquire_ctx ticket;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 3ea48385fab3..f6d9d5da53cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -315,7 +315,6 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
kref_init(&ctx->refcount);
ctx->mgr = mgr;
spin_lock_init(&ctx->ring_lock);
- mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
@@ -407,7 +406,6 @@ static void amdgpu_ctx_fini(struct kref *ref)
drm_dev_exit(idx);
}
- mutex_destroy(&ctx->lock);
kfree(ctx);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index cc7c8afff414..0fa0e56daf67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -53,7 +53,6 @@ struct amdgpu_ctx {
bool preamble_presented;
int32_t init_priority;
int32_t override_priority;
- struct mutex lock;
atomic_t guilty;
unsigned long ras_counter_ce;
unsigned long ras_counter_ue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 62b26f0e37b0..ab8f970b2849 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2365,8 +2365,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.sw = true;
- /* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+ /* need to do common hw init early so everything is set up for gmc */
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ if (r) {
+ DRM_ERROR("hw_init %d failed %d\n", i, r);
+ goto init_failed;
+ }
+ adev->ip_blocks[i].status.hw = true;
+ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ /* need to do gmc hw init early so we can allocate gpu mem */
/* Try to reserve bad pages early */
if (amdgpu_sriov_vf(adev))
amdgpu_virt_exchange_data(adev);
@@ -2451,19 +2459,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
*/
if (adev->gmc.xgmi.num_physical_nodes > 1) {
if (amdgpu_xgmi_add_device(adev) == 0) {
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ if (!amdgpu_sriov_vf(adev)) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
- if (!hive->reset_domain ||
- !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
- r = -ENOENT;
+ if (!hive->reset_domain ||
+ !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
+ r = -ENOENT;
+ amdgpu_put_xgmi_hive(hive);
+ goto init_failed;
+ }
+
+ /* Drop the early temporary reset domain we created for device */
+ amdgpu_reset_put_reset_domain(adev->reset_domain);
+ adev->reset_domain = hive->reset_domain;
amdgpu_put_xgmi_hive(hive);
- goto init_failed;
}
-
- /* Drop the early temporary reset domain we created for device */
- amdgpu_reset_put_reset_domain(adev->reset_domain);
- adev->reset_domain = hive->reset_domain;
- amdgpu_put_xgmi_hive(hive);
}
}
@@ -3052,8 +3062,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
int i, r;
static enum amd_ip_block_type ip_order[] = {
- AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_COMMON,
+ AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_IH,
};
@@ -3144,7 +3154,8 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
r = adev->ip_blocks[i].version->funcs->resume(adev);
if (r) {
@@ -3501,6 +3512,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->gmc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
+ RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
@@ -3982,6 +3994,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
+ dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
amdgpu_reset_fini(adev);
@@ -4057,12 +4070,20 @@ static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
struct amdgpu_device *adev = drm_to_adev(dev);
+ int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
adev->in_suspend = true;
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ r = amdgpu_virt_request_full_gpu(adev, false);
+ if (r)
+ return r;
+ }
+
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
@@ -4086,6 +4107,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_device_ip_suspend_phase2(adev);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
return 0;
}
@@ -4104,6 +4128,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return r;
+ }
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -4118,6 +4148,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
r = amdgpu_device_ip_resume(adev);
+
+ /* no matter what r is, always need to properly release full GPU */
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
+ }
+
if (r) {
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
return r;
@@ -4739,6 +4776,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
+ bool gpu_reset_for_dev_remove = false;
/* Try reset handler method first */
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
@@ -4758,6 +4796,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+ gpu_reset_for_dev_remove =
+ test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
/*
* ASIC reset has to be done on all XGMI hive nodes ASAP
* to allow proper links negotiation in FW (within 1 sec)
@@ -4802,6 +4844,18 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
amdgpu_ras_intr_cleared();
}
+ /* Since the mode1 reset affects base ip blocks, the
+ * phase1 ip blocks need to be resumed. Otherwise there
+ * will be a BIOS signature error and the psp bootloader
+ * can't load kdb on the next amdgpu install.
+ */
+ if (gpu_reset_for_dev_remove) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+ amdgpu_device_ip_resume_phase1(tmp_adev);
+
+ goto end;
+ }
+
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (need_full_reset) {
/* post card */
@@ -5124,6 +5178,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool need_emergency_restart = false;
bool audio_suspended = false;
int tmp_vram_lost_counter;
+ bool gpu_reset_for_dev_remove = false;
+
+ gpu_reset_for_dev_remove =
+ test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
/*
* Special case: RAS triggered and full reset isn't supported
@@ -5159,8 +5218,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
INIT_LIST_HEAD(&device_list);
if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (gpu_reset_for_dev_remove && adev->shutdown)
+ tmp_adev->shutdown = true;
+ }
if (!list_is_first(&adev->reset_list, &device_list))
list_rotate_to_front(&adev->reset_list, &device_list);
device_list_handle = &device_list;
@@ -5243,6 +5305,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ if (gpu_reset_for_dev_remove) {
+ /* Workaround for ASICs that need to disable SMC first */
+ amdgpu_device_smu_fini_early(tmp_adev);
+ }
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
/*TODO Should we stop ?*/
if (r) {
@@ -5276,6 +5342,9 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
adev->asic_reset_res = 0;
goto retry;
}
+
+ if (!r && gpu_reset_for_dev_remove)
+ goto recover_end;
}
skip_hw_reset:
@@ -5349,6 +5418,7 @@ skip_sched_resume:
amdgpu_device_unset_mp1_state(tmp_adev);
}
+recover_end:
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
@@ -5531,9 +5601,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
resource_size_t aper_limit =
adev->gmc.aper_base + adev->gmc.aper_size - 1;
- bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
- !(pci_p2pdma_distance_many(adev->pdev,
- &peer_adev->dev, 1, true) < 0);
+ bool p2p_access =
+ !adev->gmc.xgmi.connected_to_cpu &&
+ !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
@@ -5917,3 +5987,36 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
(void)RREG32(data);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
+
+/**
+ * amdgpu_device_switch_gang - switch to a new gang
+ * @adev: amdgpu_device pointer
+ * @gang: the gang to switch to
+ *
+ * Try to switch to a new gang.
+ * Returns: NULL if we switched to the new gang or a reference to the current
+ * gang leader.
+ */
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+ struct dma_fence *gang)
+{
+ struct dma_fence *old = NULL;
+
+ do {
+ dma_fence_put(old);
+ rcu_read_lock();
+ old = dma_fence_get_rcu_safe(&adev->gang_submit);
+ rcu_read_unlock();
+
+ if (old == gang)
+ break;
+
+ if (!dma_fence_is_signaled(old))
+ return old;
+
+ } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
+ old, gang) != old);
+
+ dma_fence_put(old);
+ return NULL;
+}
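
A hedged sketch of what the return contract means for a caller; this blocking retry loop is illustrative only, since the driver itself feeds the returned fence into amdgpu_job_dependency (see amdgpu_job.c below) instead of waiting synchronously:

        struct dma_fence *old;

        while ((old = amdgpu_device_switch_gang(adev, gang))) {
                /* Another gang still owns the hardware; wait for it. */
                dma_fence_wait(old, false);
                dma_fence_put(old);
        }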
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 9fa2a5ceb77d..3993e6134914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -229,7 +229,7 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, ui
return r;
}
- memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size);
+ memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
release_firmware(fw);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index c20922a5af9f..1a06b8d724f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1101,6 +1101,7 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
goto err;
ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 782cbca37538..7bd8e33b14be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -58,7 +58,7 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
- if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
+ if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 728a0933ea6f..3c9fecdd6b2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -38,6 +38,8 @@
#include <linux/mmu_notifier.h>
#include <linux/suspend.h>
#include <linux/cc_platform.h>
+#include <linux/fb.h>
+#include <linux/dynamic_debug.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -102,9 +104,10 @@
* - 3.46.0 - To enable hot plug amdgpu tests in libdrm
* - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
* - 3.48.0 - Add IP discovery version info to HW INFO
+ * - 3.49.0 - Add gang submit into CS IOCTL
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 48
+#define KMS_DRIVER_MINOR 49
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -185,6 +188,18 @@ int amdgpu_vcnfw_log;
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
.delayed_reset_work = __DELAYED_WORK_INITIALIZER(
@@ -2186,6 +2201,37 @@ amdgpu_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(dev->dev);
}
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) {
+ bool need_to_reset_gpu = false;
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ struct amdgpu_hive_info *hive;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive->device_remove_count == 0)
+ need_to_reset_gpu = true;
+ hive->device_remove_count++;
+ amdgpu_put_xgmi_hive(hive);
+ } else {
+ need_to_reset_gpu = true;
+ }
+
+ /* Workaround for ASICs that need to reset SMU.
+ * Called only when the first device is removed.
+ */
+ if (need_to_reset_gpu) {
+ struct amdgpu_reset_context reset_context;
+
+ adev->shutdown = true;
+ memset(&reset_context, 0, sizeof(reset_context));
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context.flags);
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+ }
+ }
+
amdgpu_driver_unload_kms(dev);
drm_dev_unplug(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8adeb7469f1e..d0d99ed607dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -400,7 +400,6 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
/* We are not protected by ring lock when reading the last sequence
* but it's ok to report slightly wrong fence count here.
*/
- amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
emitted += READ_ONCE(ring->fence_drv.sync_seq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index ceb91469958a..9546adc8a76f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -23,6 +23,7 @@
*
*/
+#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
@@ -865,3 +866,142 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
}
return amdgpu_num_kcq;
}
+
+void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
+ uint32_t ucode_id)
+{
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
+ struct amdgpu_firmware_info *info = NULL;
+ const struct firmware *ucode_fw;
+ unsigned int fw_size;
+
+ switch (ucode_id) {
+ case AMDGPU_UCODE_ID_CP_PFP:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.pfp_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.pfp_fw->data;
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_ME:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.me_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.me_fw->data;
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_CE:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.ce_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec_fw->data;
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.mec2_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ ucode_fw = adev->gfx.mec2_fw;
+ fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.mec_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.mec_fw->data;
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ default:
+ break;
+ }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[ucode_id];
+ info->ucode_id = ucode_id;
+ info->fw = ucode_fw;
+ adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
+ }
+}
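
A hedged sketch of a caller, loosely modeled on a GFX11-style init path; the firmware file name and the choice of ucode ids are illustrative assumptions:

        r = request_firmware(&adev->gfx.pfp_fw, "amdgpu/gc_11_0_0_pfp.bin",
                             adev->dev);
        if (r)
                return r;

        amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
        amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
        amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);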
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 8abdf41d0f83..832b3807f1d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -304,6 +304,10 @@ struct amdgpu_gfx {
uint32_t rlc_srlg_feature_version;
uint32_t rlc_srls_fw_version;
uint32_t rlc_srls_feature_version;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
uint32_t mec_feature_version;
uint32_t mec2_feature_version;
bool mec_fw_write_wait;
@@ -422,4 +426,6 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
+void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index aebc384531ac..34233a74248c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -572,45 +572,15 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
struct amdgpu_gmc *gmc = &adev->gmc;
-
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(9, 0, 1):
- case IP_VERSION(9, 3, 0):
- case IP_VERSION(9, 4, 0):
- case IP_VERSION(9, 4, 1):
- case IP_VERSION(9, 4, 2):
- case IP_VERSION(10, 3, 3):
- case IP_VERSION(10, 3, 4):
- case IP_VERSION(10, 3, 5):
- case IP_VERSION(10, 3, 6):
- case IP_VERSION(10, 3, 7):
- /*
- * noretry = 0 will cause kfd page fault tests fail
- * for some ASICs, so set default to 1 for these ASICs.
- */
- if (amdgpu_noretry == -1)
- gmc->noretry = 1;
- else
- gmc->noretry = amdgpu_noretry;
- break;
- default:
- /* Raven currently has issues with noretry
- * regardless of what we decide for other
- * asics, we should leave raven with
- * noretry = 0 until we root cause the
- * issues.
- *
- * default this to 0 for now, but we may want
- * to change this in the future for certain
- * GPUs as it can increase performance in
- * certain cases.
- */
- if (amdgpu_noretry == -1)
- gmc->noretry = 0;
- else
- gmc->noretry = amdgpu_noretry;
- break;
- }
+ uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
+ bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
+ gc_ver == IP_VERSION(9, 3, 0) ||
+ gc_ver == IP_VERSION(9, 4, 0) ||
+ gc_ver == IP_VERSION(9, 4, 1) ||
+ gc_ver == IP_VERSION(9, 4, 2) ||
+ gc_ver >= IP_VERSION(10, 3, 0));
+
+ gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
}
void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 1062b7ed74ec..46c99331d7f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -105,7 +105,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
*/
(*job)->base.sched = &adev->rings[0]->sched;
(*job)->vm = vm;
- (*job)->num_ibs = num_ibs;
amdgpu_sync_create(&(*job)->sync);
amdgpu_sync_create(&(*job)->sched_sync);
@@ -125,6 +124,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
if (r)
return r;
+ (*job)->num_ibs = 1;
r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
if (r)
kfree(*job);
@@ -132,6 +132,23 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
return r;
}
+void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+ struct amdgpu_bo *gws, struct amdgpu_bo *oa)
+{
+ if (gds) {
+ job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
+ job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
+ }
+ if (gws) {
+ job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
+ job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
+ }
+ if (oa) {
+ job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
+ job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+ }
+}
+
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
@@ -156,11 +173,29 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
dma_fence_put(&job->hw_fence);
}
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+ struct amdgpu_job *leader)
+{
+ struct dma_fence *fence = &leader->base.s_fence->scheduled;
+
+ WARN_ON(job->gang_submit);
+
+ /*
+ * Don't add a reference when we are the gang leader to avoid a circular
+ * dependency.
+ */
+ if (job != leader)
+ dma_fence_get(fence);
+ job->gang_submit = fence;
+}
+
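
A hedged sketch of wiring a two-job gang before pushing it to the scheduler, mirroring the CS frontend changes above; ordering and error handling are illustrative:

        drm_sched_job_arm(&jobs[0]->base);
        drm_sched_job_arm(&jobs[1]->base);

        /* jobs[1] acts as the gang leader; it also points at itself. */
        amdgpu_job_set_gang_leader(jobs[0], jobs[1]);
        amdgpu_job_set_gang_leader(jobs[1], jobs[1]);

        drm_sched_entity_push_job(&jobs[0]->base);
        drm_sched_entity_push_job(&jobs[1]->base);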
void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
+ if (job->gang_submit != &job->base.s_fence->scheduled)
+ dma_fence_put(job->gang_submit);
if (!job->hw_fence.ops)
kfree(job);
@@ -230,12 +265,16 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
fence = amdgpu_sync_get_fence(&job->sync);
}
+ if (!fence && job->gang_submit)
+ fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+
return fence;
}
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL, *finished;
struct amdgpu_job *job;
int r = 0;
@@ -247,8 +286,10 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
trace_amdgpu_sched_run_job(job);
- if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
- dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
+ /* Skip job if VRAM is lost and never resubmit gangs */
+ if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter) ||
+ (job->job_run_counter && job->gang_submit))
+ dma_fence_set_error(finished, -ECANCELED);
if (finished->error < 0) {
DRM_INFO("Skip scheduling IBs!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index babc0af751c2..ab7b150e5d50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -50,6 +50,7 @@ struct amdgpu_job {
struct amdgpu_sync sync;
struct amdgpu_sync sched_sync;
struct dma_fence hw_fence;
+ struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
bool vm_needs_flush;
@@ -72,11 +73,20 @@ struct amdgpu_job {
struct amdgpu_ib ibs[];
};
+static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
+{
+ return to_amdgpu_ring(job->base.entity->rq->sched);
+}
+
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
+void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+ struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+ struct amdgpu_job *leader);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 77668c3dae5b..fe23e09eec98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -247,6 +247,14 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->gfx.rlc_srls_fw_version;
fw_info->feature = adev->gfx.rlc_srls_feature_version;
break;
+ case AMDGPU_INFO_FW_GFX_RLCP:
+ fw_info->ver = adev->gfx.rlcp_ucode_version;
+ fw_info->feature = adev->gfx.rlcp_ucode_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLCV:
+ fw_info->ver = adev->gfx.rlcv_ucode_version;
+ fw_info->feature = adev->gfx.rlcv_ucode_feature_version;
+ break;
case AMDGPU_INFO_FW_GFX_MEC:
if (query_fw->index == 0) {
fw_info->ver = adev->gfx.mec_fw_version;
@@ -328,6 +336,14 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->psp.cap_fw_version;
fw_info->feature = adev->psp.cap_feature_version;
break;
+ case AMDGPU_INFO_FW_MES_KIQ:
+ fw_info->ver = adev->mes.ucode_fw_version[0];
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_MES:
+ fw_info->ver = adev->mes.ucode_fw_version[1];
+ fw_info->feature = 0;
+ break;
default:
return -EINVAL;
}
@@ -1469,6 +1485,22 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ /* RLCP */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCP;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLCP feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLCV */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLCV feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
/* MEC */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
query_fw.index = 0;
@@ -1581,6 +1613,22 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
fw_info.feature, fw_info.ver);
}
+ /* MES_KIQ */
+ query_fw.fw_type = AMDGPU_INFO_FW_MES_KIQ;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MES_KIQ feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MES */
+ query_fw.fw_type = AMDGPU_INFO_FW_MES;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
return 0;
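
Both query paths share one lookup: the ioctl handler and the debugfs dump each set a firmware type, call amdgpu_firmware_info(), and print the cached feature/version pair. A compact sketch of that dispatch shape, with made-up version numbers:

```c
#include <stdio.h>

enum fw_type { FW_GFX_RLCP, FW_GFX_RLCV, FW_MES_KIQ, FW_MES };

struct fw_info { unsigned int ver, feature; };

/* Models amdgpu_firmware_info(): map a query type to the version and
 * feature numbers cached at init; the values here are invented. */
static int firmware_info(struct fw_info *out, enum fw_type type)
{
	switch (type) {
	case FW_GFX_RLCP: *out = (struct fw_info){ 0x10, 1 }; return 0;
	case FW_GFX_RLCV: *out = (struct fw_info){ 0x11, 1 }; return 0;
	case FW_MES_KIQ:  *out = (struct fw_info){ 0x20, 0 }; return 0;
	case FW_MES:      *out = (struct fw_info){ 0x21, 0 }; return 0;
	default:          return -1;
	}
}

int main(void)
{
	struct fw_info fi;

	if (!firmware_info(&fi, FW_GFX_RLCP))
		printf("RLCP feature version: %u, firmware version: 0x%08x\n",
		       fi.feature, fi.ver);
	return 0;
}
```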
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 7b46f6bf4187..ad980f4b66e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -222,6 +222,8 @@ struct mes_add_queue_input {
uint64_t tba_addr;
uint64_t tma_addr;
uint32_t is_kfd_process;
+ uint32_t is_aql_queue;
+ uint32_t queue_size;
};
struct mes_remove_queue_input {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e6a9b9fc9e0b..2e8f6cd7a729 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -688,13 +688,16 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
* num of amdgpu_vm_pt entries.
*/
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
- bp->destroy = &amdgpu_bo_vm_destroy;
r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
*vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
+ /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
+ * is initialized.
+ */
+ bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
return r;
}
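
This hunk is an ordering fix: the destroy callback walks vmbo->shadow_list, so it may only be wired up after INIT_LIST_HEAD() has run, otherwise an early teardown would traverse garbage. A small sketch of the rule, assuming simplified list types:

```c
#include <stdio.h>

struct node { struct node *next, *prev; };
#define LIST_INIT(h) ((h)->next = (h)->prev = (h))

struct object {
	struct node shadow_list;          /* walked by the destroy callback */
	void (*destroy)(struct object *);
};

static void object_destroy(struct object *o)
{
	/* Safe only because shadow_list was initialized before the
	 * callback became reachable. */
	for (struct node *n = o->shadow_list.next; n != &o->shadow_list; n = n->next)
		;
	printf("destroyed\n");
}

static void object_create(struct object *o)
{
	LIST_INIT(&o->shadow_list);
	/* Wire up the callback only after every field it touches is
	 * valid, mirroring the ordering fix above. */
	o->destroy = object_destroy;
}

int main(void)
{
	struct object o;

	object_create(&o);
	o.destroy(&o);
	return 0;
}
```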
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index cfcaf890a6a1..effa7df3ddbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -511,6 +511,11 @@ static int psp_sw_fini(void *handle)
kfree(cmd);
cmd = NULL;
+ if (psp->km_ring.ring_mem)
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &psp->km_ring.ring_mem_mc_addr,
+ (void **)&psp->km_ring.ring_mem);
+
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
@@ -766,7 +771,7 @@ static int psp_tmr_init(struct psp_context *psp)
}
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
@@ -2055,6 +2060,15 @@ static int psp_hw_start(struct psp_context *psp)
}
}
+ if ((is_psp_fw_valid(psp->ras_drv)) &&
+ (psp->funcs->bootloader_load_ras_drv != NULL)) {
+ ret = psp_bootloader_load_ras_drv(psp);
+ if (ret) {
+ DRM_ERROR("PSP load ras_drv failed!\n");
+ return ret;
+ }
+ }
+
if ((is_psp_fw_valid(psp->sos)) &&
(psp->funcs->bootloader_load_sos != NULL)) {
ret = psp_bootloader_load_sos(psp);
@@ -3040,6 +3054,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
psp->dbg_drv.start_addr = ucode_start_addr;
break;
+ case PSP_FW_TYPE_PSP_RAS_DRV:
+ psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ras_drv.start_addr = ucode_start_addr;
+ break;
default:
dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index c32b74bd970f..58ce3ebb446c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -36,6 +36,7 @@
#define PSP_CMD_BUFFER_SIZE 0x1000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
+#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
enum psp_shared_mem_size {
@@ -71,6 +72,7 @@ enum psp_bootloader_cmd {
PSP_BL__LOAD_SOCDRV = 0xB0000,
PSP_BL__LOAD_DBGDRV = 0xC0000,
PSP_BL__LOAD_INTFDRV = 0xD0000,
+ PSP_BL__LOAD_RASDRV = 0xE0000,
PSP_BL__DRAM_LONG_TRAIN = 0x100000,
PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
@@ -114,6 +116,7 @@ struct psp_funcs
int (*bootloader_load_soc_drv)(struct psp_context *psp);
int (*bootloader_load_intf_drv)(struct psp_context *psp);
int (*bootloader_load_dbg_drv)(struct psp_context *psp);
+ int (*bootloader_load_ras_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp,
@@ -323,6 +326,7 @@ struct psp_context
struct psp_bin_desc soc_drv;
struct psp_bin_desc intf_drv;
struct psp_bin_desc dbg_drv;
+ struct psp_bin_desc ras_drv;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -403,6 +407,9 @@ struct amdgpu_psp_funcs {
((psp)->funcs->bootloader_load_intf_drv ? (psp)->funcs->bootloader_load_intf_drv((psp)) : 0)
#define psp_bootloader_load_dbg_drv(psp) \
((psp)->funcs->bootloader_load_dbg_drv ? (psp)->funcs->bootloader_load_dbg_drv((psp)) : 0)
+#define psp_bootloader_load_ras_drv(psp) \
+ ((psp)->funcs->bootloader_load_ras_drv ? \
+ (psp)->funcs->bootloader_load_ras_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
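
The new psp_bootloader_load_ras_drv() wrapper follows the file's existing convention: a NULL hook means the ASIC has no such loader, and the call degrades to success instead of a NULL dereference. A compileable sketch of the same guard (names illustrative):

```c
#include <stdio.h>
#include <stddef.h>

struct psp;
struct psp_funcs { int (*load_ras_drv)(struct psp *); };
struct psp { const struct psp_funcs *funcs; };

/* Same shape as the psp_bootloader_load_*() macros: a missing hook
 * means "nothing to do" and reports success. */
#define psp_load_ras_drv(p) \
	((p)->funcs->load_ras_drv ? (p)->funcs->load_ras_drv(p) : 0)

static int real_load(struct psp *p) { (void)p; return 0; }

int main(void)
{
	struct psp_funcs with = { .load_ras_drv = real_load };
	struct psp_funcs without = { .load_ras_drv = NULL };
	struct psp a = { &with }, b = { &without };

	printf("%d %d\n", psp_load_ras_drv(&a), psp_load_ras_drv(&b)); /* 0 0 */
	return 0;
}
```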
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ab9ba5a9c33d..2dad7aa9a03b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1811,7 +1811,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
amdgpu_ras_query_error_status(adev, &info);
if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
if (amdgpu_ras_reset_error_status(adev, info.head.block))
dev_warn(adev->dev, "Failed to reset error counter and error status");
}
@@ -2719,7 +2720,8 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
/* Need to disable ras on all IPs here before ip [hw/sw]fini */
- amdgpu_ras_disable_all_features(adev, 0);
+ if (con->features)
+ amdgpu_ras_disable_all_features(adev, 0);
amdgpu_ras_recovery_fini(adev);
return 0;
}
@@ -2832,11 +2834,8 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
struct mce *m = (struct mce *)data;
struct amdgpu_device *adev = NULL;
uint32_t gpu_id = 0;
- uint32_t umc_inst = 0;
- uint32_t ch_inst, channel_index = 0;
+ uint32_t umc_inst = 0, ch_inst = 0;
struct ras_err_data err_data = {0, 0, 0, NULL};
- struct eeprom_table_record err_rec;
- uint64_t retired_page;
/*
* If the error was generated in UMC_V2, which belongs to GPU UMCs,
@@ -2875,21 +2874,22 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
umc_inst, ch_inst);
+ err_data.err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error record in mca notifier!\n");
+ return NOTIFY_DONE;
+ }
+
/*
* Translate UMC channel address to Physical address
*/
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num
- + ch_inst];
-
- retired_page = ADDR_OF_8KB_BLOCK(m->addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(m->addr);
-
- memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
- err_data.err_addr = &err_rec;
- amdgpu_umc_fill_error_record(&err_data, m->addr,
- retired_page, channel_index, umc_inst);
+ if (adev->umc.ras &&
+ adev->umc.ras->convert_ras_error_address)
+ adev->umc.ras->convert_ras_error_address(adev,
+ &err_data, m->addr, ch_inst, umc_inst);
if (amdgpu_bad_page_threshold != 0) {
amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@ -2897,6 +2897,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
amdgpu_ras_save_bad_pages(adev);
}
+ kfree(err_data.err_addr);
return NOTIFY_OK;
}
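
The notifier now sizes the record array by max_ras_err_cnt_per_query and delegates the address translation to the per-IP convert_ras_error_address() hook, instead of filling a single on-stack record inline. A hedged sketch of that allocate/convert/consume/free flow; the address math below is a placeholder, not the real UMC translation:

```c
#include <stdio.h>
#include <stdlib.h>

struct record { unsigned long long retired_page; };

struct err_data {
	struct record *err_addr;
	unsigned int err_addr_cnt;
};

/* Stand-in for the per-IP convert_ras_error_address() hook: translate
 * one MCA address into zero or more retired-page records. */
static void convert(struct err_data *d, unsigned long long mca_addr)
{
	d->err_addr[d->err_addr_cnt++].retired_page = mca_addr >> 12;
}

int main(void)
{
	unsigned int max_cnt = 4; /* adev->umc.max_ras_err_cnt_per_query */
	struct err_data d = { calloc(max_cnt, sizeof(struct record)), 0 };

	if (!d.err_addr)
		return 1;

	convert(&d, 0x123456000ULL);
	for (unsigned int i = 0; i < d.err_addr_cnt; i++)
		printf("retire page 0x%llx\n", d.err_addr[i].retired_page);

	free(d.err_addr); /* mirrors the kfree() added at the end */
	return 0;
}
```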
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index c4283987bb1e..84c241b9a2a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -38,6 +38,7 @@
#define EEPROM_I2C_MADDR_ARCTURUS_D342 0x0
#define EEPROM_I2C_MADDR_SIENNA_CICHLID 0x0
#define EEPROM_I2C_MADDR_ALDEBARAN 0x0
+#define EEPROM_I2C_MADDR_SMU_13_0_0 (0x54UL << 16)
/*
* The 2 macros below represent the actual size in bytes that
@@ -156,6 +157,15 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
return false;
}
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ control->i2c_address = EEPROM_I2C_MADDR_SMU_13_0_0;
+ break;
+
+ default:
+ break;
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index f71b83c42590..f5318fedf2f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -31,6 +31,7 @@ enum AMDGPU_RESET_FLAGS {
AMDGPU_NEED_FULL_RESET = 0,
AMDGPU_SKIP_HW_RESET = 1,
AMDGPU_SKIP_MODE2_RESET = 2,
+ AMDGPU_RESET_FOR_DEVICE_REMOVE = 3,
};
struct amdgpu_reset_context {
@@ -112,7 +113,8 @@ static inline bool amdgpu_reset_get_reset_domain(struct amdgpu_reset_domain *dom
static inline void amdgpu_reset_put_reset_domain(struct amdgpu_reset_domain *domain)
{
- kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
+ if (domain)
+ kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
}
static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *domain,
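
amdgpu_reset_put_reset_domain() now tolerates a NULL domain, the same convention as kfree(NULL), so error paths can release unconditionally. A tiny sketch of a NULL-safe put:

```c
#include <stdlib.h>

struct domain { int refcount; };

/* NULL-safe put: callers on error paths can release unconditionally,
 * mirroring the guard added above. */
static void domain_put(struct domain *d)
{
	if (d && --d->refcount == 0)
		free(d);
}

int main(void)
{
	struct domain *d = malloc(sizeof(*d));

	if (d) {
		d->refcount = 1;
		domain_put(d);
	}
	domain_put(NULL); /* a no-op, not a crash */
	return 0;
}
```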
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 6373bfb47d55..012b72d00e04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -272,3 +272,275 @@ void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
}
+
+static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *common_hdr;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+ unsigned int *tmp;
+ unsigned int i;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+ adev->gfx.rlc.save_and_restore_offset =
+ le32_to_cpu(rlc_hdr->save_and_restore_offset);
+ adev->gfx.rlc.clear_state_descriptor_offset =
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
+ adev->gfx.rlc.avail_scratch_ram_locations =
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
+ adev->gfx.rlc.reg_restore_list_size =
+ le32_to_cpu(rlc_hdr->reg_restore_list_size);
+ adev->gfx.rlc.reg_list_format_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_start);
+ adev->gfx.rlc.reg_list_format_separate_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
+ adev->gfx.rlc.starting_offsets_start =
+ le32_to_cpu(rlc_hdr->starting_offsets_start);
+ adev->gfx.rlc.reg_list_format_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
+ adev->gfx.rlc.reg_list_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes);
+ adev->gfx.rlc.register_list_format =
+ kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
+ adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+ if (!adev->gfx.rlc.register_list_format) {
+ dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
+ return -ENOMEM;
+ }
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+ info->fw = adev->gfx.rlc_fw;
+ if (info->fw) {
+ common_hdr = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+
+ return 0;
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_1 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
+ adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
+ adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
+ adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
+ adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
+ adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
+ adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
+ adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
+ adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
+ adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
+ adev->gfx.rlc.reg_list_format_direct_reg_list_length =
+ le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.save_restore_list_cntl_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_2 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_3 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
+ adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
+ adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
+ adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
+
+ adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
+ adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
+ adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
+ adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_4 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor)
+{
+ int err;
+
+ if (version_major < 2) {
+ /* only support rlc_hdr v2.x and onwards */
+ dev_err(adev->dev, "unsupported rlc fw hdr\n");
+ return -EINVAL;
+ }
+
+ /* is_rlc_v2_1 is still used in APU code path */
+ if (version_major == 2 && version_minor == 1)
+ adev->gfx.rlc.is_rlc_v2_1 = true;
+
+ if (version_minor >= 0) {
+ err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
+ if (err) {
+ dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
+ return err;
+ }
+ }
+ if (version_minor >= 1)
+ amdgpu_gfx_rlc_init_microcode_v2_1(adev);
+ if (version_minor >= 2)
+ amdgpu_gfx_rlc_init_microcode_v2_2(adev);
+ if (version_minor == 3)
+ amdgpu_gfx_rlc_init_microcode_v2_3(adev);
+ if (version_minor == 4)
+ amdgpu_gfx_rlc_init_microcode_v2_4(adev);
+
+ return 0;
+}
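
amdgpu_gfx_rlc_init_microcode() parses the header cumulatively: every v2.x image carries the v2.0 base, minors 1 and 2 stack on top of it, while minors 3 and 4 are alternative tails, hence the switch from `>=` to `==`. A sketch of that dispatch shape:

```c
#include <stdio.h>

static void parse_v2_0(void) { puts("v2.0 base"); }
static void parse_v2_1(void) { puts("v2.1 save/restore lists"); }
static void parse_v2_2(void) { puts("v2.2 iram/dram ucode"); }
static void parse_v2_3(void) { puts("v2.3 rlcp/rlcv"); }
static void parse_v2_4(void) { puts("v2.4 tap delays"); }

/* Mirrors amdgpu_gfx_rlc_init_microcode(): the base layout is always
 * parsed, minors 1 and 2 are cumulative, and minors 3 and 4 are
 * mutually exclusive tails selected with '==' instead of '>='. */
static int parse(unsigned int major, unsigned int minor)
{
	if (major < 2)
		return -1;
	parse_v2_0();
	if (minor >= 1) parse_v2_1();
	if (minor >= 2) parse_v2_2();
	if (minor == 3) parse_v2_3();
	if (minor == 4) parse_v2_4();
	return 0;
}

int main(void) { return parse(2, 4); }
```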
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 03ac36b2c2cf..23f060db9255 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -267,5 +267,7 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
-
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index 42c1f050542f..ea5278f094c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
@@ -150,3 +151,158 @@ int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
+
+static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
+{
+ int err = 0;
+ uint16_t version_major;
+ const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
+ const struct sdma_firmware_header_v2_0 *hdr_v2;
+
+ err = amdgpu_ucode_validate(sdma_inst->fw);
+ if (err)
+ return err;
+
+ header = (const struct common_firmware_header *)
+ sdma_inst->fw->data;
+ version_major = le16_to_cpu(header->header_version_major);
+
+ switch (version_major) {
+ case 1:
+ hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
+ break;
+ case 2:
+ hdr_v2 = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr_v2->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr_v2->ucode_feature_version);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sdma_inst->feature_version >= 20)
+ sdma_inst->burst_nop = true;
+
+ return 0;
+}
+
+void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
+ bool duplicate)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ if (duplicate)
+ break;
+ }
+
+ memset((void *)adev->sdma.instance, 0,
+ sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
+}
+
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
+ char *fw_name, u32 instance,
+ bool duplicate)
+{
+ struct amdgpu_firmware_info *info = NULL;
+ const struct common_firmware_header *header = NULL;
+ int err = 0, i;
+ const struct sdma_firmware_header_v2_0 *sdma_hdr;
+ uint16_t version_major;
+
+ err = request_firmware(&adev->sdma.instance[instance].fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ header = (const struct common_firmware_header *)
+ adev->sdma.instance[instance].fw->data;
+ version_major = le16_to_cpu(header->header_version_major);
+
+ if ((duplicate && instance) || (!duplicate && version_major > 1)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
+ if (err)
+ goto out;
+
+ if (duplicate) {
+ for (i = 1; i < adev->sdma.num_instances; i++)
+ memcpy((void *)&adev->sdma.instance[i],
+ (void *)&adev->sdma.instance[0],
+ sizeof(struct amdgpu_sdma_instance));
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ DRM_DEBUG("psp_load == '%s'\n",
+ adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ switch (version_major) {
+ case 1:
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (!duplicate && (instance != i))
+ continue;
+ else {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+ info->fw = adev->sdma.instance[i].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+ break;
+ case 2:
+ sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
+ adev->sdma.instance[0].fw->data;
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+
+out:
+ if (err) {
+ DRM_ERROR("SDMA: Failed to init firmware \"%s\"\n", fw_name);
+ amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
+ }
+ return err;
+}
+
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.has_page_queue) {
+ sdma = &adev->sdma.instance[i].page;
+ if (adev->mman.buffer_funcs_ring == sdma) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ break;
+ }
+ }
+ sdma = &adev->sdma.instance[i].ring;
+ if (adev->mman.buffer_funcs_ring == sdma) {
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ break;
+ }
+ }
+}
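
With duplicate=true a single firmware image is validated once and its parsed instance context is copied to every SDMA engine; duplicate=false (or a v2 header) means per-instance handling. A sketch of just the duplication step:

```c
#include <stdio.h>
#include <string.h>

#define MAX_INSTANCES 8

struct sdma_instance { unsigned int fw_version; int burst_nop; };

static struct sdma_instance instance[MAX_INSTANCES];

/* Mirrors the duplicate=true branch of amdgpu_sdma_init_microcode():
 * parse instance 0 once, then copy its context to the other engines. */
static void duplicate_instances(unsigned int num_instances)
{
	for (unsigned int i = 1; i < num_instances; i++)
		memcpy(&instance[i], &instance[0], sizeof(instance[0]));
}

int main(void)
{
	instance[0].fw_version = 0x2a;
	instance[0].burst_nop = 1;
	duplicate_instances(4);
	printf("instance 3 fw 0x%x\n", instance[3].fw_version);
	return 0;
}
```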
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 53ac3ebae8d6..7d99205c2e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -124,4 +124,10 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
+ char *fw_name, u32 instance, bool duplicate);
+void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
+ bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 504af1b93bfa..090e66a1b284 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
@@ -315,6 +316,7 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
struct hlist_node *tmp;
struct dma_fence *f;
int i;
+
hash_for_each_safe(sync->fences, i, tmp, e, node) {
f = e->fence;
@@ -392,7 +394,7 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
- unsigned i;
+ unsigned int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 06dfcf297a8d..5e6ddc7e101c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -140,8 +140,10 @@ TRACE_EVENT(amdgpu_bo_create,
);
TRACE_EVENT(amdgpu_cs,
- TP_PROTO(struct amdgpu_cs_parser *p, int i),
- TP_ARGS(p, i),
+ TP_PROTO(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib),
+ TP_ARGS(p, job, ib),
TP_STRUCT__entry(
__field(struct amdgpu_bo_list *, bo_list)
__field(u32, ring)
@@ -151,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign(
__entry->bo_list = p->bo_list;
- __entry->ring = to_amdgpu_ring(p->entity->rq->sched)->idx;
- __entry->dw = p->job->ibs[i].length_dw;
+ __entry->ring = to_amdgpu_ring(job->base.sched)->idx;
+ __entry->dw = ib->length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- to_amdgpu_ring(p->entity->rq->sched));
+ to_amdgpu_ring(job->base.sched));
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b1c455329023..dc262d2c2925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -424,8 +424,9 @@ error:
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
- uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+ u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
struct amdgpu_res_cursor cursor;
+ u64 end;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
@@ -434,12 +435,18 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
return false;
amdgpu_res_first(mem, 0, mem_size, &cursor);
+ end = cursor.start + cursor.size;
+ while (cursor.remaining) {
+ amdgpu_res_next(&cursor, cursor.size);
- /* ttm_resource_ioremap only supports contiguous memory */
- if (cursor.size != mem_size)
- return false;
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (end != cursor.start)
+ return false;
+
+ end = cursor.start + cursor.size;
+ }
- return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
+ return end <= adev->gmc.visible_vram_size;
}
/*
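
The old check rejected any resource split into more than one chunk; the new loop also accepts chains of chunks that are physically back-to-back, since that is still ioremap-able. A userspace sketch of the contiguity walk over (start, size) pairs:

```c
#include <stdbool.h>
#include <stdio.h>

struct chunk { unsigned long long start, size; };

/* Models the reworked amdgpu_mem_visible() walk: the resource is
 * mappable only if its chunks are back-to-back and the final end stays
 * within the visible-VRAM limit. */
static bool mem_visible(const struct chunk *c, int n, unsigned long long limit)
{
	unsigned long long end = c[0].start + c[0].size;

	for (int i = 1; i < n; i++) {
		if (c[i].start != end) /* a hole: ioremap cannot map this */
			return false;
		end = c[i].start + c[i].size;
	}
	return end <= limit;
}

int main(void)
{
	struct chunk contiguous[] = { { 0, 4096 }, { 4096, 4096 } };
	struct chunk holey[]      = { { 0, 4096 }, { 8192, 4096 } };

	printf("%d %d\n", mem_visible(contiguous, 2, 1ULL << 20),
	       mem_visible(holey, 2, 1ULL << 20)); /* prints "1 0" */
	return 0;
}
```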
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 939c8614f0e3..dd0bc649a57d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -164,70 +164,138 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
} else if (version_major == 2) {
const struct rlc_firmware_header_v2_0 *rlc_hdr =
container_of(hdr, struct rlc_firmware_header_v2_0, header);
+ const struct rlc_firmware_header_v2_1 *rlc_hdr_v2_1 =
+ container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
+ const struct rlc_firmware_header_v2_2 *rlc_hdr_v2_2 =
+ container_of(rlc_hdr_v2_1, struct rlc_firmware_header_v2_2, v2_1);
+ const struct rlc_firmware_header_v2_3 *rlc_hdr_v2_3 =
+ container_of(rlc_hdr_v2_2, struct rlc_firmware_header_v2_3, v2_2);
+ const struct rlc_firmware_header_v2_4 *rlc_hdr_v2_4 =
+ container_of(rlc_hdr_v2_3, struct rlc_firmware_header_v2_4, v2_3);
- DRM_DEBUG("ucode_feature_version: %u\n",
- le32_to_cpu(rlc_hdr->ucode_feature_version));
- DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
- DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
- DRM_DEBUG("save_and_restore_offset: %u\n",
- le32_to_cpu(rlc_hdr->save_and_restore_offset));
- DRM_DEBUG("clear_state_descriptor_offset: %u\n",
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
- DRM_DEBUG("avail_scratch_ram_locations: %u\n",
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
- DRM_DEBUG("reg_restore_list_size: %u\n",
- le32_to_cpu(rlc_hdr->reg_restore_list_size));
- DRM_DEBUG("reg_list_format_start: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_start));
- DRM_DEBUG("reg_list_format_separate_start: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
- DRM_DEBUG("starting_offsets_start: %u\n",
- le32_to_cpu(rlc_hdr->starting_offsets_start));
- DRM_DEBUG("reg_list_format_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
- DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- DRM_DEBUG("reg_list_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_size_bytes));
- DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
- DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
- DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
- DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
- if (version_minor == 1) {
- const struct rlc_firmware_header_v2_1 *v2_1 =
- container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
+ switch (version_minor) {
+ case 0:
+ /* rlc_hdr v2_0 */
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr->ucode_feature_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
+ DRM_DEBUG("save_and_restore_offset: %u\n",
+ le32_to_cpu(rlc_hdr->save_and_restore_offset));
+ DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+ DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+ DRM_DEBUG("reg_restore_list_size: %u\n",
+ le32_to_cpu(rlc_hdr->reg_restore_list_size));
+ DRM_DEBUG("reg_list_format_start: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_start));
+ DRM_DEBUG("reg_list_format_separate_start: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
+ DRM_DEBUG("starting_offsets_start: %u\n",
+ le32_to_cpu(rlc_hdr->starting_offsets_start));
+ DRM_DEBUG("reg_list_format_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
+ DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ DRM_DEBUG("reg_list_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes));
+ DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
+ DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
+ DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
+ DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
+ break;
+ case 1:
+ /* rlc_hdr v2_1 */
DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
- le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
+ le32_to_cpu(rlc_hdr_v2_1->reg_list_format_direct_reg_list_length));
DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_ucode_ver));
DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_feature_ver));
DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_size_bytes));
DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
- le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_offset_bytes));
DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_ucode_ver));
DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_feature_ver));
DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_size_bytes));
DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
- le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_offset_bytes));
DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_ucode_ver));
DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_feature_ver));
DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_size_bytes));
DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
- le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_offset_bytes));
+ break;
+ case 2:
+ /* rlc_hdr v2_2 */
+ DRM_DEBUG("rlc_iram_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_size_bytes));
+ DRM_DEBUG("rlc_iram_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_offset_bytes));
+ DRM_DEBUG("rlc_dram_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_size_bytes));
+ DRM_DEBUG("rlc_dram_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_offset_bytes));
+ break;
+ case 3:
+ /* rlc_hdr v2_3 */
+ DRM_DEBUG("rlcp_ucode_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_version));
+ DRM_DEBUG("rlcp_ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_feature_version));
+ DRM_DEBUG("rlcp_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_size_bytes));
+ DRM_DEBUG("rlcp_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_offset_bytes));
+ DRM_DEBUG("rlcv_ucode_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_version));
+ DRM_DEBUG("rlcv_ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_feature_version));
+ DRM_DEBUG("rlcv_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_size_bytes));
+ DRM_DEBUG("rlcv_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_offset_bytes));
+ break;
+ case 4:
+ /* rlc_hdr v2_4 */
+ DRM_DEBUG("global_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("global_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se0_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se0_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se1_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se1_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se2_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se2_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se3_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se3_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_offset_bytes));
+ break;
+ default:
+ DRM_ERROR("Unknown RLC v2 ucode: v2.%u\n", version_minor);
+ break;
}
} else {
DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 96b6cf4c4d54..1c36235b4539 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -124,6 +124,7 @@ enum psp_fw_type {
PSP_FW_TYPE_PSP_SOC_DRV,
PSP_FW_TYPE_PSP_INTF_DRV,
PSP_FW_TYPE_PSP_DBG_DRV,
+ PSP_FW_TYPE_PSP_RAS_DRV,
};
/* version_major=2, version_minor=0 */
@@ -260,8 +261,12 @@ struct rlc_firmware_header_v2_2 {
/* version_major=2, version_minor=3 */
struct rlc_firmware_header_v2_3 {
struct rlc_firmware_header_v2_2 v2_2;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
uint32_t rlcp_ucode_size_bytes;
uint32_t rlcp_ucode_offset_bytes;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
uint32_t rlcv_ucode_size_bytes;
uint32_t rlcv_ucode_offset_bytes;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 3629d8f292ef..e46439274f3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -51,6 +51,9 @@ struct amdgpu_umc_ras {
struct amdgpu_ras_block_object ras_block;
void (*err_cnt_init)(struct amdgpu_device *adev);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
+ void (*convert_ras_error_address)(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst);
void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f36e4f08db6d..0b52af415b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -191,7 +191,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
fw_name = FIRMWARE_VCN4_0_2;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = false;
+ adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 4):
fw_name = FIRMWARE_VCN4_0_4;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 80b7a6cfd026..253ea6b159df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -161,7 +161,8 @@
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
-#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 12)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG (1 << 11)
+#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 14)
#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
@@ -171,6 +172,9 @@
#define VCN_CODEC_DISABLE_MASK_HEVC (1 << 2)
#define VCN_CODEC_DISABLE_MASK_H264 (1 << 3)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)
+
enum fw_queue_mode {
FW_QUEUE_RING_RESET = 1,
FW_QUEUE_DPG_HOLD_OFF = 2,
@@ -335,7 +339,9 @@ struct amdgpu_vcn4_fw_shared {
struct amdgpu_fw_shared_unified_queue_struct sq;
uint8_t pad1[8];
struct amdgpu_fw_shared_fw_logging fw_log;
+ uint8_t pad2[20];
struct amdgpu_fw_shared_rb_setup rb_setup;
+ struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
};
struct amdgpu_vcn_fwlog {
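
amdgpu_vcn4_fw_shared is an ABI shared with the firmware, so the new pad2[20] and the appended smu_dpm_interface exist to keep every member at the offset the firmware expects; padding arrays, not the compiler, control the layout. A hedged sketch that pins such offsets with a static assertion; the offsets below are invented for illustration:

```c
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Toy fw-shared layout: padding arrays pin members to ABI offsets. */
struct fw_shared {
	uint32_t present_flag;      /* offset 0 */
	uint8_t  pad1[8];
	uint32_t fw_log;            /* offset 12 */
	uint8_t  pad2[20];          /* reserves room so rb_setup lands at 36 */
	uint32_t rb_setup;
	uint32_t smu_dpm_interface; /* appended, as in the hunk above */
};

static_assert(offsetof(struct fw_shared, rb_setup) == 36,
	      "rb_setup must sit at the firmware-defined offset");

int main(void) { return 0; }
```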
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 59cac347baa3..83b0c5d86e48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -183,10 +183,12 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
+ spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
* amdgpu_vm_bo_moved - vm_bo is moved
@@ -198,7 +200,9 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -211,7 +215,9 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
+ spin_unlock(&vm_bo->vm->status_lock);
vm_bo->moved = false;
}
@@ -225,9 +231,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->invalidated_lock);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
- spin_unlock(&vm_bo->vm->invalidated_lock);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -240,10 +246,13 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
- if (vm_bo->bo->parent)
+ if (vm_bo->bo->parent) {
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
- else
+ spin_unlock(&vm_bo->vm->status_lock);
+ } else {
amdgpu_vm_bo_idle(vm_bo);
+ }
}
/**
@@ -256,9 +265,9 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->invalidated_lock);
+ spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
- spin_unlock(&vm_bo->vm->invalidated_lock);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -363,12 +372,20 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*validate)(void *p, struct amdgpu_bo *bo),
void *param)
{
- struct amdgpu_vm_bo_base *bo_base, *tmp;
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *shadow;
+ struct amdgpu_bo *bo;
int r;
- list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
- struct amdgpu_bo *bo = bo_base->bo;
- struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->evicted)) {
+ bo_base = list_first_entry(&vm->evicted,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo;
+ shadow = amdgpu_bo_shadowed(bo);
r = validate(param, bo);
if (r)
@@ -385,7 +402,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
@@ -406,13 +425,18 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
+ bool empty;
bool ret;
amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
- return ret && list_empty(&vm->evicted);
+ spin_lock(&vm->status_lock);
+ empty = list_empty(&vm->evicted);
+ spin_unlock(&vm->status_lock);
+
+ return ret && empty;
}
/**
@@ -680,9 +704,14 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm_update_params params;
struct amdgpu_vm_bo_base *entry;
bool flush_tlb_needed = false;
+ LIST_HEAD(relocated);
int r, idx;
- if (list_empty(&vm->relocated))
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->relocated, &relocated);
+ spin_unlock(&vm->status_lock);
+
+ if (list_empty(&relocated))
return 0;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -697,7 +726,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (r)
goto error;
- list_for_each_entry(entry, &vm->relocated, vm_status) {
+ list_for_each_entry(entry, &relocated, vm_status) {
/* vm_flush_needed after updating moved PDEs */
flush_tlb_needed |= entry->moved;
@@ -713,9 +742,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (flush_tlb_needed)
atomic64_inc(&vm->tlb_seq);
- while (!list_empty(&vm->relocated)) {
- entry = list_first_entry(&vm->relocated,
- struct amdgpu_vm_bo_base,
+ while (!list_empty(&relocated)) {
+ entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
vm_status);
amdgpu_vm_bo_idle(entry);
}
@@ -912,6 +940,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
{
struct amdgpu_bo_va *bo_va, *tmp;
+ spin_lock(&vm->status_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
continue;
@@ -936,7 +965,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
gtt_mem, cpu_mem);
}
- spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
@@ -949,7 +977,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
gtt_mem, cpu_mem);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
@@ -1278,24 +1306,29 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct amdgpu_bo_va *bo_va, *tmp;
+ struct amdgpu_bo_va *bo_va;
struct dma_resv *resv;
bool clear;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
/* Per VM BOs never need to be cleared in the page tables */
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
+ spin_lock(&vm->status_lock);
}
- spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
/* Try to reserve the BO to avoid clearing its ptes */
if (!amdgpu_vm_debug && dma_resv_trylock(resv))
@@ -1310,9 +1343,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
if (!clear)
dma_resv_unlock(resv);
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
return 0;
}
@@ -1387,7 +1420,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
!bo_va->base.moved) {
- list_move(&bo_va->base.vm_status, &vm->moved);
+ amdgpu_vm_bo_moved(&bo_va->base);
}
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
@@ -1763,9 +1796,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
}
}
- spin_lock(&vm->invalidated_lock);
+ spin_lock(&vm->status_lock);
list_del(&bo_va->base.vm_status);
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@@ -2019,9 +2052,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
INIT_LIST_HEAD(&vm->invalidated);
- spin_lock_init(&vm->invalidated_lock);
+ spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
+ INIT_LIST_HEAD(&vm->pt_freed);
+ INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
/* create scheduler entities for page table updates */
r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
@@ -2223,6 +2258,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+ flush_work(&vm->pt_free_work);
+
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
amdgpu_vm_set_pasid(adev, vm, 0);
@@ -2484,8 +2521,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
/* Intentionally setting invalid PTE flag
* combination to force a no-retry-fault
*/
- flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
- AMDGPU_PTE_TF;
+ flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT;
value = 0;
} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
/* Redirect the access to the dummy page */
@@ -2548,6 +2584,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;
+ spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
@@ -2585,7 +2622,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
id = 0;
seq_puts(m, "\tInvalidated BOs:\n");
- spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
@@ -2600,7 +2636,7 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
continue;
total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
- spin_unlock(&vm->invalidated_lock);
+ spin_unlock(&vm->status_lock);
total_done_objs = id;
seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
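
The per-list invalidated_lock becomes one vm-wide status_lock here, and the long loops switch to the pattern "take the first entry under the lock, drop the lock for the heavy work, retake it before looking again", so validation and page-table updates never run under a spinlock. The processing step must take the entry off the list (in amdgpu it is moved to another status list) or the loop would never end. A userspace sketch of the pattern with a pthread mutex:

```c
#include <pthread.h>
#include <stdio.h>

struct entry { struct entry *next; int id; };

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *evicted; /* singly linked stand-in for vm->evicted */

static void validate(struct entry *e) { printf("validate %d\n", e->id); }

/* Models the reworked amdgpu_vm_validate_pt_bos() loop: the lock only
 * guards list manipulation; the heavy work runs with it dropped.  Here
 * the entry is popped directly; in amdgpu the validation step moves it
 * to another status list, which terminates the loop the same way. */
static void drain_evicted(void)
{
	pthread_mutex_lock(&status_lock);
	while (evicted) {
		struct entry *e = evicted;

		evicted = e->next; /* take it off the list */
		pthread_mutex_unlock(&status_lock);

		validate(e); /* heavy work, lock dropped */

		pthread_mutex_lock(&status_lock);
	}
	pthread_mutex_unlock(&status_lock);
}

int main(void)
{
	struct entry b = { NULL, 2 }, a = { &b, 1 };

	evicted = &a;
	drain_evicted();
	return 0;
}
```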
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 9ecb7f663e19..83acb7bd80fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -254,6 +254,9 @@ struct amdgpu_vm {
bool evicting;
unsigned int saved_flags;
+ /* Lock to protect vm_bo add/del/move on all lists of vm */
+ spinlock_t status_lock;
+
/* BOs who needs a validation */
struct list_head evicted;
@@ -268,7 +271,6 @@ struct amdgpu_vm {
/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
- spinlock_t invalidated_lock;
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
@@ -276,6 +278,10 @@ struct amdgpu_vm {
/* BOs which are invalidated, has been updated in the PTs */
struct list_head done;
+ /* PT BOs scheduled to be freed and zero-filled when vm_resv is not held */
+ struct list_head pt_freed;
+ struct work_struct pt_free_work;
+
/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
@@ -471,6 +477,7 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags);
+void amdgpu_vm_pt_free_work(struct work_struct *work);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 88de9f0d4728..358b91243e37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -637,10 +637,34 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
}
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
entry->bo->vm_bo = NULL;
+
+ spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
+ spin_unlock(&entry->vm->status_lock);
amdgpu_bo_unref(&entry->bo);
}
+void amdgpu_vm_pt_free_work(struct work_struct *work)
+{
+ struct amdgpu_vm_bo_base *entry, *next;
+ struct amdgpu_vm *vm;
+ LIST_HEAD(pt_freed);
+
+ vm = container_of(work, struct amdgpu_vm, pt_free_work);
+
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->pt_freed, &pt_freed);
+ spin_unlock(&vm->status_lock);
+
+ /* flush_work in amdgpu_vm_fini ensures vm->root.bo is valid. */
+ amdgpu_bo_reserve(vm->root.bo, true);
+
+ list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
+ amdgpu_vm_pt_free(entry);
+
+ amdgpu_bo_unreserve(vm->root.bo);
+}
+
/**
* amdgpu_vm_pt_free_dfs - free PD/PT levels
*
@@ -652,11 +676,24 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
*/
static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *start)
+ struct amdgpu_vm_pt_cursor *start,
+ bool unlocked)
{
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_bo_base *entry;
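+ /* Without the VM reservation held the page tables cannot be freed
+  * here; park them on vm->pt_freed and let pt_free_work reserve the
+  * root PD and free them.
+  */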
+ if (unlocked) {
+ spin_lock(&vm->status_lock);
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+ list_move(&entry->vm_status, &vm->pt_freed);
+
+ if (start)
+ list_move(&start->entry->vm_status, &vm->pt_freed);
+ spin_unlock(&vm->status_lock);
+ schedule_work(&vm->pt_free_work);
+ return;
+ }
+
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
amdgpu_vm_pt_free(entry);
@@ -673,7 +710,7 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
*/
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- amdgpu_vm_pt_free_dfs(adev, vm, NULL);
+ amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
}
/**
@@ -966,7 +1003,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
if (cursor.entry->bo) {
params->table_freed = true;
amdgpu_vm_pt_free_dfs(adev, params->vm,
- &cursor);
+ &cursor,
+ params->unlocked);
}
amdgpu_vm_pt_next(adev, &cursor);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 1fd3cbca20a2..2b0669c464f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -112,7 +112,8 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
swap(p->vm->last_unlocked, tmp);
dma_fence_put(tmp);
} else {
- amdgpu_bo_fence(p->vm->root.bo, f, true);
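+ /* VM updates are kernel-internal bookkeeping; BOOKKEEP usage keeps
+  * this fence out of implicit synchronization.
+  */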
+ dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
+ DMA_RESV_USAGE_BOOKKEEP);
}
if (fence && !p->immediate)
@@ -211,12 +212,15 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
int r;
/* Wait for PD/PT moves to be completed */
- dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
- DMA_RESV_USAGE_KERNEL, fence) {
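+ /* The reservation is not locked here, so the unlocked iterator is
+  * required; it must be ended on every exit path, including errors.
+  */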
+ dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
r = amdgpu_sync_fence(&p->job->sync, fence);
- if (r)
+ if (r) {
+ dma_resv_iter_end(&cursor);
return r;
+ }
}
+ dma_resv_iter_end(&cursor);
do {
ndw = p->num_dw_left;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index d3b483aa81f8..47159e9a0884 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -392,12 +392,20 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
}
/**
+ * Only init hive->reset_domain for a non-SRIOV configuration. For SRIOV,
+ * the host driver decides how to reset the GPU, either through FLR or
+ * chain reset. The guest side will get individual notifications from
+ * the host for the FLR if necessary.
+ */
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
* Avoid recreating reset domain when hive is reconstructed for the case
- * of reset the devices in the XGMI hive during probe for SRIOV
+ * of resetting the devices in the XGMI hive during probe for a passthrough GPU
* See https://www.spinics.net/lists/amd-gfx/msg58836.html
*/
- if (adev->reset_domain->type != XGMI_HIVE) {
- hive->reset_domain = amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
+ if (adev->reset_domain->type != XGMI_HIVE) {
+ hive->reset_domain =
+ amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
if (!hive->reset_domain) {
dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
ret = -ENOMEM;
@@ -406,9 +414,10 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
hive = NULL;
goto pro_end;
}
- } else {
- amdgpu_reset_get_reset_domain(adev->reset_domain);
- hive->reset_domain = adev->reset_domain;
+ } else {
+ amdgpu_reset_get_reset_domain(adev->reset_domain);
+ hive->reset_domain = adev->reset_domain;
+ }
}
hive->hive_id = adev->gmc.xgmi.hive_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 552e6fb55aa8..30dcc1681b4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -43,6 +43,7 @@ struct amdgpu_hive_info {
} pstate;
struct amdgpu_reset_domain *reset_domain;
+ uint32_t device_remove_count;
};
struct amdgpu_pcs_ras_field {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5647f13b98d4..cbca9866645c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -309,14 +309,10 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
*/
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index e4dde41f2f68..af94ac580d3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3943,56 +3943,6 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
DRM_WARN_ONCE("CP firmware version too old, please update!");
}
-
-static void gfx_v10_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
-static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_2 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
- adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
- adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
- adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
-}
-
-static void gfx_v10_0_init_tap_delays_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_4 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
- adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
- adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
-}
-
static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
{
bool ret = false;
@@ -4028,12 +3978,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
char fw_name[40];
char *wks = "";
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
@@ -4091,9 +4036,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -4102,9 +4045,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -4113,69 +4054,27 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
if (!amdgpu_sriov_vf(adev)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
+ /* Don't fail on a validation error here; there are firmware images
+ * in the wild with an incorrect size in the header.
+ */
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ dev_dbg(adev->dev,
+ "gfx10: amdgpu_ucode_validate() failed \"%s\"\n",
+ fw_name);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+ if (err)
goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (version_major == 2) {
- if (version_minor >= 1)
- gfx_v10_0_init_rlc_ext_microcode(adev);
- if (version_minor >= 2)
- gfx_v10_0_init_rlc_iram_dram_microcode(adev);
- if (version_minor == 4) {
- gfx_v10_0_init_tap_delays_microcode(adev);
- }
- }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
@@ -4185,9 +4084,8 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -4195,164 +4093,18 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
}
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
- info->fw = adev->gfx.ce_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- if (info->fw) {
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- if (adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
- adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
-
- if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
- adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
- }
-
- }
-
- if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
- }
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- if (adev->gfx.mec2_fw) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- info->fw = adev->gfx.mec2_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
- info->fw = adev->gfx.mec2_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- }
- }
-
gfx_v10_0_check_fw_write_wait(adev);
out:
if (err) {
dev_err(adev->dev,
- "gfx10: Failed to load firmware \"%s\"\n",
+ "gfx10: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index fa718318568e..251109723ab6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -438,61 +438,12 @@ static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
-static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
-static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_2 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
- adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
- adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
- adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
-}
-
-static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_3 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
- adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
- adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
- adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
-}
-
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
char ucode_prefix[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
- const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
@@ -513,14 +464,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw->data, 2, 0);
if (adev->gfx.rs64_enable) {
dev_info(adev->dev, "CP RS64 enable\n");
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
@@ -531,14 +479,11 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
}
if (!amdgpu_sriov_vf(adev)) {
@@ -547,58 +492,14 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+ if (err)
goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (version_major == 2) {
- if (version_minor >= 1)
- gfx_v11_0_init_rlc_ext_microcode(adev);
- if (version_minor >= 2)
- gfx_v11_0_init_rlc_iram_dram_microcode(adev);
- if (version_minor == 3)
- gfx_v11_0_init_rlcp_rlcv_microcode(adev);
- }
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
@@ -609,190 +510,23 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
} else {
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
}
/* only one MEC for gfx 11.0.0. */
adev->gfx.mec2_fw = NULL;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- if (adev->gfx.rs64_enable) {
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK];
- info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
- } else {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
- }
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- if (info->fw) {
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- if (adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
- adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
- }
-
- if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
- }
- }
-
out:
if (err) {
dev_err(adev->dev,
- "gfx11: Failed to load firmware \"%s\"\n",
+ "gfx11: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
@@ -5240,6 +4974,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 reg, data;
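+ /* The RLC SPM registers are not accessible while GFX is in GFXOFF,
+  * so keep GFXOFF disabled around the register access.
+  */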
+ amdgpu_gfx_off_ctrl(adev, false);
+
reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
@@ -5253,6 +4989,8 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
else
WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
+
+ amdgpu_gfx_off_ctrl(adev, true);
}
static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1d6d3a852a0b..0320be4a5fc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1091,27 +1091,6 @@ static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
kfree(adev->gfx.rlc.register_list_format);
}
-static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
-{
- const struct rlc_firmware_header_v2_1 *rlc_hdr;
-
- rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
- adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
- adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
- adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
- adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
- adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
- adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
- adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
- adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
- adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
- adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
- adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
- adev->gfx.rlc.reg_list_format_direct_reg_list_length =
- le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
-}
-
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
adev->gfx.me_fw_write_wait = false;
@@ -1273,9 +1252,6 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
@@ -1284,9 +1260,7 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -1295,9 +1269,7 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.me_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -1306,37 +1278,12 @@ static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
- info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
- info->fw = adev->gfx.pfp_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
- info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
- info->fw = adev->gfx.me_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
- info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
- info->fw = adev->gfx.ce_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
out:
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
@@ -1353,11 +1300,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
- unsigned int *tmp = NULL;
- unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
uint32_t smu_version;
@@ -1386,92 +1329,17 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err)
+ goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
- if (version_major == 2 && version_minor == 1)
- adev->gfx.rlc.is_rlc_v2_1 = true;
-
- adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
- adev->gfx.rlc.save_and_restore_offset =
- le32_to_cpu(rlc_hdr->save_and_restore_offset);
- adev->gfx.rlc.clear_state_descriptor_offset =
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
- adev->gfx.rlc.avail_scratch_ram_locations =
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
- adev->gfx.rlc.reg_restore_list_size =
- le32_to_cpu(rlc_hdr->reg_restore_list_size);
- adev->gfx.rlc.reg_list_format_start =
- le32_to_cpu(rlc_hdr->reg_list_format_start);
- adev->gfx.rlc.reg_list_format_separate_start =
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
- adev->gfx.rlc.starting_offsets_start =
- le32_to_cpu(rlc_hdr->starting_offsets_start);
- adev->gfx.rlc.reg_list_format_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
- adev->gfx.rlc.reg_list_size_bytes =
- le32_to_cpu(rlc_hdr->reg_list_size_bytes);
- adev->gfx.rlc.register_list_format =
- kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
- adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
- if (!adev->gfx.rlc.register_list_format) {
- err = -ENOMEM;
- goto out;
- }
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
- adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
-
- adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
-
- tmp = (unsigned int *)((uintptr_t)rlc_hdr +
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
- adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
-
- if (adev->gfx.rlc.is_rlc_v2_1)
- gfx_v9_0_init_rlc_ext_microcode(adev);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
- info->fw = adev->gfx.rlc_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-
- if (adev->gfx.rlc.is_rlc_v2_1 &&
- adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
- adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
- adev->gfx.rlc.save_restore_list_srm_size_bytes) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
- info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
- }
- }
-
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.rlc_fw);
adev->gfx.rlc_fw = NULL;
@@ -1494,9 +1362,6 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
{
char fw_name[30];
int err;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct gfx_firmware_header_v1_0 *cp_hdr;
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec.bin", chip_name);
@@ -1509,10 +1374,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
-
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
@@ -1525,12 +1388,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
@@ -1540,49 +1399,12 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
- info->fw = adev->gfx.mec_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
- info->fw = adev->gfx.mec_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- if (adev->gfx.mec2_fw) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- info->fw = adev->gfx.mec2_fw;
- header = (const struct common_firmware_header *)info->fw->data;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
-
- /* TODO: Determine if MEC2 JT FW loading can be removed
- for all GFX V9 asic and above */
- if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
- info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
- info->fw = adev->gfx.mec2_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
- PAGE_SIZE);
- }
- }
- }
-
out:
gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
if (err) {
dev_err(adev->dev,
- "gfx9: Failed to load firmware \"%s\"\n",
+ "gfx9: Failed to init firmware \"%s\"\n",
fw_name);
release_firmware(adev->gfx.mec_fw);
adev->gfx.mec_fw = NULL;
@@ -5607,7 +5429,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
BUG_ON(offset > ring->buf_mask);
BUG_ON(ring->ring[offset] != 0x55aa55aa);
- cur = (ring->wptr & ring->buf_mask) - 1;
+ cur = (ring->wptr - 1) & ring->buf_mask;
if (likely(cur > offset))
ring->ring[offset] = cur - offset;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 4603653916f5..67ca16a8027c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1103,10 +1103,13 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
*flags |= AMDGPU_PDE_BFS(0x9);
} else if (level == AMDGPU_VM_PDB0) {
- if (*flags & AMDGPU_PDE_PTE)
+ if (*flags & AMDGPU_PDE_PTE) {
*flags &= ~AMDGPU_PDE_PTE;
- else
+ if (!(*flags & AMDGPU_PTE_VALID))
+ *addr |= 1 << PAGE_SHIFT;
+ } else {
*flags |= AMDGPU_PTE_TF;
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c
index 536dafb57ee0..fc69c1a29e23 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c
@@ -22,6 +22,7 @@
*/
#include "amdgpu.h"
#include "amdgpu_imu.h"
+#include "imu_v11_0_3.h"
#include "gc/gc_11_0_3_offset.h"
#include "gc/gc_11_0_3_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index b64cd46a159a..5cec6b259b7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -187,6 +187,19 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
mes_add_queue_pkt.trap_en = 1;
+ /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
+ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
+ mes_add_queue_pkt.gds_size = input->queue_size;
+
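+ /* Force trap_en unless MES firmware >= 4 is running on GC 11.0.0 - 11.0.3 */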
+ if (!(((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 4) &&
+ (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) &&
+ (adev->ip_versions[GC_HWIP][0] <= IP_VERSION(11, 0, 3))))
+ mes_add_queue_pkt.trap_en = 1;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
offsetof(union MESAPI__ADD_QUEUE, api_status));
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b465baa26762..aa761ff3a5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -380,6 +380,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
WREG32_PCIE(smnPCIE_LC_CNTL, data);
}
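+/* nbio_v2_3_program_ltr() is only called from the ASPM path below, so
+ * compile it out when PCIe ASPM support is not built in.
+ */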
+#ifdef CONFIG_PCIEASPM
static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -401,9 +402,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -459,7 +462,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v2_3_program_ltr(adev);
+ /* Don't program LTR if it is not enabled
+ * in the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v2_3_program_ltr(adev);
def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -483,6 +489,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index f7f6ddebd3e4..37615a77287b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -282,6 +282,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
+#ifdef CONFIG_PCIEASPM
static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -303,9 +304,11 @@ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -361,7 +364,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v6_1_program_ltr(adev);
+ /* Don't program LTR if it is not enabled
+ * in the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v6_1_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -385,6 +391,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 11848d1e238b..19455a725939 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -673,6 +673,7 @@ struct amdgpu_nbio_ras nbio_v7_4_ras = {
};
+#ifdef CONFIG_PCIEASPM
static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -694,9 +695,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
@@ -755,7 +758,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v7_4_program_ltr(adev);
+ /* Don't program LTR if LTR is not enabled
+ * along the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v7_4_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -779,6 +785,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index f30bc826a878..def89379b51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -28,6 +28,14 @@
#include "nbio/nbio_7_7_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
+static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
@@ -336,4 +344,5 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
+ .remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
};
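nbio_v7_7 previously had no remap_hdp_registers callback; the new hook points the BIF HDP memory/register flush controls at the device's remapped MMIO window, matching the pattern the other nbio versions already implement. A rough user-space model of the computation, assuming placeholder register offsets (only the rmmio_remap.reg_offset + KFD offset arithmetic mirrors the hunk above):

#include <stdint.h>
#include <stdio.h>

/* illustrative offsets; the real KFD_MMIO_REMAP_HDP_* values come from
 * uapi/linux/kfd_ioctl.h, and the BIF_BX0_* registers from the 7.7 headers */
#define REMAP_HDP_MEM_FLUSH_CNTL 0x0
#define REMAP_HDP_REG_FLUSH_CNTL 0x4

static void wreg32(uint32_t reg, uint32_t val)	/* stands in for WREG32_SOC15 */
{
	printf("reg 0x%08x <- 0x%08x\n", reg, val);
}

/* point both HDP flush trigger registers into the remapped MMIO window */
static void remap_hdp_registers(uint32_t rmmio_remap_reg_offset)
{
	wreg32(0x10 /* placeholder for BIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL */,
	       rmmio_remap_reg_offset + REMAP_HDP_MEM_FLUSH_CNTL);
	wreg32(0x14 /* placeholder for BIF_BX0_REMAP_HDP_REG_FLUSH_CNTL */,
	       rmmio_remap_reg_offset + REMAP_HDP_REG_FLUSH_CNTL);
}

int main(void)
{
	remap_hdp_registers(0x40000);	/* hypothetical remap base */
	return 0;
}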
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 5b5b1ef0c2b1..21d822b1d589 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -224,6 +224,12 @@ static int psp_v13_0_bootloader_load_dbg_drv(struct psp_context *psp)
return psp_v13_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV);
}
+static int psp_v13_0_bootloader_load_ras_drv(struct psp_context *psp)
+{
+ return psp_v13_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV);
+}
+
static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
{
int ret;
@@ -720,6 +726,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
.bootloader_load_soc_drv = psp_v13_0_bootloader_load_soc_drv,
.bootloader_load_intf_drv = psp_v13_0_bootloader_load_intf_drv,
.bootloader_load_dbg_drv = psp_v13_0_bootloader_load_dbg_drv,
+ .bootloader_load_ras_drv = psp_v13_0_bootloader_load_ras_drv,
.bootloader_load_sos = psp_v13_0_bootloader_load_sos,
.ring_init = psp_v13_0_ring_init,
.ring_create = psp_v13_0_ring_create,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 6bdffdc1c0b9..c52d246a1d96 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -342,14 +342,10 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
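Every sdma_vX_Y_gfx_stop() (and si_dma_stop() later in this patch) now calls a shared amdgpu_sdma_unset_buffer_funcs_helper() instead of open-coding comparisons against a fixed number of rings. The helper itself lives in amdgpu_sdma.c and is not shown in this hunk; the following is a hedged stand-alone model of what it plausibly folds together, with stub types in place of the amdgpu structs:

#include <stdbool.h>

#define MAX_SDMA_INSTANCES 16

struct ring { int id; };

struct device {
	int num_sdma_instances;
	struct ring sdma_ring[MAX_SDMA_INSTANCES];
	struct ring *buffer_funcs_ring;		/* ring TTM uses for buffer moves */
	bool buffer_funcs_enabled;
};

/* stand-in for amdgpu_ttm_set_buffer_funcs_status() */
static void set_buffer_funcs_status(struct device *dev, bool enable)
{
	dev->buffer_funcs_enabled = enable;
}

/* model of amdgpu_sdma_unset_buffer_funcs_helper(): disable TTM's use of
 * SDMA iff one of this device's SDMA rings is the buffer-move ring */
static void unset_buffer_funcs_helper(struct device *dev)
{
	int i;

	for (i = 0; i < dev->num_sdma_instances; i++) {
		if (dev->buffer_funcs_ring == &dev->sdma_ring[i]) {
			set_buffer_funcs_status(dev, false);
			break;
		}
	}
}

int main(void)
{
	struct device dev = { .num_sdma_instances = 2 };

	dev.buffer_funcs_ring = &dev.sdma_ring[1];
	dev.buffer_funcs_enabled = true;
	unset_buffer_funcs_helper(&dev);
	return dev.buffer_funcs_enabled ? 1 : 0;	/* 0 once disabled */
}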
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 2584fa3cb13e..486d9b5c1b9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -516,14 +516,10 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 0cf9d3b486b2..298fa11702e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -561,44 +561,6 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
}
}
-static int sdma_v4_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v1_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
- /* arcturus shares the same FW memory across
- all SDMA instances */
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
- adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
- break;
- }
-
- memset((void *)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v4_0_init_microcode - load ucode images from disk
*
@@ -615,9 +577,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[30];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
+ int ret, i;
DRM_DEBUG("\n");
@@ -656,58 +616,25 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
-
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (i == 0)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
/* Arcturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- }
- else {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
-
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[i]);
- if (err)
- goto out;
- }
- }
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
+ break;
+ } else {
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ if (ret)
+ return ret;
}
}
-out:
- if (err) {
- DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v4_0_destroy_inst_ctx(adev);
- }
- return err;
+ return ret;
}
/**
@@ -988,18 +915,12 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
- int i, unset = 0;
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sdma[i] = &adev->sdma.instance[i].ring;
+ int i;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = 1;
- }
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
@@ -1030,20 +951,12 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
- bool unset = false;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- sdma[i] = &adev->sdma.instance[i].page;
-
- if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (!unset)) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = true;
- }
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
RB_ENABLE, 0);
@@ -1504,6 +1417,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
WREG32_SDMA(i, mmSDMA0_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+
/* unhalt engine */
temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
@@ -1995,7 +1913,11 @@ static int sdma_v4_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->sdma.instance[i].page);
}
- sdma_v4_0_destroy_inst_ctx(adev);
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0) ||
+ adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
+ else
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
return 0;
}
@@ -2018,8 +1940,11 @@ static int sdma_v4_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
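sdma_v4_0 (and v5_0, v5_2, v6_0 below) now delegate firmware setup to amdgpu_sdma_init_microcode(adev, fw_name, instance, duplicate_fw) and teardown to amdgpu_sdma_destroy_inst_ctx(adev, duplicate_fw); both helpers live in amdgpu_sdma.c, outside this hunk. A sketch of the duplicate_fw contract as the call sites imply it — load and parse once, alias the blob across instances that share one image, and free it exactly once — using stand-in types and a fake parser:

#include <stdlib.h>

#define MAX_INST 8

struct sdma_inst { void *fw; unsigned fw_version; };
struct dev { int num_inst; struct sdma_inst inst[MAX_INST]; };

static int load_and_parse(struct sdma_inst *s, const char *name)
{
	(void)name;
	s->fw = malloc(16);		/* pretend request_firmware() */
	if (!s->fw)
		return -1;
	s->fw_version = 42;		/* pretend ucode header parse */
	return 0;
}

/* model of amdgpu_sdma_init_microcode(): when duplicate_fw is set, one
 * image is loaded and every instance aliases instance 0's context */
static int sdma_init_microcode(struct dev *d, const char *name,
			       int inst, int duplicate_fw)
{
	int i, r = load_and_parse(&d->inst[inst], name);

	if (r)
		return r;
	if (duplicate_fw)
		for (i = 1; i < d->num_inst; i++)
			d->inst[i] = d->inst[0];
	return 0;
}

/* model of amdgpu_sdma_destroy_inst_ctx(): free the shared blob once */
static void sdma_destroy_inst_ctx(struct dev *d, int duplicate_fw)
{
	int i;

	for (i = 0; i < d->num_inst; i++) {
		if (!duplicate_fw || i == 0)
			free(d->inst[i].fw);
		d->inst[i].fw = NULL;
	}
}

int main(void)
{
	struct dev d = { .num_inst = 4 };

	if (sdma_init_microcode(&d, "sdma.bin", 0, 1))
		return 1;
	sdma_destroy_inst_ctx(&d, 1);
	return 0;
}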
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index a019ac92edb7..d4d9f196db83 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -240,10 +240,7 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
- const struct sdma_firmware_header_v1_0 *hdr;
+ int ret, i;
if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
return 0;
@@ -272,38 +269,12 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
- if (err)
- goto out;
- hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
- adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
- if (adev->sdma.instance[i].feature_version >= 20)
- adev->sdma.instance[i].burst_nop = true;
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
+ ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ if (ret)
+ return ret;
}
-out:
- if (err) {
- DRM_ERROR("sdma_v5_0: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
- }
- return err;
+
+ return ret;
}
static unsigned sdma_v5_0_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -613,14 +584,10 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1465,12 +1432,10 @@ static int sdma_v5_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
-
+ for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- }
+
+ amdgpu_sdma_destroy_inst_ctx(adev, false);
return 0;
}
@@ -1491,8 +1456,11 @@ static int sdma_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v5_0_ctx_switch_enable(adev, false);
sdma_v5_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 95689ef4be10..809eca54fc61 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -89,33 +89,6 @@ static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-static int sdma_v5_2_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v1_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v5_2_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- release_firmware(adev->sdma.instance[0].fw);
-
- memset((void *)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v5_2_init_microcode - load ucode images from disk
*
@@ -132,9 +105,6 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
char fw_name[40];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct common_firmware_header *header = NULL;
DRM_DEBUG("\n");
@@ -169,42 +139,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v5_2_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++)
- memcpy((void *)&adev->sdma.instance[i],
- (void *)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
-
- if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 0)))
- return 0;
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
- info->fw = adev->sdma.instance[i].fw;
- header = (const struct common_firmware_header *)info->fw->data;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
- }
- }
-
-out:
- if (err) {
- DRM_ERROR("sdma_v5_2: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v5_2_destroy_inst_ctx(adev);
- }
- return err;
+ return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
}
static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -479,18 +414,10 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
- struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
- struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1) ||
- (adev->mman.buffer_funcs_ring == sdma2) ||
- (adev->mman.buffer_funcs_ring == sdma3))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -1406,7 +1333,7 @@ static int sdma_v5_2_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- sdma_v5_2_destroy_inst_ctx(adev);
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
return 0;
}
@@ -1422,8 +1349,11 @@ static int sdma_v5_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v5_2_ctx_switch_enable(adev, false);
sdma_v5_2_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 7ae572a08cb3..da3beb0bf2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -78,33 +78,6 @@ static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-static int sdma_v6_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
-{
- int err = 0;
- const struct sdma_firmware_header_v2_0 *hdr;
-
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
- hdr = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
- sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
- sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
-
- if (sdma_inst->feature_version >= 20)
- sdma_inst->burst_nop = true;
-
- return 0;
-}
-
-static void sdma_v6_0_destroy_inst_ctx(struct amdgpu_device *adev)
-{
- release_firmware(adev->sdma.instance[0].fw);
-
- memset((void*)adev->sdma.instance, 0,
- sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
-}
-
/**
* sdma_v6_0_init_microcode - load ucode images from disk
*
@@ -114,16 +87,10 @@ static void sdma_v6_0_destroy_inst_ctx(struct amdgpu_device *adev)
* the driver (not loaded into hw).
* Returns 0 on success, error on failure.
*/
-
-// emulation only, won't work on real chip
-// sdma 6.0.0 real chip need to use PSP to load firmware
static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[30];
char ucode_prefix[30];
- int err = 0, i;
- struct amdgpu_firmware_info *info = NULL;
- const struct sdma_firmware_header_v2_0 *sdma_hdr;
DRM_DEBUG("\n");
@@ -131,43 +98,7 @@ static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = sdma_v6_0_init_inst_ctx(&adev->sdma.instance[0]);
- if (err)
- goto out;
-
- for (i = 1; i < adev->sdma.num_instances; i++) {
- memcpy((void*)&adev->sdma.instance[i],
- (void*)&adev->sdma.instance[0],
- sizeof(struct amdgpu_sdma_instance));
- }
-
- DRM_DEBUG("psp_load == '%s'\n",
- adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- sdma_hdr = (const struct sdma_firmware_header_v2_0 *)adev->sdma.instance[0].fw->data;
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
- info->fw = adev->sdma.instance[0].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
- info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
- info->fw = adev->sdma.instance[0].fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
- }
-
-out:
- if (err) {
- DRM_ERROR("sdma_v6_0: Failed to load firmware \"%s\"\n", fw_name);
- sdma_v6_0_destroy_inst_ctx(adev);
- }
- return err;
+ return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
}
static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
@@ -467,14 +398,10 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
*/
static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl, ib_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
@@ -484,9 +411,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
}
-
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -915,7 +839,8 @@ static int sdma_v6_0_mqd_init(struct amdgpu_device *adev, void *mqd,
m->sdmax_rlcx_rb_cntl =
order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
- 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+ 4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+ 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
@@ -1370,27 +1295,27 @@ static int sdma_v6_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
- sdma_v6_0_destroy_inst_ctx(adev);
+ amdgpu_sdma_destroy_inst_ctx(adev, true);
return 0;
}
static int sdma_v6_0_hw_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = sdma_v6_0_start(adev);
-
- return r;
+ return sdma_v6_0_start(adev);
}
static int sdma_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }
sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index f675111ace20..4d5e718540aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -116,15 +116,14 @@ static void si_dma_stop(struct amdgpu_device *adev)
u32 rb_cntl;
unsigned i;
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
/* dma0 */
rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
rb_cntl &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index fde6154f2009..183024d7c184 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1211,25 +1211,6 @@ static int soc15_common_sw_fini(void *handle)
return 0;
}
-static void soc15_doorbell_range_init(struct amdgpu_device *adev)
-{
- int i;
- struct amdgpu_ring *ring;
-
- /* sdma/ih doorbell ranges are programmed by the hypervisor */
- if (!amdgpu_sriov_vf(adev)) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
- adev->nbio.funcs->sdma_doorbell_range(adev, i,
- ring->use_doorbell, ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
- }
-
- adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
- adev->irq.ih.doorbell_index);
- }
-}
-
static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1249,12 +1230,6 @@ static int soc15_common_hw_init(void *handle)
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
- /* HW doorbell routing policy: doorbell writing not
- * in SDMA/IH/MM/ACV range will be routed to CP. So
- * we need to init SDMA/IH/MM/ACV doorbell range prior
- * to CP ip block init and ring test.
- */
- soc15_doorbell_range_init(adev);
return 0;
}
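soc15_doorbell_range_init() is deleted because its two halves move into the blocks that own the doorbells: the SDMA ranges are now programmed in sdma_v4_0_start() (see the hunk above) and the IH range in vega10/vega20 ih_irq_init() (hunks below), each still skipped under SR-IOV where the hypervisor programs the ranges. A compact model of the resulting per-block shape, with stub callbacks standing in for the nbio function table:

#include <stdbool.h>

struct dev;

struct nbio_funcs {
	void (*sdma_doorbell_range)(struct dev *d, int inst, bool use,
				    int index, int range);
	void (*ih_doorbell_range)(struct dev *d, bool use, int index);
};

struct dev {
	bool is_sriov_vf;
	int num_sdma;
	const struct nbio_funcs *nbio;
};

/* in the SDMA block's start path, per instance */
static void sdma_start_doorbells(struct dev *d)
{
	int i;

	if (d->is_sriov_vf)	/* hypervisor owns the ranges */
		return;
	for (i = 0; i < d->num_sdma; i++)
		d->nbio->sdma_doorbell_range(d, i, true,
					     /* index */ i, /* range */ 4);
}

/* in the IH block's irq init path */
static void ih_init_doorbell(struct dev *d)
{
	if (!d->is_sriov_vf)
		d->nbio->ih_doorbell_range(d, true, /* index */ 0);
}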
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index a26c5723c46e..795706b3b092 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -421,6 +421,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
return false;
default:
@@ -628,6 +629,8 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags =
AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x1;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 939cb203f7ad..f17d297b594b 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -327,10 +327,9 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
return;
}
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
@@ -343,10 +342,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
ADDR_OF_256B_BLOCK(channel_index) |
OFFSET_IN_256B_BLOCK(err_addr);
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
+ amdgpu_umc_fill_error_record(err_data, err_addr,
retired_page, channel_index, umc_inst);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index bf7524f16b66..5d5d031c9e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -187,20 +187,51 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
}
}
+static void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ uint32_t channel_index;
+ uint64_t soc_pa, retired_page, column;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+ /* translate umc channel address to soc pa, 3 parts are included */
+ soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* The umc channel bits are not original values, they are hashed */
+ SET_CHANNEL_HASH(channel_index, soc_pa);
+
+ /* clear [C4 C3 C2] in soc physical address */
+ soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
+ /* loop for all possibilities of [C4 C3 C2] */
+ for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+ retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+
+ /* shift R14 bit */
+ retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+ }
+}
+
static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column;
- uint32_t channel_index;
+ uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
@@ -209,42 +240,15 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* The umc channel bits are not original values, they are hashed */
- SET_CHANNEL_HASH(channel_index, soc_pa);
-
- /* clear [C4 C3 C2] in soc physical address */
- soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
-
- /* shift R14 bit */
- retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
- }
- }
+ umc_v6_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
}
@@ -452,14 +456,11 @@ static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
- uint32_t umc_reg_offset,
- uint32_t ch_inst,
+ uint32_t umc_reg_offset, uint32_t ch_inst,
uint32_t umc_inst)
{
uint32_t mc_umc_status_addr;
- uint32_t channel_index;
- uint64_t mc_umc_status, mc_umc_addrt0;
- uint64_t err_addr, soc_pa, retired_page, column;
+ uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -477,45 +478,15 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
return;
}
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
-
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
- err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+ err_addr =
+ REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* The umc channel bits are not original values, they are hashed */
- SET_CHANNEL_HASH(channel_index, soc_pa);
-
- /* clear [C4 C3 C2] in soc physical address */
- soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1) {
- /* loop for all possibilities of [C4 C3 C2] */
- for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
- retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
-
- /* shift R14 bit */
- retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
- }
- }
+ umc_v6_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
/* clear umc status */
@@ -540,8 +511,7 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
ch_inst);
umc_v6_7_query_error_address(adev,
err_data,
- umc_reg_offset,
- ch_inst,
+ umc_reg_offset, ch_inst,
umc_inst);
}
}
@@ -583,4 +553,5 @@ struct amdgpu_umc_ras umc_v6_7_ras = {
.query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
.ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
.ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
+ .convert_ras_error_address = umc_v6_7_convert_error_address,
};
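Two changes run through all the UMC hunks: the CECC half of the status check is dropped, so only uncorrectable errors produce address records, and the page-retirement math is factored into convert_error_address(), now also exported through the new .convert_ras_error_address hook. The interesting part is the address fan-out; below is a self-contained model of the [C4 C3 C2] column sweep plus the R14 row flip, assuming placeholder bit positions rather than the real UMC_V6_7_* constants:

#include <stdint.h>
#include <stdio.h>

/* illustrative bit positions, not the real UMC_V6_7_* values */
#define PA_C2_BIT	15
#define PA_R14_BIT	34
#define NA_MAP_PA_NUM	8	/* 2^3 settings of [C4 C3 C2] */

static void record(uint64_t pa)
{
	printf("retire candidate PA: 0x%llx\n", (unsigned long long)pa);
}

/* model of the column/row walk in umc_v6_7_convert_error_address() */
static void convert_error_address(uint64_t soc_pa)
{
	uint64_t retired, column;

	soc_pa &= ~(0x7ULL << PA_C2_BIT);	/* clear [C4 C3 C2] */

	for (column = 0; column < NA_MAP_PA_NUM; column++) {
		retired = soc_pa | (column << PA_C2_BIT);
		record(retired);
		record(retired ^ (1ULL << PA_R14_BIT));	/* mirrored row */
	}
}

int main(void)
{
	convert_error_address(0x123450000ULL);
	return 0;
}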
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index 36a2053f2e8b..91235df54e22 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -101,22 +101,16 @@ static void umc_v8_10_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
{
- uint32_t ecc_err_cnt, ecc_err_cnt_addr;
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
/* UMC 8_10 registers */
- ecc_err_cnt_addr =
- SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
- ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
- *error_count +=
- (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) -
- UMC_V8_10_CE_CNT_INIT);
-
- /* Check for SRAM correctable error, MCUMC_STATUS is a 64 bit register */
+ /* Rely on MCUMC_STATUS for the correctable error count.
+ * MCUMC_STATUS is a 64-bit register.
+ */
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
@@ -214,7 +208,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
{
uint64_t mc_umc_status_addr;
uint64_t mc_umc_status, err_addr;
- uint32_t channel_index;
+ uint64_t mc_umc_addrt0, na_err_addr_base;
+ uint64_t na_err_addr, retired_page_addr;
+ uint32_t channel_index, addr_lsb, col = 0;
+ int ret = 0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -235,13 +232,10 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
umc_inst * adev->umc.channel_inst_num +
ch_inst];
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
- uint32_t addr_lsb;
- uint64_t mc_umc_addrt0;
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
@@ -249,32 +243,24 @@ static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
/* the lowest lsb bits should be ignored */
addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);
-
err_addr &= ~((0x1ULL << addr_lsb) - 1);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
- uint64_t na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
- uint64_t na_err_addr, retired_page_addr;
- uint32_t col = 0;
- int ret = 0;
-
- /* loop for all possibilities of [C6 C5] in normal address. */
- for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
- na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
-
- /* Mapping normal error address to retired soc physical address. */
- ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
- na_err_addr, &retired_page_addr);
- if (ret) {
- dev_err(adev->dev, "Failed to map pa from umc na.\n");
- break;
- }
- dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
- retired_page_addr);
- amdgpu_umc_fill_error_record(err_data, na_err_addr,
- retired_page_addr, channel_index, umc_inst);
+ na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
+
+ /* loop for all possibilities of [C6 C5] in normal address. */
+ for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
+ na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);
+
+ /* Mapping normal error address to retired soc physical address. */
+ ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
+ na_err_addr, &retired_page_addr);
+ if (ret) {
+ dev_err(adev->dev, "Failed to map pa from umc na.\n");
+ break;
}
+ dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
+ retired_page_addr);
+ amdgpu_umc_fill_error_record(err_data, na_err_addr,
+ retired_page_addr, channel_index, umc_inst);
}
}
@@ -344,6 +330,31 @@ static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
}
}
+static uint32_t umc_v8_10_query_ras_poison_mode_per_channel(
+ struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_ctrl_addr, ecc_ctrl;
+
+ ecc_ctrl_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl);
+ ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
+ umc_reg_offset) * 4);
+
+ return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn);
+}
+
+static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t umc_reg_offset = 0;
+
+ /* If fatal error reporting is enabled in umc node0 instance0
+ * channel0, the device is considered to be in fatal error mode
+ */
+ umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0);
+ return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
+}
+
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
.query_ras_error_count = umc_v8_10_query_ras_error_count,
.query_ras_error_address = umc_v8_10_query_ras_error_address,
@@ -354,4 +365,5 @@ struct amdgpu_umc_ras umc_v8_10_ras = {
.hw_ops = &umc_v8_10_ras_hw_ops,
},
.err_cnt_init = umc_v8_10_err_cnt_init,
+ .query_ras_poison_mode = umc_v8_10_query_ras_poison_mode,
};
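The new poison-mode query for UMC 8.10 samples a single channel: if UCFatalEn is set in node0 instance0 channel0's GeccCtrl, the part is treated as being in fatal-error mode, and poison mode is reported as the negation. A tiny model of that decision with a stubbed register read (the bit position is a placeholder):

#include <stdbool.h>
#include <stdint.h>

/* stand-in for RREG32_PCIE() of UMCCH0_0_GeccCtrl at channel 0 */
static uint32_t read_gecc_ctrl_ch0(void)
{
	return 0;	/* pretend the UCFatalEn bit (bit 0 here) is clear */
}

static bool query_ras_poison_mode(void)
{
	uint32_t uc_fatal_en = read_gecc_ctrl_ch0() & 0x1;

	/* fatal-error mode and poison mode are mutually exclusive */
	return !uc_fatal_en;
}

int main(void)
{
	return query_ras_poison_mode() ? 0 : 1;
}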
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index f35253e0eaa6..b717fdaa46e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -108,20 +108,35 @@ static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
}
}
+static void umc_v8_7_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ uint64_t retired_page;
+ uint32_t channel_index;
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ amdgpu_umc_fill_error_record(err_data, err_addr,
+ retired_page, channel_index, umc_inst);
+}
+
static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
struct ras_err_data *err_data,
uint32_t ch_inst,
uint32_t umc_inst)
{
- uint64_t mc_umc_status, err_addr, retired_page;
- uint32_t channel_index;
+ uint64_t mc_umc_status, err_addr;
uint32_t eccinfo_table_idx;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
- channel_index =
- adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (mc_umc_status == 0)
@@ -130,24 +145,15 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
if (!err_data->err_addr)
return;
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
- /* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
+ umc_v8_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
}
@@ -324,14 +330,12 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
uint32_t umc_inst)
{
uint32_t lsb, mc_umc_status_addr;
- uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
- uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+ uint64_t mc_umc_status, err_addr, mc_umc_addrt0;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0);
mc_umc_addrt0 =
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
-
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (mc_umc_status == 0)
@@ -343,10 +347,9 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
return;
}
- /* calculate error address if ue/ce error is detected */
+ /* calculate error address if ue error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
/* the lowest lsb bits should be ignored */
@@ -354,16 +357,8 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
err_addr &= ~((0x1ULL << lsb) - 1);
- /* translate umc channel address to soc pa, 3 parts are included */
- retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
- ADDR_OF_256B_BLOCK(channel_index) |
- OFFSET_IN_256B_BLOCK(err_addr);
-
- /* we only save ue error information currently, ce is skipped */
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
- == 1)
- amdgpu_umc_fill_error_record(err_data, err_addr,
- retired_page, channel_index, umc_inst);
+ umc_v8_7_convert_error_address(adev, err_data, err_addr,
+ ch_inst, umc_inst);
}
/* clear umc status */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 39405f0db824..9c8b5fd99037 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1761,21 +1761,23 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
-static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
/* The create msg must be in the first IB submitted */
- if (atomic_read(&p->entity->fence_seq))
+ if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
- drm_sched_entity_modify_sched(p->entity, scheds, 1);
+ drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
return 0;
}
-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *map;
@@ -1846,7 +1848,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;
- r = vcn_v3_0_limit_sched(p);
+ r = vcn_v3_0_limit_sched(p, job);
if (r)
goto out;
}
@@ -1860,7 +1862,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
uint32_t msg_lo = 0, msg_hi = 0;
unsigned i;
int r;
@@ -1879,7 +1881,8 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
msg_hi = val;
} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
val == 0) {
- r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+ r = vcn_v3_0_dec_msg(p, job,
+ ((u64)msg_hi) << 32 | msg_lo);
if (r)
return r;
}
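The VCN patch-in-place path stops dereferencing p->entity, which the amdgpu_cs rework elsewhere in this merge removes, and instead derives both the ring and the scheduler entity from the job under validation; vcn_v4_0 below gets the identical treatment. The first-IB check and the scheduler narrowing are unchanged in spirit; here is a schematic model with stub scheduler types (fence_seq is a plain int here, not the kernel's atomic_t):

struct sched { int id; };

struct entity {
	int fence_seq;		/* stands in for the atomic fence counter */
	struct sched **scheds;
	int num_scheds;
};

struct job { struct entity *entity; };

/* model of vcn_v3_0_limit_sched(): only legal on the very first IB of
 * the entity, then pin the entity to a single decode scheduler */
static int limit_sched(struct job *job, struct sched **decode_sched)
{
	if (job->entity->fence_seq)	/* create msg must be in first IB */
		return -22;		/* -EINVAL */

	job->entity->scheds = decode_sched;
	job->entity->num_scheds = 1;	/* drm_sched_entity_modify_sched() */
	return 0;
}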
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 09c89faa8c27..897a5ce9c9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -150,6 +150,10 @@ static int vcn_v4_0_sw_init(void *handle)
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
fw_shared->sq.is_enabled = 1;
+ fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
+ fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
+ AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;
+
if (amdgpu_sriov_vf(adev))
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
@@ -1591,21 +1595,23 @@ static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
-static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p)
+static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
/* The create msg must be in the first IB submitted */
- if (atomic_read(&p->entity->fence_seq))
+ if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
[AMDGPU_RING_PRIO_0].sched;
- drm_sched_entity_modify_sched(p->entity, scheds, 1);
+ drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
return 0;
}
-static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ uint64_t addr)
{
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *map;
@@ -1676,7 +1682,7 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;
- r = vcn_v4_0_limit_sched(p);
+ r = vcn_v4_0_limit_sched(p, job);
if (r)
goto out;
}
@@ -1689,32 +1695,34 @@ out:
#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
- struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
+ struct amdgpu_vcn_decode_buffer *decode_buffer;
+ uint64_t addr;
uint32_t val;
- int r = 0;
/* The first instance can decode anything */
if (!ring->me)
- return r;
+ return 0;
/* unified queue ib header has 8 double words. */
if (ib->length_dw < 8)
- return r;
+ return 0;
val = amdgpu_ib_get_value(ib, 6); //RADEON_VCN_ENGINE_TYPE
+ if (val != RADEON_VCN_ENGINE_TYPE_DECODE)
+ return 0;
- if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];
+ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];
- if (decode_buffer->valid_buf_flag & 0x1)
- r = vcn_v4_0_dec_msg(p, ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
- decode_buffer->msg_buffer_address_lo);
- }
- return r;
+ if (!(decode_buffer->valid_buf_flag & 0x1))
+ return 0;
+
+ addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+ decode_buffer->msg_buffer_address_lo;
+ return vcn_v4_0_dec_msg(p, job, addr);
}
static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 03b7066471f9..1e83db0c5438 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
}
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
/* enable interrupts */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 2022ffbb8dba..59dfca093155 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
}
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
/* enable interrupts */
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 60a81649cf12..c7118843db05 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -742,7 +742,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbf88fffe, 0x877aff7f,
0x04000000, 0x8f7a857a,
0x886d7a6d, 0xb97b02dc,
- 0x8f7b997b, 0xb97a2a05,
+ 0x8f7b997b, 0xb97a3a05,
0x807a817a, 0xbf0d997b,
0xbf850002, 0x8f7a897a,
0xbf820001, 0x8f7a8a7a,
@@ -819,7 +819,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xbefe037c, 0xbefc0370,
0xf4611c7a, 0xf8000000,
0x80708470, 0xbefc037e,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -1069,7 +1069,7 @@ static const uint32_t cwsr_trap_nv1x_hex[] = {
0xb9f9f816, 0x876f7bff,
0xfffff800, 0x906f8b6f,
0xb9efa2c3, 0xb9f3f801,
- 0xb96e2a05, 0x806e816e,
+ 0xb96e3a05, 0x806e816e,
0xbf0d9972, 0xbf850002,
0x8f6e896e, 0xbf820001,
0x8f6e8a6e, 0xb96f1e06,
@@ -2114,7 +2114,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x007a0000, 0x7e000280,
0xbefe037a, 0xbeff037b,
0xb97b02dc, 0x8f7b997b,
- 0xb97a2a05, 0x807a817a,
+ 0xb97a3a05, 0x807a817a,
0xbf0d997b, 0xbf850002,
0x8f7a897a, 0xbf820001,
0x8f7a8a7a, 0xb97b1e06,
@@ -2157,7 +2157,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0x01000000, 0xe0704100,
0x705d0100, 0xe0704200,
0x705d0200, 0xe0704300,
- 0x705d0300, 0xb9702a05,
+ 0x705d0300, 0xb9703a05,
0x80708170, 0xbf0d9973,
0xbf850002, 0x8f708970,
0xbf820001, 0x8f708a70,
@@ -2189,7 +2189,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbefe03ff, 0x0000ffff,
0xbeff0380, 0xe0704000,
0x705d0200, 0xbefe03c1,
- 0xb9702a05, 0x80708170,
+ 0xb9703a05, 0x80708170,
0xbf0d9973, 0xbf850002,
0x8f708970, 0xbf820001,
0x8f708a70, 0xb97a1e06,
@@ -2475,7 +2475,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xb9ef4803, 0x876f7bff,
0xfffff800, 0x906f8b6f,
0xb9efa2c3, 0xb9f3f801,
- 0xb96e2a05, 0x806e816e,
+ 0xb96e3a05, 0x806e816e,
0xbf0d9972, 0xbf850002,
0x8f6e896e, 0xbf820001,
0x8f6e8a6e, 0xb96f1e06,
@@ -2494,438 +2494,441 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
-
static const uint32_t cwsr_trap_gfx11_hex[] = {
- 0xbfa00001, 0xbfa0021b,
+ 0xbfa00001, 0xbfa0021e,
0xb0804006, 0xb8f8f802,
- 0x91788678, 0xb8fbf803,
- 0x8b6eff78, 0x00002000,
- 0xbfa10009, 0x8b6eff6d,
- 0x00ff0000, 0xbfa2001e,
- 0x8b6eff7b, 0x00000400,
- 0xbfa20041, 0xbf830010,
- 0xb8fbf803, 0xbfa0fffa,
- 0x8b6eff7b, 0x00000900,
- 0xbfa20015, 0x8b6eff7b,
- 0x000071ff, 0xbfa10008,
- 0x8b6fff7b, 0x00007080,
- 0xbfa10001, 0xbeee1287,
- 0xb8eff801, 0x846e8c6e,
- 0x8b6e6f6e, 0xbfa2000a,
+ 0x9178ff78, 0x00020006,
+ 0xb8fbf803, 0xbf0d9f6d,
+ 0xbfa20006, 0x8b6eff78,
+ 0x00002000, 0xbfa10009,
0x8b6eff6d, 0x00ff0000,
- 0xbfa20007, 0xb8eef801,
- 0x8b6eff6e, 0x00000800,
- 0xbfa20003, 0x8b6eff7b,
- 0x00000400, 0xbfa20026,
- 0xbefa4d82, 0xbf89fc07,
- 0x84fa887a, 0xf4005bbd,
- 0xf8000010, 0xbf89fc07,
- 0x846e976e, 0x9177ff77,
- 0x00800000, 0x8c776e77,
- 0xf4045bbd, 0xf8000000,
- 0xbf89fc07, 0xf4045ebd,
- 0xf8000008, 0xbf89fc07,
- 0x8bee6e6e, 0xbfa10001,
- 0xbe80486e, 0x8b6eff6d,
- 0x01ff0000, 0xbfa20005,
- 0x8c78ff78, 0x00002000,
- 0x80ec886c, 0x82ed806d,
- 0xbfa00005, 0x8b6eff6d,
- 0x01000000, 0xbfa20002,
- 0x806c846c, 0x826d806d,
- 0x8b6dff6d, 0x0000ffff,
- 0x8bfe7e7e, 0x8bea6a6a,
- 0xb978f802, 0xbe804a6c,
- 0x8b6dff6d, 0x0000ffff,
- 0xbefa0080, 0xb97a0283,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbefe4d84,
- 0xbf89fc07, 0x8b7aff7f,
- 0x04000000, 0x847a857a,
- 0x8c6d7a6d, 0xbefa007e,
- 0x8b7bff7f, 0x0000ffff,
- 0xbefe00c1, 0xbeff00c1,
- 0xdca6c000, 0x007a0000,
- 0x7e000280, 0xbefe007a,
- 0xbeff007b, 0xb8fb02dc,
- 0x847b997b, 0xb8fa3b05,
- 0x807a817a, 0xbf0d997b,
- 0xbfa20002, 0x847a897a,
- 0xbfa00001, 0x847a8a7a,
- 0xb8fb1e06, 0x847b8a7b,
- 0x807a7b7a, 0x8b7bff7f,
- 0x0000ffff, 0x807aff7a,
- 0x00000200, 0x807a7e7a,
- 0x827b807b, 0xd7610000,
- 0x00010870, 0xd7610000,
- 0x00010a71, 0xd7610000,
- 0x00010c72, 0xd7610000,
- 0x00010e73, 0xd7610000,
- 0x00011074, 0xd7610000,
- 0x00011275, 0xd7610000,
- 0x00011476, 0xd7610000,
- 0x00011677, 0xd7610000,
- 0x00011a79, 0xd7610000,
- 0x00011c7e, 0xd7610000,
- 0x00011e7f, 0xbefe00ff,
- 0x00003fff, 0xbeff0080,
- 0xdca6c040, 0x007a0000,
- 0xd760007a, 0x00011d00,
- 0xd760007b, 0x00011f00,
+ 0xbfa2001e, 0x8b6eff7b,
+ 0x00000400, 0xbfa20041,
+ 0xbf830010, 0xb8fbf803,
+ 0xbfa0fffa, 0x8b6eff7b,
+ 0x00000900, 0xbfa20015,
+ 0x8b6eff7b, 0x000071ff,
+ 0xbfa10008, 0x8b6fff7b,
+ 0x00007080, 0xbfa10001,
+ 0xbeee1287, 0xb8eff801,
+ 0x846e8c6e, 0x8b6e6f6e,
+ 0xbfa2000a, 0x8b6eff6d,
+ 0x00ff0000, 0xbfa20007,
+ 0xb8eef801, 0x8b6eff6e,
+ 0x00000800, 0xbfa20003,
+ 0x8b6eff7b, 0x00000400,
+ 0xbfa20026, 0xbefa4d82,
+ 0xbf89fc07, 0x84fa887a,
+ 0xf4005bbd, 0xf8000010,
+ 0xbf89fc07, 0x846e976e,
+ 0x9177ff77, 0x00800000,
+ 0x8c776e77, 0xf4045bbd,
+ 0xf8000000, 0xbf89fc07,
+ 0xf4045ebd, 0xf8000008,
+ 0xbf89fc07, 0x8bee6e6e,
+ 0xbfa10001, 0xbe80486e,
+ 0x8b6eff6d, 0x01ff0000,
+ 0xbfa20005, 0x8c78ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xbfa00005,
+ 0x8b6eff6d, 0x01000000,
+ 0xbfa20002, 0x806c846c,
+ 0x826d806d, 0x8b6dff6d,
+ 0x0000ffff, 0x8bfe7e7e,
+ 0x8bea6a6a, 0xb978f802,
+ 0xbe804a6c, 0x8b6dff6d,
+ 0x0000ffff, 0xbefa0080,
+ 0xb97a0283, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbefe4d84, 0xbf89fc07,
+ 0x8b7aff7f, 0x04000000,
+ 0x847a857a, 0x8c6d7a6d,
+ 0xbefa007e, 0x8b7bff7f,
+ 0x0000ffff, 0xbefe00c1,
+ 0xbeff00c1, 0xdca6c000,
+ 0x007a0000, 0x7e000280,
0xbefe007a, 0xbeff007b,
- 0xbef4007e, 0x8b75ff7f,
- 0x0000ffff, 0x8c75ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x10807fac,
- 0xbef1007d, 0xbef00080,
- 0xb8f302dc, 0x84739973,
- 0xbefe00c1, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa20002, 0xbeff0080,
- 0xbfa00002, 0xbeff00c1,
- 0xbfa00009, 0xbef600ff,
- 0x01000000, 0xe0685080,
- 0x701d0100, 0xe0685100,
- 0x701d0200, 0xe0685180,
- 0x701d0300, 0xbfa00008,
+ 0xb8fb02dc, 0x847b997b,
+ 0xb8fa3b05, 0x807a817a,
+ 0xbf0d997b, 0xbfa20002,
+ 0x847a897a, 0xbfa00001,
+ 0x847a8a7a, 0xb8fb1e06,
+ 0x847b8a7b, 0x807a7b7a,
+ 0x8b7bff7f, 0x0000ffff,
+ 0x807aff7a, 0x00000200,
+ 0x807a7e7a, 0x827b807b,
+ 0xd7610000, 0x00010870,
+ 0xd7610000, 0x00010a71,
+ 0xd7610000, 0x00010c72,
+ 0xd7610000, 0x00010e73,
+ 0xd7610000, 0x00011074,
+ 0xd7610000, 0x00011275,
+ 0xd7610000, 0x00011476,
+ 0xd7610000, 0x00011677,
+ 0xd7610000, 0x00011a79,
+ 0xd7610000, 0x00011c7e,
+ 0xd7610000, 0x00011e7f,
+ 0xbefe00ff, 0x00003fff,
+ 0xbeff0080, 0xdca6c040,
+ 0x007a0000, 0xd760007a,
+ 0x00011d00, 0xd760007b,
+ 0x00011f00, 0xbefe007a,
+ 0xbeff007b, 0xbef4007e,
+ 0x8b75ff7f, 0x0000ffff,
+ 0x8c75ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x10807fac, 0xbef1007d,
+ 0xbef00080, 0xb8f302dc,
+ 0x84739973, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00002,
+ 0xbeff00c1, 0xbfa00009,
0xbef600ff, 0x01000000,
- 0xe0685100, 0x701d0100,
- 0xe0685200, 0x701d0200,
- 0xe0685300, 0x701d0300,
+ 0xe0685080, 0x701d0100,
+ 0xe0685100, 0x701d0200,
+ 0xe0685180, 0x701d0300,
+ 0xbfa00008, 0xbef600ff,
+ 0x01000000, 0xe0685100,
+ 0x701d0100, 0xe0685200,
+ 0x701d0200, 0xe0685300,
+ 0x701d0300, 0xb8f03b05,
+ 0x80708170, 0xbf0d9973,
+ 0xbfa20002, 0x84708970,
+ 0xbfa00001, 0x84708a70,
+ 0xb8fa1e06, 0x847a8a7a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000200, 0xbef600ff,
+ 0x01000000, 0x7e000280,
+ 0x7e020280, 0x7e040280,
+ 0xbefd0080, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xd7610002, 0x0000fa6c,
+ 0x807d817d, 0x917aff6d,
+ 0x80000000, 0xd7610002,
+ 0x0000fa7a, 0x807d817d,
+ 0xd7610002, 0x0000fa6e,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa6f, 0x807d817d,
+ 0xd7610002, 0x0000fa78,
+ 0x807d817d, 0xb8faf803,
+ 0xd7610002, 0x0000fa7a,
+ 0x807d817d, 0xd7610002,
+ 0x0000fa7b, 0x807d817d,
+ 0xb8f1f801, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xb8f1f814, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xb8f1f815, 0xd7610002,
+ 0x0000fa71, 0x807d817d,
+ 0xbefe00ff, 0x0000ffff,
+ 0xbeff0080, 0xe0685000,
+ 0x701d0200, 0xbefe00c1,
0xb8f03b05, 0x80708170,
0xbf0d9973, 0xbfa20002,
0x84708970, 0xbfa00001,
0x84708a70, 0xb8fa1e06,
0x847a8a7a, 0x80707a70,
- 0x8070ff70, 0x00000200,
0xbef600ff, 0x01000000,
- 0x7e000280, 0x7e020280,
- 0x7e040280, 0xbefd0080,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xd7610002,
- 0x0000fa6c, 0x807d817d,
- 0x917aff6d, 0x80000000,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xd7610002,
- 0x0000fa6e, 0x807d817d,
- 0xd7610002, 0x0000fa6f,
- 0x807d817d, 0xd7610002,
- 0x0000fa78, 0x807d817d,
- 0xb8faf803, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xd7610002, 0x0000fa7b,
- 0x807d817d, 0xb8f1f801,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xb8f1f814,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xb8f1f815,
- 0xd7610002, 0x0000fa71,
- 0x807d817d, 0xbefe00ff,
- 0x0000ffff, 0xbeff0080,
- 0xe0685000, 0x701d0200,
- 0xbefe00c1, 0xb8f03b05,
- 0x80708170, 0xbf0d9973,
- 0xbfa20002, 0x84708970,
- 0xbfa00001, 0x84708a70,
- 0xb8fa1e06, 0x847a8a7a,
- 0x80707a70, 0xbef600ff,
- 0x01000000, 0xbef90080,
- 0xbefd0080, 0xbf800000,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xbe8c410c, 0xbe8e410e,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0xbef90080, 0xbefd0080,
+ 0xbf800000, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xbe8c410c,
+ 0xbe8e410e, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xd7610002, 0x0000f20c,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20d, 0x80798179,
- 0xd7610002, 0x0000f20e,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
0x80798179, 0xd7610002,
- 0x0000f20f, 0x80798179,
- 0xbf06a079, 0xbfa10006,
- 0xe0685000, 0x701d0200,
- 0x8070ff70, 0x00000080,
- 0xbef90080, 0x7e040280,
- 0x807d907d, 0xbf0aff7d,
- 0x00000060, 0xbfa2ffbc,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xd7610002, 0x0000f200,
+ 0x0000f20c, 0x80798179,
+ 0xd7610002, 0x0000f20d,
0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
+ 0x0000f20e, 0x80798179,
+ 0xd7610002, 0x0000f20f,
+ 0x80798179, 0xbf06a079,
+ 0xbfa10006, 0xe0685000,
+ 0x701d0200, 0x8070ff70,
+ 0x00000080, 0xbef90080,
+ 0x7e040280, 0x807d907d,
+ 0xbf0aff7d, 0x00000060,
+ 0xbfa2ffbc, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xd7610002,
+ 0x0000f200, 0x80798179,
+ 0xd7610002, 0x0000f201,
0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
+ 0x0000f202, 0x80798179,
+ 0xd7610002, 0x0000f203,
0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
+ 0x0000f204, 0x80798179,
+ 0xd7610002, 0x0000f205,
0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
+ 0x0000f206, 0x80798179,
+ 0xd7610002, 0x0000f207,
0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
+ 0x0000f208, 0x80798179,
+ 0xd7610002, 0x0000f209,
0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xe0685000, 0x701d0200,
+ 0x0000f20a, 0x80798179,
+ 0xd7610002, 0x0000f20b,
+ 0x80798179, 0xe0685000,
+ 0x701d0200, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00001,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x8b7bc17b, 0xbfa10044,
+ 0xbfbd0000, 0x8b7aff6d,
+ 0x80000000, 0xbfa10040,
+ 0x847b867b, 0x847b827b,
+ 0xbef6007b, 0xb8f03b05,
+ 0x80708170, 0xbf0d9973,
+ 0xbfa20002, 0x84708970,
+ 0xbfa00001, 0x84708a70,
+ 0xb8fa1e06, 0x847a8a7a,
+ 0x80707a70, 0x8070ff70,
+ 0x00000200, 0x8070ff70,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xd71f0000,
+ 0x000100c1, 0xd7200000,
+ 0x000200c1, 0x16000084,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbefd0080,
+ 0xbfa20012, 0xbe8300ff,
+ 0x00000080, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf890000, 0xe0685000,
+ 0x701d0100, 0x807d037d,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000080,
+ 0xbf0a7b7d, 0xbfa2fff4,
+ 0xbfa00011, 0xbe8300ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf890000, 0xe0685000,
+ 0x701d0100, 0x807d037d,
+ 0x80700370, 0xd5250000,
+ 0x0001ff00, 0x00000100,
+ 0xbf0a7b7d, 0xbfa2fff4,
0xbefe00c1, 0x857d9973,
0x8b7d817d, 0xbf06817d,
- 0xbfa20002, 0xbeff0080,
- 0xbfa00001, 0xbeff00c1,
- 0xb8fb4306, 0x8b7bc17b,
- 0xbfa10044, 0xbfbd0000,
- 0x8b7aff6d, 0x80000000,
- 0xbfa10040, 0x847b867b,
- 0x847b827b, 0xbef6007b,
- 0xb8f03b05, 0x80708170,
- 0xbf0d9973, 0xbfa20002,
- 0x84708970, 0xbfa00001,
- 0x84708a70, 0xb8fa1e06,
- 0x847a8a7a, 0x80707a70,
- 0x8070ff70, 0x00000200,
- 0x8070ff70, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xd71f0000, 0x000100c1,
- 0xd7200000, 0x000200c1,
- 0x16000084, 0x857d9973,
+ 0xbfa20004, 0xbef000ff,
+ 0x00000200, 0xbeff0080,
+ 0xbfa00003, 0xbef000ff,
+ 0x00000400, 0xbeff00c1,
+ 0xb8fb3b05, 0x807b817b,
+ 0x847b827b, 0x857d9973,
0x8b7d817d, 0xbf06817d,
- 0xbefd0080, 0xbfa20012,
- 0xbe8300ff, 0x00000080,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf890000,
- 0xe0685000, 0x701d0100,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7d,
- 0xbfa2fff4, 0xbfa00011,
- 0xbe8300ff, 0x00000100,
- 0xbf800000, 0xbf800000,
- 0xbf800000, 0xd8d80000,
- 0x01000000, 0xbf890000,
- 0xe0685000, 0x701d0100,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000100, 0xbf0a7b7d,
- 0xbfa2fff4, 0xbefe00c1,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20004,
- 0xbef000ff, 0x00000200,
- 0xbeff0080, 0xbfa00003,
- 0xbef000ff, 0x00000400,
- 0xbeff00c1, 0xb8fb3b05,
- 0x807b817b, 0x847b827b,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20017,
+ 0xbfa20017, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10037,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
+ 0xe0685000, 0x701d0000,
+ 0xe0685080, 0x701d0100,
+ 0xe0685100, 0x701d0200,
+ 0xe0685180, 0x701d0300,
+ 0x807d847d, 0x8070ff70,
+ 0x00000200, 0xbf0a7b7d,
+ 0xbfa2ffef, 0xbfa00025,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
- 0xbfa10037, 0x7e008700,
+ 0xbfa10011, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xe0685000,
- 0x701d0000, 0xe0685080,
- 0x701d0100, 0xe0685100,
- 0x701d0200, 0xe0685180,
+ 0x701d0000, 0xe0685100,
+ 0x701d0100, 0xe0685200,
+ 0x701d0200, 0xe0685300,
0x701d0300, 0x807d847d,
- 0x8070ff70, 0x00000200,
+ 0x8070ff70, 0x00000400,
0xbf0a7b7d, 0xbfa2ffef,
- 0xbfa00025, 0xbef600ff,
- 0x01000000, 0xbefd0084,
- 0xbf0a7b7d, 0xbfa10011,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
+ 0xb8fb1e06, 0x8b7bc17b,
+ 0xbfa1000c, 0x847b837b,
+ 0x807b7d7b, 0xbefe00c1,
+ 0xbeff0080, 0x7e008700,
0xe0685000, 0x701d0000,
- 0xe0685100, 0x701d0100,
- 0xe0685200, 0x701d0200,
- 0xe0685300, 0x701d0300,
- 0x807d847d, 0x8070ff70,
- 0x00000400, 0xbf0a7b7d,
- 0xbfa2ffef, 0xb8fb1e06,
- 0x8b7bc17b, 0xbfa1000c,
- 0x847b837b, 0x807b7d7b,
- 0xbefe00c1, 0xbeff0080,
- 0x7e008700, 0xe0685000,
- 0x701d0000, 0x807d817d,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7d, 0xbfa2fff8,
- 0xbfa00141, 0xbef4007e,
- 0x8b75ff7f, 0x0000ffff,
- 0x8c75ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x10807fac, 0xb8f202dc,
- 0x84729972, 0x8b6eff7f,
- 0x04000000, 0xbfa1003a,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff8, 0xbfa00146,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xb8f202dc, 0x84729972,
+ 0x8b6eff7f, 0x04000000,
+ 0xbfa1003a, 0xbefe00c1,
+ 0x857d9972, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20002,
+ 0xbeff0080, 0xbfa00001,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x8b6fc16f, 0xbfa1002f,
+ 0x846f866f, 0x846f826f,
+ 0xbef6006f, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa2000c,
+ 0xe0500000, 0x781d0000,
+ 0xbf8903f7, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000080, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7d,
+ 0xbfa2fff5, 0xbfa0000b,
+ 0xe0500000, 0x781d0000,
+ 0xbf8903f7, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7d,
+ 0xbfa2fff5, 0xbef80080,
0xbefe00c1, 0x857d9972,
0x8b7d817d, 0xbf06817d,
0xbfa20002, 0xbeff0080,
0xbfa00001, 0xbeff00c1,
- 0xb8ef4306, 0x8b6fc16f,
- 0xbfa1002f, 0x846f866f,
- 0x846f826f, 0xbef6006f,
- 0xb8f83b05, 0x80788178,
- 0xbf0d9972, 0xbfa20002,
- 0x84788978, 0xbfa00001,
- 0x84788a78, 0xb8ee1e06,
- 0x846e8a6e, 0x80786e78,
+ 0xb8ef3b05, 0x806f816f,
+ 0x846f826f, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20024, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa2000c, 0xe0500000,
- 0x781d0000, 0xbf8903f7,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000080,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7d, 0xbfa2fff5,
- 0xbfa0000b, 0xe0500000,
- 0x781d0000, 0xbf8903f7,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7d, 0xbfa2fff5,
- 0xbef80080, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef3b05,
- 0x806f816f, 0x846f826f,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20024,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000200, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10050,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10050, 0xe0505000,
+ 0x781d0000, 0xe0505080,
+ 0x781d0100, 0xe0505100,
+ 0x781d0200, 0xe0505180,
+ 0x781d0300, 0xbf8903f7,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7d,
+ 0xbfa2ffee, 0xe0505000,
+ 0x6e1d0000, 0xe0505080,
+ 0x6e1d0100, 0xe0505100,
+ 0x6e1d0200, 0xe0505180,
+ 0x6e1d0300, 0xbf8903f7,
+ 0xbfa00034, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10012, 0xe0505000,
+ 0x781d0000, 0xe0505100,
+ 0x781d0100, 0xe0505200,
+ 0x781d0200, 0xe0505300,
+ 0x781d0300, 0xbf8903f7,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7d,
+ 0xbfa2ffee, 0xb8ef1e06,
+ 0x8b6fc16f, 0xbfa1000e,
+ 0x846f836f, 0x806f7d6f,
+ 0xbefe00c1, 0xbeff0080,
0xe0505000, 0x781d0000,
- 0xe0505080, 0x781d0100,
- 0xe0505100, 0x781d0200,
- 0xe0505180, 0x781d0300,
0xbf8903f7, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000200,
- 0xbf0a6f7d, 0xbfa2ffee,
+ 0x807d817d, 0x8078ff78,
+ 0x00000080, 0xbf0a6f7d,
+ 0xbfa2fff7, 0xbeff00c1,
0xe0505000, 0x6e1d0000,
- 0xe0505080, 0x6e1d0100,
- 0xe0505100, 0x6e1d0200,
- 0xe0505180, 0x6e1d0300,
- 0xbf8903f7, 0xbfa00034,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10012,
- 0xe0505000, 0x781d0000,
- 0xe0505100, 0x781d0100,
- 0xe0505200, 0x781d0200,
- 0xe0505300, 0x781d0300,
- 0xbf8903f7, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7d, 0xbfa2ffee,
- 0xb8ef1e06, 0x8b6fc16f,
- 0xbfa1000e, 0x846f836f,
- 0x806f7d6f, 0xbefe00c1,
- 0xbeff0080, 0xe0505000,
- 0x781d0000, 0xbf8903f7,
- 0x7e008500, 0x807d817d,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7d, 0xbfa2fff7,
- 0xbeff00c1, 0xe0505000,
- 0x6e1d0000, 0xe0505100,
- 0x6e1d0100, 0xe0505200,
- 0x6e1d0200, 0xe0505300,
- 0x6e1d0300, 0xbf8903f7,
+ 0xe0505100, 0x6e1d0100,
+ 0xe0505200, 0x6e1d0200,
+ 0xe0505300, 0x6e1d0300,
+ 0xbf8903f7, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x80f8ff78,
+ 0x00000050, 0xbef600ff,
+ 0x01000000, 0xbefd00ff,
+ 0x0000006c, 0x80f89078,
+ 0xf428403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd847d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0x80f8a078,
+ 0xf42c403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd887d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0xbe844304,
+ 0xbe864306, 0x80f8c078,
+ 0xf430403a, 0xf0000000,
+ 0xbf89fc07, 0x80fd907d,
+ 0xbf800000, 0xbe804300,
+ 0xbe824302, 0xbe844304,
+ 0xbe864306, 0xbe884308,
+ 0xbe8a430a, 0xbe8c430c,
+ 0xbe8e430e, 0xbf06807d,
+ 0xbfa1fff0, 0xb980f801,
+ 0x00000000, 0xbfbd0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
- 0xbefd00ff, 0x0000006c,
- 0x80f89078, 0xf428403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd847d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0x80f8a078, 0xf42c403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd887d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0xbe844304, 0xbe864306,
- 0x80f8c078, 0xf430403a,
- 0xf0000000, 0xbf89fc07,
- 0x80fd907d, 0xbf800000,
- 0xbe804300, 0xbe824302,
- 0xbe844304, 0xbe864306,
- 0xbe884308, 0xbe8a430a,
- 0xbe8c430c, 0xbe8e430e,
- 0xbf06807d, 0xbfa1fff0,
- 0xb980f801, 0x00000000,
- 0xbfbd0000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef600ff,
- 0x01000000, 0xf4205bfa,
+ 0xf4205bfa, 0xf0000000,
+ 0x80788478, 0xf4205b3a,
0xf0000000, 0x80788478,
- 0xf4205b3a, 0xf0000000,
- 0x80788478, 0xf4205b7a,
+ 0xf4205b7a, 0xf0000000,
+ 0x80788478, 0xf4205c3a,
0xf0000000, 0x80788478,
- 0xf4205c3a, 0xf0000000,
- 0x80788478, 0xf4205c7a,
+ 0xf4205c7a, 0xf0000000,
+ 0x80788478, 0xf4205eba,
0xf0000000, 0x80788478,
- 0xf4205eba, 0xf0000000,
- 0x80788478, 0xf4205efa,
+ 0xf4205efa, 0xf0000000,
+ 0x80788478, 0xf4205e7a,
0xf0000000, 0x80788478,
- 0xf4205e7a, 0xf0000000,
- 0x80788478, 0xf4205cfa,
+ 0xf4205cfa, 0xf0000000,
+ 0x80788478, 0xf4205bba,
0xf0000000, 0x80788478,
+ 0xbf89fc07, 0xb96ef814,
0xf4205bba, 0xf0000000,
0x80788478, 0xbf89fc07,
- 0xb96ef814, 0xf4205bba,
- 0xf0000000, 0x80788478,
- 0xbf89fc07, 0xb96ef815,
- 0xbefd006f, 0xbefe0070,
- 0xbeff0071, 0x8b6f7bff,
- 0x000003ff, 0xb96f4803,
- 0x8b6f7bff, 0xfffff800,
- 0x856f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee3b05,
- 0x806e816e, 0xbf0d9972,
- 0xbfa20002, 0x846e896e,
- 0xbfa00001, 0x846e8a6e,
- 0xb8ef1e06, 0x846f8a6f,
- 0x806e6f6e, 0x806eff6e,
- 0x00000200, 0x806e746e,
- 0x826f8075, 0x8b6fff6f,
- 0x0000ffff, 0xf4085c37,
- 0xf8000050, 0xf4085d37,
- 0xf8000060, 0xf4005e77,
- 0xf8000074, 0xbf89fc07,
- 0x8b6dff6d, 0x0000ffff,
- 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb96ef815, 0xbefd006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x8b6f7bff, 0x000003ff,
+ 0xb96f4803, 0x8b6f7bff,
+ 0xfffff800, 0x856f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee3b05, 0x806e816e,
+ 0xbf0d9972, 0xbfa20002,
+ 0x846e896e, 0xbfa00001,
+ 0x846e8a6e, 0xb8ef1e06,
+ 0x846f8a6f, 0x806e6f6e,
+ 0x806eff6e, 0x00000200,
+ 0x806e746e, 0x826f8075,
+ 0x8b6fff6f, 0x0000ffff,
+ 0xf4085c37, 0xf8000050,
+ 0xf4085d37, 0xf8000060,
+ 0xf4005e77, 0xf8000074,
+ 0xbf89fc07, 0x8b6dff6d,
+ 0x0000ffff, 0x8bfe7e7e,
+ 0x8bea6a6a, 0xb8eef802,
+ 0xbf0d866e, 0xbfa20002,
+ 0xb97af802, 0xbe80486c,
0xb97af802, 0xbe804a6c,
0xbfb00000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index 250ab007399b..0f81670f6f9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -43,12 +43,14 @@
#define HAVE_XNACK (ASIC_FAMILY < CHIP_SIENNA_CICHLID)
#define HAVE_SENDMSG_RTN (ASIC_FAMILY >= CHIP_PLUM_BONITO)
#define HAVE_BUFFER_LDS_LOAD (ASIC_FAMILY < CHIP_PLUM_BONITO)
+#define SW_SA_TRAP (ASIC_FAMILY >= CHIP_PLUM_BONITO)
var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
var SQ_WAVE_STATUS_ECC_ERR_MASK = 0x20000
+var SQ_WAVE_STATUS_TRAP_EN_SHIFT = 6
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -183,6 +185,13 @@ L_SKIP_RESTORE:
s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+#if SW_SA_TRAP
+ // If ttmp1[31] is set then the trap may occur early.
+ // Spin-wait until the SAVECTX exception is raised.
+ s_bitcmp1_b32 s_save_pc_hi, 31
+ s_cbranch_scc1 L_CHECK_SAVE
+#endif
+
s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
s_cbranch_scc0 L_NOT_HALTED
@@ -1061,8 +1070,20 @@ L_RESTORE_HWREG:
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+
+#if SW_SA_TRAP
+ // If traps are enabled then return to the shader with PRIV=0.
+ // Otherwise retain PRIV=1 for subsequent context save requests.
+ s_getreg_b32 s_restore_tmp, hwreg(HW_REG_STATUS)
+ s_bitcmp1_b32 s_restore_tmp, SQ_WAVE_STATUS_TRAP_EN_SHIFT
+ s_cbranch_scc1 L_RETURN_WITHOUT_PRIV
+
s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ s_setpc_b64 [s_restore_pc_lo, s_restore_pc_hi]
+L_RETURN_WITHOUT_PRIV:
+#endif
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
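
The assembly hunks above are the readable counterpart of the regenerated hex arrays earlier in the diff: on entry the handler spin-waits for the SAVECTX exception when ttmp1[31] is set, and on exit from restore it branches on STATUS.TRAP_EN — traps enabled means returning to the shader through s_rfe_b64 with PRIV dropped, traps disabled means jumping back through s_setpc_b64 so PRIV=1 survives for the next context-save request. A minimal C sketch of that branch; return_without_priv()/return_with_priv() are hypothetical stand-ins for the two exits, the real code being wave-level GFX10/GFX11 assembly:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SQ_WAVE_STATUS_TRAP_EN_SHIFT 6

/* Stand-in for the s_setreg + s_rfe_b64 exit: PRIV is dropped. */
static void return_without_priv(uint32_t status)
{
	printf("s_rfe exit: PRIV=0, status=%#x\n", (unsigned)status);
}

/* Stand-in for the s_setreg + s_setpc_b64 exit: PRIV=1 is retained. */
static void return_with_priv(uint32_t status)
{
	printf("s_setpc exit: PRIV=1, status=%#x\n", (unsigned)status);
}

/* Mirrors the new restore-path branch on STATUS.TRAP_EN. */
static void restore_exit(uint32_t hw_status, uint32_t saved_status)
{
	bool trap_en = (hw_status >> SQ_WAVE_STATUS_TRAP_EN_SHIFT) & 1;

	if (trap_en)
		return_without_priv(saved_status); /* traps on: drop PRIV */
	else
		return_with_priv(saved_status);    /* keep PRIV for next save */
}

int main(void)
{
	restore_exit(1u << SQ_WAVE_STATUS_TRAP_EN_SHIFT, 0x2000);
	restore_exit(0, 0x2000);
	return 0;
}
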
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 84da1a9ce37c..5feaba6a77de 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1584,6 +1584,8 @@ static int kfd_ioctl_smi_events(struct file *filep,
return kfd_smi_event_open(pdd->dev, &args->anon_fd);
}
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+
static int kfd_ioctl_set_xnack_mode(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1594,22 +1596,29 @@ static int kfd_ioctl_set_xnack_mode(struct file *filep,
if (args->xnack_enabled >= 0) {
if (!list_empty(&p->pqm.queues)) {
pr_debug("Process has user queues running\n");
- mutex_unlock(&p->mutex);
- return -EBUSY;
+ r = -EBUSY;
+ goto out_unlock;
}
- if (args->xnack_enabled && !kfd_process_xnack_mode(p, true))
+
+ if (p->xnack_enabled == args->xnack_enabled)
+ goto out_unlock;
+
+ if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
r = -EPERM;
- else
- p->xnack_enabled = args->xnack_enabled;
+ goto out_unlock;
+ }
+
+ r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
} else {
args->xnack_enabled = p->xnack_enabled;
}
+
+out_unlock:
mutex_unlock(&p->mutex);
return r;
}
-#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
struct kfd_ioctl_svm_args *args = data;
@@ -1629,6 +1638,11 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
return r;
}
#else
+static int kfd_ioctl_set_xnack_mode(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ return -EPERM;
+}
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
return -EPERM;
@@ -2153,6 +2167,12 @@ static int criu_restore_devices(struct kfd_process *p,
ret = PTR_ERR(pdd);
goto exit;
}
+
+ if (!pdd->doorbell_index &&
+ kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
+ ret = -ENOMEM;
+ goto exit;
+ }
}
/*
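
Two patterns are worth calling out in this hunk: the error paths are unified behind a single out_unlock label, and kfd_ioctl_set_xnack_mode() moves under IS_ENABLED(CONFIG_HSA_AMD_SVM) with a -EPERM stub in the #else branch, so the ioctl table keeps a valid entry when SVM support is compiled out. A minimal userspace sketch of the stub pattern — the signature is reduced for the sketch, and HAVE_SVM stands in for the Kconfig test:

#include <errno.h>
#include <stdio.h>

/* HAVE_SVM stands in for IS_ENABLED(CONFIG_HSA_AMD_SVM); flip it to see
 * both variants. */
#define HAVE_SVM 0

#if HAVE_SVM
static int kfd_ioctl_set_xnack_mode(int xnack_enabled)
{
	/* Real version: take p->mutex, reject if user queues exist, switch
	 * the mode plus its memory accounting, unlock via out_unlock. */
	(void)xnack_enabled;
	return 0;
}
#else
static int kfd_ioctl_set_xnack_mode(int xnack_enabled)
{
	(void)xnack_enabled;
	return -EPERM;	/* feature compiled out: fail, don't silently no-op */
}
#endif

int main(void)
{
	printf("ret = %d\n", kfd_ioctl_set_xnack_mode(1));
	return 0;
}
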
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 24b414cff3ec..cd5f8b219bf9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -2284,7 +2284,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
/* Fill in Subtype: IO_LINKS
* Only direct links are added here which is Link from GPU to
- * to its NUMA node. Indirect links are added by userspace.
+ * its NUMA node. Indirect links are added by userspace.
*/
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
cache_mem_filled);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e83725a28106..ecb4c3abc629 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -205,6 +205,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
}
queue_input.is_kfd_process = 1;
+ queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
+ queue_input.queue_size = q->properties.queue_size >> 2;
queue_input.paging = false;
queue_input.tba_addr = qpd->tba_addr;
@@ -1240,6 +1242,24 @@ static void init_interrupts(struct device_queue_manager *dqm)
dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
}
+static void init_sdma_bitmaps(struct device_queue_manager *dqm)
+{
+ unsigned int num_sdma_queues =
+ min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8,
+ get_num_sdma_queues(dqm));
+ unsigned int num_xgmi_sdma_queues =
+ min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8,
+ get_num_xgmi_sdma_queues(dqm));
+
+ if (num_sdma_queues)
+ dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0);
+ if (num_xgmi_sdma_queues)
+ dqm->xgmi_sdma_bitmap = GENMASK_ULL(num_xgmi_sdma_queues-1, 0);
+
+ dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm);
+ pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
+}
+
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
int pipe, queue;
@@ -1268,11 +1288,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
- dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
- dqm->sdma_bitmap &= ~(get_reserved_sdma_queues_bitmap(dqm));
- pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
-
- dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
+ init_sdma_bitmaps(dqm);
return 0;
}
@@ -1450,9 +1466,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
static int initialize_cpsch(struct device_queue_manager *dqm)
{
- uint64_t num_sdma_queues;
- uint64_t num_xgmi_sdma_queues;
-
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock_hidden);
@@ -1461,24 +1474,10 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->active_cp_queue_count = 0;
dqm->gws_queue_count = 0;
dqm->active_runlist = false;
-
- num_sdma_queues = get_num_sdma_queues(dqm);
- if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
- dqm->sdma_bitmap = ULLONG_MAX;
- else
- dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
-
- dqm->sdma_bitmap &= ~(get_reserved_sdma_queues_bitmap(dqm));
- pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
-
- num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
- if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
- dqm->xgmi_sdma_bitmap = ULLONG_MAX;
- else
- dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
-
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
+ init_sdma_bitmaps(dqm);
+
return 0;
}
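
Both initialize_nocpsch() and initialize_cpsch() now share init_sdma_bitmaps(), which clamps the queue count to the bitmap width and only builds a mask for a non-zero count; the old open-coded ~0ULL >> (64 - n) is undefined for n == 0, and BIT_ULL(n) - 1 overflows for n == 64. A standalone sketch of the same clamping, where MIN_T and GENMASK_ULL are userspace stand-ins for the kernel's min_t()/GENMASK_ULL():

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's min_t() and GENMASK_ULL(). */
#define MIN_T(a, b)       ((a) < (b) ? (a) : (b))
#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static uint64_t sdma_bitmap(unsigned int num_queues)
{
	/* Clamp to the bitmap width, and only build a mask for a non-zero
	 * count: ~0ULL >> (64 - n) is undefined for n == 0, and
	 * BIT_ULL(n) - 1 overflows for n == 64. */
	unsigned int n = MIN_T(sizeof(uint64_t) * 8, (size_t)num_queues);

	return n ? GENMASK_ULL(n - 1, 0) : 0;
}

int main(void)
{
	printf("%016llx\n", (unsigned long long)sdma_bitmap(0));   /* 0    */
	printf("%016llx\n", (unsigned long long)sdma_bitmap(8));   /* ff   */
	printf("%016llx\n", (unsigned long long)sdma_bitmap(100)); /* all  */
	return 0;
}
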
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index b33798f89ef0..cd4e61bf0493 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -303,6 +303,9 @@ int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_inde
if (r > 0)
*doorbell_index = r;
+ if (r < 0)
+ pr_err("Failed to allocate process doorbells\n");
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
index a6fcbeeb7428..0d53f6067422 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
@@ -350,11 +350,11 @@ static void event_interrupt_wq_v11(struct kfd_dev *dev,
print_sq_intr_info_inst(context_id0, context_id1);
sq_int_priv = REG_GET_FIELD(context_id0,
SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV);
- if (sq_int_priv /*&& (kfd_set_dbg_ev_from_interrupt(dev, pasid,
+ /*if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_CTXID0_DOORBELL_ID(context_id0),
KFD_CTXID0_TRAP_CODE(context_id0),
- NULL, 0))*/)
- return;
+ NULL, 0)))
+ return;*/
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
print_sq_intr_info_error(context_id0, context_id1);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index b059a77b6081..2797029bd500 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -223,7 +223,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- lock_page(page);
+ zone_device_page_init(page);
}
static void
@@ -322,12 +322,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
for (i = j = 0; i < npages; i++) {
struct page *spage;
+ dst[i] = cursor.start + (j << PAGE_SHIFT);
+ migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
+ svm_migrate_get_vram_page(prange, migrate->dst[i]);
+ migrate->dst[i] = migrate_pfn(migrate->dst[i]);
+
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
- dst[i] = cursor.start + (j << PAGE_SHIFT);
- migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
- svm_migrate_get_vram_page(prange, migrate->dst[i]);
- migrate->dst[i] = migrate_pfn(migrate->dst[i]);
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
@@ -409,7 +410,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
uint64_t npages = (end - start) >> PAGE_SHIFT;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
- struct migrate_vma migrate;
+ struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
dma_addr_t *scratch;
void *buf;
@@ -522,9 +523,6 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
prange->start, prange->last, best_loc);
- /* FIXME: workaround for page locking bug with invalid pages */
- svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));
-
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
@@ -668,7 +666,7 @@ out_oom:
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end,
- uint32_t trigger)
+ uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
@@ -676,7 +674,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long cpages = 0;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
- struct migrate_vma migrate;
+ struct migrate_vma migrate = { 0 };
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
@@ -699,6 +697,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
migrate.src = buf;
migrate.dst = migrate.src + npages;
+ migrate.fault_page = fault_page;
scratch = (dma_addr_t *)(migrate.dst + npages);
kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
@@ -766,7 +765,7 @@ out:
* 0 - OK, otherwise error code
*/
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
- uint32_t trigger)
+ uint32_t trigger, struct page *fault_page)
{
struct amdgpu_device *adev;
struct vm_area_struct *vma;
@@ -807,7 +806,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
next = min(vma->vm_end, end);
- r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger);
+ r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
+ fault_page);
if (r < 0) {
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
@@ -851,7 +851,7 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
do {
- r = svm_migrate_vram_to_ram(prange, mm, trigger);
+ r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
if (r)
return r;
} while (prange->actual_loc && --retries);
@@ -886,7 +886,7 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
unsigned long addr = vmf->address;
- struct vm_area_struct *vma;
+ struct svm_range_bo *svm_bo;
enum svm_work_list_ops op;
struct svm_range *parent;
struct svm_range *prange;
@@ -894,29 +894,42 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
struct mm_struct *mm;
int r = 0;
- vma = vmf->vma;
- mm = vma->vm_mm;
+ svm_bo = vmf->page->zone_device_data;
+ if (!svm_bo) {
+ pr_debug("failed get device page at addr 0x%lx\n", addr);
+ return VM_FAULT_SIGBUS;
+ }
+ if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
+ pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
+ return VM_FAULT_SIGBUS;
+ }
+
+ mm = svm_bo->eviction_fence->mm;
+ if (mm != vmf->vma->vm_mm)
+ pr_debug("addr 0x%lx is COW mapping in child process\n", addr);
- p = kfd_lookup_process_by_mm(vma->vm_mm);
+ p = kfd_lookup_process_by_mm(mm);
if (!p) {
pr_debug("failed find process at fault address 0x%lx\n", addr);
- return VM_FAULT_SIGBUS;
+ r = VM_FAULT_SIGBUS;
+ goto out_mmput;
}
if (READ_ONCE(p->svms.faulting_task) == current) {
pr_debug("skipping ram migration\n");
- kfd_unref_process(p);
- return 0;
+ r = 0;
+ goto out_unref_process;
}
- addr >>= PAGE_SHIFT;
+
pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
+ addr >>= PAGE_SHIFT;
mutex_lock(&p->svms.lock);
prange = svm_range_from_addr(&p->svms, addr, &parent);
if (!prange) {
- pr_debug("cannot find svm range at 0x%lx\n", addr);
+ pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
r = -EFAULT;
- goto out;
+ goto out_unlock_svms;
}
mutex_lock(&parent->migrate_mutex);
@@ -938,10 +951,12 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
goto out_unlock_prange;
}
- r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
+ r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
+ vmf->page);
if (r)
- pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
- prange, prange->start, prange->last);
+ pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
+ r, prange->svms, prange, prange->start, prange->last);
/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
if (p->xnack_enabled && parent == prange)
@@ -955,9 +970,12 @@ out_unlock_prange:
if (prange != parent)
mutex_unlock(&prange->migrate_mutex);
mutex_unlock(&parent->migrate_mutex);
-out:
+out_unlock_svms:
mutex_unlock(&p->svms.lock);
+out_unref_process:
kfd_unref_process(p);
+out_mmput:
+ mmput(mm);
pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index b3f0754b32fa..a5d7e6d22264 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -43,7 +43,7 @@ enum MIGRATION_COPY_DIR {
int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
struct mm_struct *mm, uint32_t trigger);
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
- uint32_t trigger);
+ uint32_t trigger, struct page *fault_page);
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index b8e14c2cc295..4f6390f3236e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -126,6 +126,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
@@ -177,14 +181,6 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return r;
}
-static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
- uint32_t pipe_id, uint32_t queue_id,
- struct queue_properties *p, struct mm_struct *mms)
-{
- return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
- queue_id, p->doorbell_off);
-}
-
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
@@ -256,31 +252,6 @@ static uint32_t read_doorbell_id(void *mqd)
return m->queue_doorbell_id0;
}
-static int destroy_mqd(struct mqd_manager *mm, void *mqd,
- enum kfd_preempt_type type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->adev, mqd, type, timeout,
- pipe_id, queue_id);
-}
-
-static void free_mqd(struct mqd_manager *mm, void *mqd,
- struct kfd_mem_obj *mqd_mem_obj)
-{
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
-}
-
-static bool is_occupied(struct mqd_manager *mm, void *mqd,
- uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_is_occupied(
- mm->dev->adev, queue_address,
- pipe_id, queue_id);
-}
-
static int get_wave_state(struct mqd_manager *mm, void *mqd,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
@@ -349,15 +320,6 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
mm->update_mqd(mm, m, q, NULL);
}
-static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
- uint32_t pipe_id, uint32_t queue_id,
- struct queue_properties *p, struct mm_struct *mms)
-{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
- (uint32_t __user *)p->write_ptr,
- mms);
-}
-
#define SDMA_RLC_DUMMY_DEFAULT 0xf
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -371,7 +333,8 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
- 6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+ 6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+ 1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
@@ -389,25 +352,6 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
q->is_active = QUEUE_IS_ACTIVE(*q);
}
-/*
- * * preempt type here is ignored because there is only one way
- * * to preempt sdma queue
- */
-static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
- enum kfd_preempt_type type,
- unsigned int timeout, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
-}
-
-static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
- uint64_t queue_address, uint32_t pipe_id,
- uint32_t queue_id)
-{
- return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
-}
-
#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
@@ -445,11 +389,11 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
- mqd->free_mqd = free_mqd;
+ mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
#if defined(CONFIG_DEBUG_FS)
@@ -462,10 +406,10 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = hiq_load_mqd_kiq;
+ mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -476,11 +420,11 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
- mqd->free_mqd = free_mqd;
+ mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->destroy_mqd = destroy_mqd;
- mqd->is_occupied = is_occupied;
+ mqd->destroy_mqd = kfd_destroy_mqd_cp;
+ mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -491,10 +435,10 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_sdma_mqd;
mqd->init_mqd = init_mqd_sdma;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd_sdma;
+ mqd->load_mqd = kfd_load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
- mqd->destroy_mqd = destroy_mqd_sdma;
- mqd->is_occupied = is_occupied_sdma;
+ mqd->destroy_mqd = kfd_destroy_mqd_sdma;
+ mqd->is_occupied = kfd_is_occupied_sdma;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
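
The v11 MQD manager hunks are pure deduplication: the file-local free/destroy/is_occupied/load callbacks were copies of common code, so the ops table is repointed at the shared kfd_* helpers instead. A toy sketch of the idea — one shared implementation wired into a per-type ops table rather than a per-ASIC static copy (the names below are illustrative, not the kfd symbols):

#include <stdio.h>

/* Illustrative ops table; the real one is struct mqd_manager, with
 * kfd_free_mqd_cp()/kfd_destroy_mqd_cp()/... shared across versions. */
struct mqd_ops {
	void (*free_mqd)(void);
	int  (*destroy_mqd)(void);
};

/* One shared implementation used by every MQD type that needs it. */
static void shared_free_mqd(void)    { puts("free"); }
static int  shared_destroy_mqd(void) { puts("destroy"); return 0; }

static void init_ops(struct mqd_ops *ops)
{
	ops->free_mqd    = shared_free_mqd;
	ops->destroy_mqd = shared_destroy_mqd;
}

int main(void)
{
	struct mqd_ops ops;

	init_ops(&ops);
	ops.free_mqd();
	return ops.destroy_mqd();
}
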
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 6e3e7f54381b..5137476ec18e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -857,6 +857,13 @@ int kfd_criu_restore_queue(struct kfd_process *p,
ret = -EINVAL;
goto exit;
}
+
+ if (!pdd->doorbell_index &&
+ kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index) < 0) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
/* data stored in this order: mqd, ctl_stack */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 11074cc8c333..64fdf63093a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -278,7 +278,7 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
svm_range_free_dma_mappings(prange);
if (update_mem_usage && !p->xnack_enabled) {
- pr_debug("unreserve mem limit: %lld\n", size);
+ pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
}
@@ -2913,13 +2913,15 @@ retry_write_locked:
*/
if (prange->actual_loc)
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+ NULL);
else
r = 0;
}
} else {
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+ KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+ NULL);
}
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
@@ -2956,6 +2958,64 @@ out:
return r;
}
+int
+svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
+{
+ struct svm_range *prange, *pchild;
+ uint64_t reserved_size = 0;
+ uint64_t size;
+ int r = 0;
+
+ pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
+
+ mutex_lock(&p->svms.lock);
+
+ list_for_each_entry(prange, &p->svms.list, list) {
+ svm_range_lock(prange);
+ list_for_each_entry(pchild, &prange->child_list, child_list) {
+ size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
+ if (xnack_enabled) {
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ } else {
+ r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ if (r)
+ goto out_unlock;
+ reserved_size += size;
+ }
+ }
+
+ size = (prange->last - prange->start + 1) << PAGE_SHIFT;
+ if (xnack_enabled) {
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ } else {
+ r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ if (r)
+ goto out_unlock;
+ reserved_size += size;
+ }
+out_unlock:
+ svm_range_unlock(prange);
+ if (r)
+ break;
+ }
+
+ if (r)
+ amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
+ KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
+ else
+ /* Changing the xnack mode must happen inside the svms lock, to avoid
+ * racing with svm_range_deferred_list_work unreserving memory in
+ * parallel.
+ */
+ p->xnack_enabled = xnack_enabled;
+
+ mutex_unlock(&p->svms.lock);
+ return r;
+}
+
void svm_range_list_fini(struct kfd_process *p)
{
struct svm_range *prange;
@@ -3181,28 +3241,6 @@ out:
return best_loc;
}
-/* FIXME: This is a workaround for page locking bug when some pages are
- * invalid during migration to VRAM
- */
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
- void *owner)
-{
- struct hmm_range *hmm_range;
- int r;
-
- if (prange->validated_once)
- return;
-
- r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
- prange->start << PAGE_SHIFT,
- prange->npages, &hmm_range,
- false, true, owner);
- if (!r) {
- amdgpu_hmm_range_get_pages_done(hmm_range);
- prange->validated_once = true;
- }
-}
-
/* svm_range_trigger_migration - start page migration if prefetch loc changed
* @mm: current process mm_struct
* @prange: svm range structure
@@ -3242,7 +3280,8 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
return 0;
if (!best_loc) {
- r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+ r = svm_migrate_vram_to_ram(prange, mm,
+ KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
*migrated = !r;
return r;
}
@@ -3303,7 +3342,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
mutex_lock(&prange->migrate_mutex);
do {
r = svm_migrate_vram_to_ram(prange, mm,
- KFD_MIGRATE_TRIGGER_TTM_EVICTION);
+ KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
} while (!r && prange->actual_loc && --retries);
if (!r && prange->actual_loc)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index cfac13ad06ef..7a33b93f9df6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -181,8 +181,6 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
void svm_range_free_dma_mappings(struct svm_range *prange);
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
- void *owner);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
@@ -205,6 +203,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
void svm_range_set_max_pages(struct amdgpu_device *adev);
+int svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled);
#else
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 413d8c6d592f..6925e0280dbe 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -28,7 +28,6 @@ config DRM_AMD_DC_SI
bool "AMD DC support for Southern Islands ASICs"
depends on DRM_AMDGPU_SI
depends on DRM_AMD_DC
- default n
help
Choose this option to enable new AMD DC support for SI asics
by default. This includes Tahiti, Pitcairn, Cape Verde, Oland.
@@ -43,7 +42,6 @@ config DEBUG_KERNEL_DC
config DRM_AMD_SECURE_DISPLAY
bool "Enable secure display support"
- default n
depends on DEBUG_FS
depends on DRM_AMD_DC_DCN
help
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7b19f444624c..c053cb79cd06 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -100,8 +100,6 @@
#include "soc15_common.h"
#include "vega10_ip_offset.h"
-#include "soc15_common.h"
-
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -1112,7 +1110,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hw_params.fb[i] = &fb_info->fb[i];
switch (adev->ip_versions[DCE_HWIP][0]) {
- case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
break;
@@ -1298,13 +1297,21 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct
if (hpd_rx_offload_wq[i].wq == NULL) {
DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
- return NULL;
+ goto out_err;
}
spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
}
return hpd_rx_offload_wq;
+
+out_err:
+ for (i = 0; i < max_caps; i++) {
+ if (hpd_rx_offload_wq[i].wq)
+ destroy_workqueue(hpd_rx_offload_wq[i].wq);
+ }
+ kfree(hpd_rx_offload_wq);
+ return NULL;
}
struct amdgpu_stutter_quirk {
@@ -4742,7 +4749,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->visible = true;
plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
- plane_info->layer_index = 0;
+ plane_info->layer_index = plane_state->normalized_zpos;
ret = fill_plane_color_attributes(plane_state, plane_info->format,
&plane_info->color_space);
@@ -4810,7 +4817,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
- dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+ dc_plane_state->layer_index = plane_info.layer_index;
dc_plane_state->flip_int_enabled = true;
/*
@@ -7379,11 +7386,6 @@ static void update_freesync_state_on_stream(
&vrr_infopacket,
pack_sdp_v1_3);
- new_crtc_state->freesync_timing_changed |=
- (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
- &vrr_params.adjust,
- sizeof(vrr_params.adjust)) != 0);
-
new_crtc_state->freesync_vrr_info_changed |=
(memcmp(&new_crtc_state->vrr_infopacket,
&vrr_infopacket,
@@ -7392,7 +7394,6 @@ static void update_freesync_state_on_stream(
acrtc->dm_irq_params.vrr_params = vrr_params;
new_crtc_state->vrr_infopacket = vrr_infopacket;
- new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
new_stream->vrr_infopacket = vrr_infopacket;
if (new_crtc_state->freesync_vrr_info_changed)
@@ -7455,10 +7456,6 @@ static void update_stream_irq_parameters(
new_stream,
&config, &vrr_params);
- new_crtc_state->freesync_timing_changed |=
- (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
- &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
-
new_crtc_state->freesync_config = config;
/* Copy state for access from DM IRQ handler */
acrtc->dm_irq_params.freesync_config = config;
@@ -7482,15 +7479,15 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
* We also need vupdate irq for the actual core vblank handling
* at end of vblank.
*/
- dm_set_vupdate_irq(new_state->base.crtc, true);
- drm_crtc_vblank_get(new_state->base.crtc);
+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
* Allow vblank irq disable again for fixed refresh rate.
*/
- dm_set_vupdate_irq(new_state->base.crtc, false);
+ WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
drm_crtc_vblank_put(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
@@ -8246,23 +8243,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_unlock(&dm->dc_lock);
}
- /* Count number of newly disabled CRTCs for dropping PM refs later. */
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (old_crtc_state->active && !new_crtc_state->active)
- crtc_disable_count++;
-
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
- /* For freesync config update on crtc state and params for irq */
- update_stream_irq_parameters(dm, dm_new_crtc_state);
-
- /* Handle vrr on->off / off->on transitions */
- amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
- dm_new_crtc_state);
- }
-
/**
* Enable interrupts for CRTCs that are newly enabled or went through
* a modeset. It was intentionally deferred until after the front end
@@ -8272,16 +8252,29 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
- bool configure_crc = false;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
+ struct crc_rd_work *crc_rd_wrk;
+#endif
+#endif
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* For freesync config update on crtc state and params for irq */
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ crc_rd_wrk = dm->crc_rd_wrk;
#endif
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (new_crtc_state->active &&
(!old_crtc_state->active ||
@@ -8289,16 +8282,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
+ }
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
#ifdef CONFIG_DEBUG_FS
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
/**
* Frontend may have changed so reapply the CRC capture
* settings for the stream.
*/
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
- configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
@@ -8310,14 +8306,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
- }
-
- if (configure_crc)
if (amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, cur_crc_src))
DRM_DEBUG_DRIVER("Failed to configure crc source");
-#endif
+ }
}
+#endif
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
@@ -9396,10 +9390,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
}
- if (!pre_validate_dsc(state, &dm_state, vars)) {
- ret = -EINVAL;
- goto fail;
- }
}
#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -9473,6 +9463,14 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+ /*
+ * DC consults the zpos (layer_index in DC terminology) to determine the
+ * hw plane on which to enable the hw cursor (see
+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ * atomic state, so call drm helper to normalize zpos.
+ */
+ drm_atomic_normalize_zpos(dev, state);
+
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
@@ -9525,6 +9523,15 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ if (!pre_validate_dsc(state, &dm_state, vars)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+#endif
+
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
if (ret) {
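
As the new comment in amdgpu_dm_atomic_check() explains, DC picks the hw plane for the cursor from layer_index, which fill_dc_plane_info_and_addr() now takes from plane_state->normalized_zpos after drm_atomic_normalize_zpos() has run on the atomic state. A toy version of what that normalization does per CRTC — mapping arbitrary, possibly sparse or duplicated user zpos values onto a dense 0..n-1 index; the real DRM helper also breaks ties deterministically by plane id, which this sketch does not:

#include <stdio.h>
#include <stdlib.h>

struct plane_state {
	int zpos;            /* user-supplied, may be sparse or duplicated */
	int normalized_zpos; /* dense index the hw layer_index comes from */
};

static int cmp_zpos(const void *a, const void *b)
{
	int za = ((const struct plane_state *)a)->zpos;
	int zb = ((const struct plane_state *)b)->zpos;

	return (za > zb) - (za < zb);
}

/* Sort by zpos, then hand out consecutive normalized indices. */
static void normalize_zpos(struct plane_state *p, int n)
{
	int i;

	qsort(p, n, sizeof(*p), cmp_zpos);
	for (i = 0; i < n; i++)
		p[i].normalized_zpos = i;
}

int main(void)
{
	struct plane_state planes[] = { { 7, 0 }, { 0, 0 }, { 7, 0 } };
	int i;

	normalize_zpos(planes, 3);
	for (i = 0; i < 3; i++)
		printf("zpos=%d -> normalized=%d\n",
		       planes[i].zpos, planes[i].normalized_zpos);
	return 0;
}
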
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b44faaad9b0b..b5ce15c43bcc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -681,7 +681,6 @@ struct dm_crtc_state {
int crc_skip_count;
- bool freesync_timing_changed;
bool freesync_vrr_info_changed;
bool dsc_force_changed;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b8077fcd4651..f0b01c8dc4a6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -880,8 +880,17 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne
void dm_helpers_init_panel_settings(
struct dc_context *ctx,
- struct dc_panel_config *panel_config)
-{
+ struct dc_panel_config *panel_config,
+ struct dc_sink *sink)
+{
+ // Extra Panel Power Sequence
+ panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms;
+ panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms;
+ panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off;
+ panel_config->pps.extra_post_t7_ms = 0;
+ panel_config->pps.extra_pre_t11_ms = 0;
+ panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms;
+ panel_config->pps.extra_post_OUI_ms = 0;
// Feature DSC
panel_config->dsc.disable_dsc_edp = false;
panel_config->dsc.force_dsc_edp_policy = 0;
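/*
 * Editor's sketch: the pps values consumed above originate from per-panel
 * EDID quirks. A hypothetical quirk entry (IDs and values invented for
 * illustration; real patches live in the DM EDID-parsing helpers):
 *
 *	static void apply_example_panel_patch(struct dc_edid_caps *caps)
 *	{
 *		if (caps->manufacturer_id == 0x1234 &&	// hypothetical panel
 *		    caps->product_id == 0x5678) {
 *			caps->panel_patch.extra_t3_ms = 200;	// extra T3, ms
 *			caps->panel_patch.extra_t12_ms = 500;	// extra T12, ms
 *		}
 *	}
 */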
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c8da18e45b0e..26291db0a3cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -60,11 +60,15 @@ static bool link_supports_psrsu(struct dc_link *link)
*/
void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
- if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP)) {
+ link->psr_settings.psr_feature_enabled = false;
return;
+ }
- if (link->type == dc_connection_none)
+ if (link->type == dc_connection_none) {
+ link->psr_settings.psr_feature_enabled = false;
return;
+ }
if (link->dpcd_caps.psr_info.psr_version == 0) {
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
@@ -170,7 +174,13 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
&stream, 1,
&params);
- power_opt |= psr_power_opt_z10_static_screen;
+ /*
+ * Only enable static-screen optimizations for PSR1. For PSR SU, this
+ * causes issues with the vstartup interrupt, which amdgpu_dm uses to
+ * send vblank events.
+ */
+ if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ power_opt |= psr_power_opt_z10_static_screen;
return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index ead4da11a992..ee0456b5e14e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -24,6 +24,7 @@
*/
#include "dm_services.h"
+#include "core_types.h"
#include "ObjectID.h"
#include "atomfirmware.h"
@@ -50,13 +51,6 @@
#define LAST_RECORD_TYPE 0xff
#define SMU9_SYSPLL0_ID 0
-struct i2c_id_config_access {
- uint8_t bfI2C_LineMux:4;
- uint8_t bfHW_EngineID:3;
- uint8_t bfHW_Capable:1;
- uint8_t ucAccess;
-};
-
static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
struct atom_i2c_record *record,
struct graphics_object_i2c_info *info);
@@ -849,6 +843,8 @@ static enum bp_result get_ss_info_v4_1(
disp_cntl_tbl->dvi_ss_rate_10hz * 10;
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@@ -857,6 +853,8 @@ static enum bp_result get_ss_info_v4_1(
disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
/* TODO: LVDS not supported anymore? */
case AS_SIGNAL_TYPE_DISPLAY_PORT:
@@ -866,6 +864,8 @@ static enum bp_result get_ss_info_v4_1(
disp_cntl_tbl->dp_ss_rate_10hz * 10;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_firmware: DAL only gets data from dce_info table.
@@ -879,13 +879,15 @@ static enum bp_result get_ss_info_v4_1(
DATA_TABLES(smu_info));
if (!smu_info)
return BP_RESULT_BADBIOSTABLE;
-
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info->gpuclk_ss_percentage);
ss_info->spread_spectrum_percentage =
smu_info->waflclk_ss_percentage;
ss_info->spread_spectrum_range =
smu_info->gpuclk_ss_rate_10hz * 10;
if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_XGMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
default:
result = BP_RESULT_UNSUPPORTED;
@@ -922,6 +924,7 @@ static enum bp_result get_ss_info_v4_2(
if (!smu_info)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info->gpuclk_ss_percentage);
ss_info->type.STEP_AND_DELAY_INFO = false;
ss_info->spread_percentage_divider = 1000;
/* BIOS no longer uses target clock. Always enable for now */
@@ -935,6 +938,8 @@ static enum bp_result get_ss_info_v4_2(
disp_cntl_tbl->dvi_ss_rate_10hz * 10;
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@@ -943,6 +948,8 @@ static enum bp_result get_ss_info_v4_2(
disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
/* TODO: LVDS not supported anymore? */
case AS_SIGNAL_TYPE_DISPLAY_PORT:
@@ -952,6 +959,8 @@ static enum bp_result get_ss_info_v4_2(
smu_info->gpuclk_ss_rate_10hz * 10;
if (smu_info->gpuclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_firmware: DAL only gets data from dce_info table.
@@ -1000,6 +1009,8 @@ static enum bp_result get_ss_info_v4_5(
disp_cntl_tbl->dvi_ss_rate_10hz * 10;
if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DVI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_HDMI:
ss_info->spread_spectrum_percentage =
@@ -1008,6 +1019,8 @@ static enum bp_result get_ss_info_v4_5(
disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_DISPLAY_PORT:
ss_info->spread_spectrum_percentage =
@@ -1016,6 +1029,8 @@ static enum bp_result get_ss_info_v4_5(
disp_cntl_tbl->dp_ss_rate_10hz * 10;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
+
+ DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
/* atom_smu_info_v4_0 no longer carries SS fields for the SMU Display PLL.
@@ -1353,7 +1368,7 @@ static enum bp_result bios_parser_get_lttpr_interop(
default:
break;
}
-
+ DC_LOG_BIOS("DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE: %d tbl_revision.major = %d tbl_revision.minor = %d\n", *dce_caps, tbl_revision.major, tbl_revision.minor);
return result;
}
@@ -1369,6 +1384,7 @@ static enum bp_result bios_parser_get_lttpr_caps(
if (!DATA_TABLES(dce_info))
return BP_RESULT_UNSUPPORTED;
+ *dce_caps = 0;
header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(dce_info));
get_atom_data_table_revision(header, &tbl_revision);
@@ -1402,7 +1418,11 @@ static enum bp_result bios_parser_get_lttpr_caps(
default:
break;
}
-
+ DC_LOG_BIOS("DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE: %d tbl_revision.major = %d tbl_revision.minor = %d\n", *dce_caps, tbl_revision.major, tbl_revision.minor);
+ if (dcb->ctx->dc->config.force_bios_enable_lttpr && *dce_caps == 0) {
+ *dce_caps = 1;
+ DC_LOG_BIOS("DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE: forced enabled");
+ }
return result;
}
@@ -1840,7 +1860,7 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));
-
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;
@@ -1849,7 +1869,7 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));
-
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;
@@ -1991,7 +2011,7 @@ static enum bp_result get_firmware_info_v3_4(
if (!smu_info_v3_5)
return BP_RESULT_BADBIOSTABLE;
-
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_5->gpuclk_ss_percentage);
info->default_engine_clk = smu_info_v3_5->bootup_dcefclk_10khz * 10;
break;
@@ -2397,6 +2417,7 @@ static enum bp_result get_integrated_info_v11(
info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
DATA_TABLES(integratedsysteminfo));
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
if (info_v11 == NULL)
return BP_RESULT_BADBIOSTABLE;
@@ -2611,6 +2632,7 @@ static enum bp_result get_integrated_info_v2_1(
info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
DATA_TABLES(integratedsysteminfo));
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
if (info_v2_1 == NULL)
return BP_RESULT_BADBIOSTABLE;
@@ -2772,6 +2794,8 @@ static enum bp_result get_integrated_info_v2_2(
info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
DATA_TABLES(integratedsysteminfo));
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+
if (info_v2_2 == NULL)
return BP_RESULT_BADBIOSTABLE;
@@ -2923,6 +2947,27 @@ static enum bp_result construct_integrated_info(
default:
return result;
}
+ if (result == BP_RESULT_OK) {
+
+ DC_LOG_BIOS("edp1:\n"
+ "\tedp_pwr_on_off_delay = %d\n"
+ "\tedp_pwr_on_vary_bl_to_blon = %d\n"
+ "\tedp_pwr_down_bloff_to_vary_bloff = %d\n"
+ "\tedp_bootup_bl_level = %d\n",
+ info->edp1_info.edp_pwr_on_off_delay,
+ info->edp1_info.edp_pwr_on_vary_bl_to_blon,
+ info->edp1_info.edp_pwr_down_bloff_to_vary_bloff,
+ info->edp1_info.edp_bootup_bl_level);
+ DC_LOG_BIOS("edp2:\n"
+ "\tedp_pwr_on_off_delayv = %d\n"
+ "\tedp_pwr_on_vary_bl_to_blon = %d\n"
+ "\tedp_pwr_down_bloff_to_vary_bloff = %d\n"
+ "\tedp_bootup_bl_level = %d\n",
+ info->edp2_info.edp_pwr_on_off_delay,
+ info->edp2_info.edp_pwr_on_vary_bl_to_blon,
+ info->edp2_info.edp_pwr_down_bloff_to_vary_bloff,
+ info->edp2_info.edp_bootup_bl_level);
+ }
}
if (result != BP_RESULT_OK)
@@ -2948,13 +2993,22 @@ static enum bp_result construct_integrated_info(
info->ext_disp_conn_info.path[i].ext_encoder_obj_id.id,
info->ext_disp_conn_info.path[i].caps
);
+ if (info->ext_disp_conn_info.path[i].caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ DC_LOG_BIOS("BIOS EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ else if (bp->base.ctx->dc->config.force_bios_fixed_vs) {
+ info->ext_disp_conn_info.path[i].caps |= EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN;
+ DC_LOG_BIOS("driver forced EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN on path %d\n", i);
+ }
}
-
// Log the Checksum and Voltage Swing
DC_LOG_BIOS("Integrated info table CHECKSUM: %d\n"
"Integrated info table FIX_DP_VOLTAGE_SWING: %d\n",
info->ext_disp_conn_info.checksum,
info->ext_disp_conn_info.fixdpvoltageswing);
+ if (bp->base.ctx->dc->config.force_bios_fixed_vs && info->ext_disp_conn_info.fixdpvoltageswing == 0) {
+ info->ext_disp_conn_info.fixdpvoltageswing = bp->base.ctx->dc->config.force_bios_fixed_vs & 0xF;
+ DC_LOG_BIOS("driver forced fixdpvoltageswing = %d\n", info->ext_disp_conn_info.fixdpvoltageswing);
+ }
}
/* Sort voltage table from low to high*/
for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
@@ -3300,6 +3354,7 @@ static enum bp_result bios_get_board_layout_info(
struct bios_parser *bp;
static enum bp_result record_result;
+ unsigned int max_slots;
const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
@@ -3316,8 +3371,14 @@ static enum bp_result bios_get_board_layout_info(
}
board_layout_info->num_of_slots = 0;
+ max_slots = MAX_BOARD_SLOTS;
+
+ // Assume single slot on v1_5
+ if (bp->object_info_tbl.revision.minor == 5)
+ max_slots = 1;
- for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+ for (i = 0; i < max_slots; ++i) {
record_result = get_bracket_layout_record(dcb,
slot_index_to_vbios_id[i],
&board_layout_info->slots[i]);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 0d30d1d9d67e..650f3b4b562e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -179,7 +179,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
} else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) {
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, 126);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 100);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
@@ -206,7 +206,7 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
- REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 1000);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
REG_UPDATE(DENTIST_DISPCLK_CNTL,
DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index d43258e3cd4f..c1eaf571407a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -104,7 +104,7 @@ static int dcn31_get_active_display_cnt_wa(
return display_count;
}
-static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -115,9 +115,10 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -216,11 +217,11 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn31_disable_otg_wa(clk_mgr_base, true);
+ dcn31_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn31_disable_otg_wa(clk_mgr_base, false);
+ dcn31_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 171c38fac6a3..1131c6d73f6c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -126,7 +126,7 @@ static int dcn314_get_active_display_cnt_wa(
return display_count;
}
-static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -136,12 +136,21 @@ static void dcn314_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+ struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
+
+ if (disable) {
+ if (stream_enc && stream_enc->funcs->disable_fifo)
+ stream_enc->funcs->disable_fifo(stream_enc);
+
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else {
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+
+ if (stream_enc && stream_enc->funcs->enable_fifo)
+ stream_enc->funcs->enable_fifo(stream_enc);
+ }
}
}
}
@@ -240,11 +249,11 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn314_disable_otg_wa(clk_mgr_base, true);
+ dcn314_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn314_disable_otg_wa(clk_mgr_base, false);
+ dcn314_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -677,6 +686,8 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
}
ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
+
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 897105d1c111..ef0795b14a1f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -339,29 +339,24 @@ void dcn314_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zs
if (!clk_mgr->smu_present)
return;
- if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface &&
- (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
- support = DCN_ZSTATE_SUPPORT_DISALLOW;
-
-
// Arg[15:0] = 8/9/0 for Z8/Z9/disallow -> existing bits
// Arg[16] = Disallow Z9 -> new bit
switch (support) {
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 9;
+ param = (1 << 10) | (1 << 9) | (1 << 8);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 8;
+ param = 0;
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
- param = 0x00010008;
+ param = (1 << 10);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
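/*
 * Editor's note: the magic shifts above read as a per-state allow mask;
 * assuming bit 8 = allow Z8, bit 9 = allow Z9, bit 10 = allow Z10 (an
 * inference from the three cases, not confirmed by this patch):
 *
 *	ALLOW          -> (1 << 10) | (1 << 9) | (1 << 8)	// Z8 + Z9 + Z10
 *	ALLOW_Z10_ONLY -> (1 << 10)				// Z10 only
 *	DISALLOW       -> 0					// no zstates
 */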
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 14071aef5eab..893991a0eb97 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -51,6 +51,9 @@
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
+#define UNSUPPORTED_DCFCLK 10000000
+#define MIN_DPP_DISP_CLK 100000
+
static int dcn315_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
@@ -84,7 +87,7 @@ static int dcn315_get_active_display_cnt_wa(
return display_count;
}
-static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -96,9 +99,10 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -151,6 +155,9 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
+ /* Lock pstate by requesting unsupported dcfclk if change is unsupported */
+ if (!new_clocks->p_state_change_support)
+ new_clocks->dcfclk_khz = UNSUPPORTED_DCFCLK;
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn315_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
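/*
 * Editor's note: should_set_clock() is the clk_mgr helper that roughly
 * evaluates to:
 *
 *	(safe_to_lower && calc_clk < cur_clk) || (calc_clk > cur_clk)
 *
 * so the 10 GHz UNSUPPORTED_DCFCLK request above always compares higher
 * than any real level and pins DCFCLK while P-state change is unsupported.
 */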
@@ -164,10 +171,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
// workaround: Limit dppclk and dispclk to 100 MHz to avoid underflow when a lower-clocked eDP panel is joined by a 4K monitor.
if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- if (new_clocks->dppclk_khz < 100000)
- new_clocks->dppclk_khz = 100000;
- if (new_clocks->dispclk_khz < 100000)
- new_clocks->dispclk_khz = 100000;
+ if (new_clocks->dppclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dppclk_khz = MIN_DPP_DISP_CLK;
+ if (new_clocks->dispclk_khz < MIN_DPP_DISP_CLK)
+ new_clocks->dispclk_khz = MIN_DPP_DISP_CLK;
}
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
@@ -180,12 +187,12 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
/* No need to apply the w/a if we haven't taken over from bios yet */
if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, true);
+ dcn315_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
if (clk_mgr_base->clks.dispclk_khz)
- dcn315_disable_otg_wa(clk_mgr_base, false);
+ dcn315_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
@@ -280,7 +287,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -288,7 +295,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -296,7 +303,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -304,7 +311,7 @@ static struct wm_table ddr5_wm_table = {
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 64.0,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -561,8 +568,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
- if (!bw_params->num_channels)
- bw_params->num_channels = 2;
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
bw_params->wm_table.entries[i].wm_inst = i;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 0cd3d2eb7ac7..187f5b27fdc8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -112,7 +112,7 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
@@ -124,9 +124,10 @@ static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable)
+ if (disable) {
pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- else
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
}
}
@@ -221,11 +222,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index c6785969eb1a..1c612ccf1944 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -156,12 +156,14 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
unsigned int num_levels;
+ struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks));
clk_mgr_base->clks.p_state_change_support = true;
clk_mgr_base->clks.prev_p_state_change_support = true;
clk_mgr_base->clks.fclk_prev_p_state_change_support = true;
clk_mgr->smu_present = false;
+ clk_mgr->dpm_present = false;
if (!clk_mgr_base->bw_params)
return;
@@ -178,22 +180,29 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
/* DCFCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_dcfclk_levels);
/* SOCCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_socclk_levels);
+
/* DTBCLK */
if (!clk_mgr->base.ctx->dc->debug.disable_dtb_ref_clk_switch)
dcn32_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_dtbclk_levels);
/* DISPCLK */
dcn32_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_dispclk_levels);
+ num_levels = num_entries_per_clk->num_dispclk_levels;
+
+ if (num_entries_per_clk->num_dcfclk_levels &&
+ num_entries_per_clk->num_dtbclk_levels &&
+ num_entries_per_clk->num_dispclk_levels)
+ clk_mgr->dpm_present = true;
if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) {
unsigned int i;
@@ -325,6 +334,21 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (enter_display_off == safe_to_lower)
dcn30_smu_set_num_of_displays(clk_mgr, display_count);
+ clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
+
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
+
+ if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
+ clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+
+ /* To enable FCLK P-state switching, send FCLK_PSTATE_SUPPORTED message to PMFW */
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support) {
+ /* Notify PMFW that FCLK P-state change is supported */
+ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
+ }
+ }
+
if (dc->debug.force_min_dcfclk_mhz > 0)
new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
@@ -344,7 +368,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
clk_mgr_base->clks.prev_num_ways = clk_mgr_base->clks.num_ways;
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
@@ -353,27 +376,25 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
}
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+
p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
}
- if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support) &&
- clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21) {
- clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
+ /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
+ if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
+ update_fclk = true;
+ }
- /* To disable FCLK P-state switching, send FCLK_PSTATE_NOTSUPPORTED message to PMFW */
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support) {
- /* Handle code for sending a message to PMFW that FCLK P-state change is not supported */
- dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
- }
+ if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && !clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
+ /* Notify PMFW that FCLK P-state change is not supported */
+ dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_NOTSUPPORTED);
}
/* Always update saved value, even if new value not set due to P-State switching unsupported */
@@ -382,21 +403,11 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
update_uclk = true;
}
- /* Always update saved value, even if new value not set due to P-State switching unsupported. Also check safe_to_lower for FCLK */
- if (safe_to_lower && (clk_mgr_base->clks.fclk_p_state_change_support != clk_mgr_base->clks.fclk_prev_p_state_change_support)) {
- update_fclk = true;
- }
-
/* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
if (clk_mgr_base->clks.p_state_change_support &&
(update_uclk || !clk_mgr_base->clks.prev_p_state_change_support))
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
- if (clk_mgr_base->ctx->dce_version != DCN_VERSION_3_21 && clk_mgr_base->clks.fclk_p_state_change_support && update_fclk) {
- /* Handle the code for sending a message to PMFW that FCLK P-state change is supported */
- dcn32_smu_send_fclk_pstate_message(clk_mgr, FCLK_PSTATE_SUPPORTED);
- }
-
if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
clk_mgr_base->clks.num_ways = new_clocks->num_ways;
@@ -624,7 +635,7 @@ static void dcn32_set_hard_min_memclk(struct clk_mgr *clk_mgr_base, bool current
khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
else
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
} else {
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz);
@@ -640,24 +651,42 @@ static void dcn32_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)
return;
dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
- clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels - 1].memclk_mhz);
}
/* Get current memclk states, update bounding box */
static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk;
unsigned int num_levels;
if (!clk_mgr->smu_present)
return;
- /* Refresh memclk states */
+ /* Refresh memclk and fclk states */
dcn32_init_single_clock(clk_mgr, PPCLK_UCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz,
- &num_levels);
+ &num_entries_per_clk->num_memclk_levels);
+
+ dcn32_init_single_clock(clk_mgr, PPCLK_FCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
+ &num_entries_per_clk->num_fclk_levels);
+
+ if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
+ num_levels = num_entries_per_clk->num_memclk_levels;
+ } else {
+ num_levels = num_entries_per_clk->num_fclk_levels;
+ }
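/*
 * Editor's note: with the kernel's max() macro (include/linux/minmax.h)
 * the branch above collapses to a one-liner with identical behavior:
 *
 *	num_levels = max(num_entries_per_clk->num_memclk_levels,
 *			 num_entries_per_clk->num_fclk_levels);
 */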
+
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
+ if (clk_mgr->dpm_present && !num_levels)
+ clk_mgr->dpm_present = false;
+
+ if (!clk_mgr->dpm_present)
+ dcn32_patch_dpm_table(clk_mgr_base->bw_params);
+
DC_FP_START();
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 9860bf38c547..997ab031f816 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -401,6 +401,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
{
int i;
+ if (memcmp(adjust, &stream->adjust, sizeof(struct dc_crtc_timing_adjust)) == 0)
+ return true;
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
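/*
 * Editor's sketch: the memcmp() early-out above is a byte-wise idempotence
 * guard; a false mismatch from struct padding only costs a redundant
 * reprogram, never skips a real change. Generic pattern (hypothetical
 * struct and program_hw()):
 *
 *	static bool set_cached(struct cfg *cached, const struct cfg *req)
 *	{
 *		if (memcmp(req, cached, sizeof(*req)) == 0)
 *			return true;		// nothing to do
 *		*cached = *req;			// struct copy
 *		program_hw(cached);		// hypothetical
 *		return true;
 *	}
 */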
@@ -1181,11 +1184,7 @@ static void disable_vbios_mode_if_required(
pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
if (pix_clk_100hz != requested_pix_clk_100hz) {
- if (dc->hwss.update_phy_state)
- dc->hwss.update_phy_state(dc->current_state,
- pipe, TX_OFF_SYMCLK_OFF);
- else
- core_link_disable_stream(pipe);
+ core_link_disable_stream(pipe);
pipe->stream->dpms_off = false;
}
}
@@ -1202,7 +1201,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->plane_state)
+ if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
/* Timeout 100 ms */
@@ -1735,10 +1734,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
int i, k, l;
struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
struct dc_state *old_state;
+ bool subvp_prev_use = false;
dc_z10_restore(dc);
dc_allow_idle_optimizations(dc, false);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* Check old context for SubVP */
+ subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ if (subvp_prev_use)
+ break;
+ }
+
for (i = 0; i < context->stream_count; i++)
dc_streams[i] = context->streams[i];
@@ -1751,6 +1760,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
disable_dangling_plane(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
@@ -1775,6 +1787,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
result = dc->hwss.apply_ctx_to_hw(dc, context);
if (result != DC_OK) {
@@ -1792,6 +1807,12 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.interdependent_update_lock(dc, context, false);
dc->hwss.post_unlock_program_front_end(dc, context);
}
+
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
+
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -1841,6 +1862,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.optimize_bandwidth(dc, context);
}
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
if (dc->ctx->dce_version >= DCE_VERSION_MAX)
TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
else
@@ -2004,6 +2028,9 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.optimize_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
dc->optimized_required = false;
dc->wm_optimized_required = false;
}
@@ -2323,9 +2350,13 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
type = get_scaling_info_update_type(u);
elevate_update_type(&overall_type, type);
- if (u->flip_addr)
+ if (u->flip_addr) {
update_flags->bits.addr_update = 1;
-
+ if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
+ update_flags->bits.tmz_changed = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ }
+ }
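/*
 * Editor's note: elevate_update_type() is a monotonic max over the
 * surface_update_type enum — in essence:
 *
 *	if (new > *original)
 *		*original = new;
 *
 * so the TMZ flip above can only widen the update to UPDATE_TYPE_FULL,
 * never narrow a previously requested type.
 */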
if (u->in_transfer_func)
update_flags->bits.in_transfer_func_change = 1;
@@ -2760,11 +2791,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->abm_level)
stream->abm_level = *update->abm_level;
- if (update->periodic_interrupt0)
- stream->periodic_interrupt0 = *update->periodic_interrupt0;
-
- if (update->periodic_interrupt1)
- stream->periodic_interrupt1 = *update->periodic_interrupt1;
+ if (update->periodic_interrupt)
+ stream->periodic_interrupt = *update->periodic_interrupt;
if (update->gamut_remap)
stream->gamut_remap_matrix = *update->gamut_remap;
@@ -2849,16 +2877,6 @@ static void copy_stream_update_to_stream(struct dc *dc,
}
}
-void dc_reset_state(struct dc *dc, struct dc_state *context)
-{
- dc_resource_state_destruct(context);
-
- /* clear the structure, but don't reset the reference count */
- memset(context, 0, offsetof(struct dc_state, refcount));
-
- init_state(dc, context);
-}
-
static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -2928,6 +2946,12 @@ static bool update_planes_and_stream_state(struct dc *dc,
dc_resource_state_copy_construct(
dc->current_state, context);
+ /* For each full update, remove all existing phantom pipes first.
+ * This ensures that we have enough pipes for newly added MPO planes.
+ */
+ if (dc->res_pool->funcs->remove_phantom_pipes)
+ dc->res_pool->funcs->remove_phantom_pipes(dc, context);
+
/*remove old surfaces from context */
if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
@@ -2994,13 +3018,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
- if (stream_update->periodic_interrupt0 &&
- dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
-
- if (stream_update->periodic_interrupt1 &&
- dc->hwss.setup_periodic_interrupt)
- dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
+ if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
@@ -3068,11 +3087,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
- if (dc->hwss.update_phy_state)
- dc->hwss.update_phy_state(dc->current_state,
- pipe_ctx, TX_OFF_SYMCLK_ON);
- else
- core_link_disable_stream(pipe_ctx);
+ core_link_disable_stream(pipe_ctx);
/* for dpms, keep acquired resources*/
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
@@ -3082,12 +3097,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
} else {
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
-
- if (dc->hwss.update_phy_state)
- dc->hwss.update_phy_state(dc->current_state,
- pipe_ctx, TX_ON_SYMCLK_ON);
- else
- core_link_enable_stream(dc->current_state, pipe_ctx);
+ core_link_enable_stream(dc->current_state, pipe_ctx);
}
}
@@ -3218,6 +3228,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, context);
+ if (dc->debug.enable_double_buffered_dsc_pg_support)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
context_clock_trace(dc, context);
}
@@ -3346,8 +3359,14 @@ static void commit_planes_for_stream(struct dc *dc,
/* Since phantom pipe programming is moved to post_unlock_program_front_end,
* move the SubVP lock to after the phantom pipes have been setup
*/
- if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+
return;
}
@@ -3507,6 +3526,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type != UPDATE_TYPE_FAST)
dc->hwss.post_unlock_program_front_end(dc, context);
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
if (update_type != UPDATE_TYPE_FAST)
if (dc->hwss.commit_subvp_config)
@@ -3541,11 +3563,91 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
+/* Determines if the incoming context requires applying a transition state with unnecessary
+ * pipe splitting and ODM disabled, due to hardware limitations. In a case where
+ * the OPP associated with an MPCC might change due to plane additions, this function
+ * returns true.
+ */
+static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+ struct dc_stream_state *stream,
+ int surface_count,
+ bool *is_plane_addition)
+{
+
+ struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ bool force_minimal_pipe_splitting = false;
+ uint32_t i;
+
+ *is_plane_addition = false;
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count > 0 &&
+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
+ /* determine if minimal transition is required due to MPC*/
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 1 &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ /* determine if minimal transition is required due to dynamic ODM*/
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ /* For SubVP pipe split case when adding MPO video
+ * we need to add a minimal transition. In this case
+ * there will be 2 streams (1 main stream, 1 phantom
+ * stream).
+ */
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 2 &&
+ stream->mall_stream_config.type == SUBVP_MAIN) {
+ bool is_pipe_split = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream &&
+ (dc->current_state->res_ctx.pipe_ctx[i].bottom_pipe ||
+ dc->current_state->res_ctx.pipe_ctx[i].next_odm_pipe)) {
+ is_pipe_split = true;
+ break;
+ }
+ }
+
+ /* determine if minimal transition is required due to SubVP*/
+ if (surface_count > 0 && is_pipe_split) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ return force_minimal_pipe_splitting;
+}
+
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
struct dc_state *transition_context = dc_create_state(dc);
- enum pipe_split_policy tmp_policy;
+ enum pipe_split_policy tmp_mpc_policy;
+ bool temp_dynamic_odm_policy;
+ bool temp_subvp_policy;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
@@ -3553,10 +3655,16 @@ static bool commit_minimal_transition_state(struct dc *dc,
return false;
if (!dc->config.is_vmin_only_asic) {
- tmp_policy = dc->debug.pipe_split_policy;
+ tmp_mpc_policy = dc->debug.pipe_split_policy;
dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
}
+ temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = false;
+
+ temp_subvp_policy = dc->debug.force_disable_subvp;
+ dc->debug.force_disable_subvp = true;
+
dc_resource_state_copy_construct(transition_base_context, transition_context);
//commit minimal state
@@ -3577,20 +3685,23 @@ static bool commit_minimal_transition_state(struct dc *dc,
ret = dc_commit_state_no_check(dc, transition_context);
}
- //always release as dc_commit_state_no_check retains in good case
+ /* always release as dc_commit_state_no_check retains in good case */
dc_release_state(transition_context);
- //restore previous pipe split policy
+ /* restore previous pipe split and ODM policy */
if (!dc->config.is_vmin_only_asic)
- dc->debug.pipe_split_policy = tmp_policy;
+ dc->debug.pipe_split_policy = tmp_mpc_policy;
+
+ dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
+ dc->debug.force_disable_subvp = temp_subvp_policy;
if (ret != DC_OK) {
- //this should never happen
+ /* this should never happen */
BREAK_TO_DEBUGGER();
return false;
}
- //force full surface update
+ /* force full surface update */
for (i = 0; i < dc->current_state->stream_count; i++) {
for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
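/*
 * Editor's sketch: commit_minimal_transition_state() follows a
 * save -> override -> commit -> restore discipline on the debug policies.
 * The shape, distilled (hypothetical single-policy version):
 *
 *	bool saved = dc->debug.some_policy;	// save
 *	dc->debug.some_policy = false;		// force minimal config
 *	ret = dc_commit_state_no_check(dc, transition_context);
 *	dc->debug.some_policy = saved;		// restore, even on failure
 */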
@@ -3613,22 +3724,14 @@ bool dc_update_planes_and_stream(struct dc *dc,
* cause underflow. Apply stream configuration with minimal pipe
* split first to avoid unsupported transitions for active pipes.
*/
- bool force_minimal_pipe_splitting = false;
- bool is_plane_addition = false;
+ bool force_minimal_pipe_splitting;
+ bool is_plane_addition;
- struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
-
- if (cur_stream_status &&
- dc->current_state->stream_count > 0 &&
- dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
- /* determine if minimal transition is required */
- if (cur_stream_status->plane_count > surface_count) {
- force_minimal_pipe_splitting = true;
- } else if (cur_stream_status->plane_count < surface_count) {
- force_minimal_pipe_splitting = true;
- is_plane_addition = true;
- }
- }
+ force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
+ dc,
+ stream,
+ surface_count,
+ &is_plane_addition);
/* on plane addition, minimal state is the current one */
if (force_minimal_pipe_splitting && is_plane_addition &&
@@ -3645,7 +3748,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
&context))
return false;
- /* on plane addition, minimal state is the new one */
+ /* on plane removal, minimal state is the new one */
if (force_minimal_pipe_splitting && !is_plane_addition) {
if (!commit_minimal_transition_state(dc, context)) {
dc_release_state(context);
@@ -4032,7 +4135,7 @@ struct dc_sink *dc_link_add_remote_sink(
* Treat device as no EDID device if EDID
* parsing fails
*/
- if (edid_status != EDID_OK) {
+ if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
dc_sink->dc_edid.length = 0;
dm_error("Bad EDID, status%d!\n", edid_status);
}
@@ -4554,6 +4657,37 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
}
/**
+ * dc_process_dmub_dpia_hpd_int_enable - Submits dpia hpd int enable command
+ * to dmub via inbox message
+ * @dc: dc structure
+ * @hpd_int_enable: 1 for hpd int enable, 0 to disable
+ */
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+
+ cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
+ cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
+
+ dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
+
+ DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
+}
+
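/*
 * Editor's note: a minimal caller sketch for the new helper (the value is
 * forwarded verbatim to DMUB, so only 0/1 are meaningful):
 *
 *	dc_process_dmub_dpia_hpd_int_enable(dc, 1);	// re-arm DPIA HPD
 *	...
 *	dc_process_dmub_dpia_hpd_int_enable(dc, 0);	// quiesce before reset
 */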
+/**
* dc_disable_accelerated_mode - disable accelerated mode
* @dc: dc structure
*/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 9dd705b985b9..7c2e3b8dc26a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -417,8 +417,8 @@ void get_subvp_visual_confirm_color(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
/* SubVP enable - red */
color->color_r_cr = color_value;
enable_subvp = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 4ab27e231337..d7b1ace6328a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -832,8 +832,9 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason
LINK_INFO("link=%d, mst branch is now Connected\n",
link->link_index);
- apply_dpia_mst_dsc_always_on_wa(link);
link->type = dc_connection_mst_branch;
+ apply_dpia_mst_dsc_always_on_wa(link);
+
dm_helpers_dp_update_branch_info(link->ctx, link);
if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) {
@@ -847,20 +848,13 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason
bool reset_cur_dp_mst_topology(struct dc_link *link)
{
- bool result = false;
DC_LOGGER_INIT(link->ctx->logger);
LINK_INFO("link=%d, mst branch is now Disconnected\n",
link->link_index);
revert_dpia_mst_dsc_always_on_wa(link);
- result = dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
-
- link->mst_stream_alloc_table.stream_count = 0;
- memset(link->mst_stream_alloc_table.stream_allocations,
- 0,
- sizeof(link->mst_stream_alloc_table.stream_allocations));
- return result;
+ return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
}
static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc,
@@ -1313,8 +1307,11 @@ static bool detect_link_and_local_sink(struct dc_link *link,
}
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- // Init dc_panel_config
- dm_helpers_init_panel_settings(dc_ctx, &link->panel_config);
+ /* Init dc_panel_config by HW config */
+ if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
+ dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
+ /* Pickup base DM settings */
+ dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
// Override dc_panel_config if system has specific settings
dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
}
@@ -1983,7 +1980,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
int i;
bool apply_seamless_boot_optimization = false;
uint32_t bl_oled_enable_delay = 50; // in ms
- const uint32_t post_oui_delay = 30; // 30ms
+ uint32_t post_oui_delay = 30; // 30ms
/* Reduce link bandwidth between failed link training attempts. */
bool do_fallback = false;
@@ -2030,8 +2027,10 @@ static enum dc_status enable_link_dp(struct dc_state *state,
// during mode switch we do DP_SET_POWER off then on, and OUI is lost
dpcd_set_source_specific_data(link);
- if (link->dpcd_sink_ext_caps.raw != 0)
+ if (link->dpcd_sink_ext_caps.raw != 0) {
+ post_oui_delay += link->panel_config.pps.extra_post_OUI_ms;
msleep(post_oui_delay);
+ }
// similarly, mode switch can cause loss of cable ID
dpcd_write_cable_id_to_dprx(link);
@@ -2643,9 +2642,8 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_
dp_set_fec_ready(link, link_res, false);
}
}
- } else {
- if (signal != SIGNAL_TYPE_VIRTUAL)
- link->link_enc->funcs->disable_output(link->link_enc, signal);
+ } else if (signal != SIGNAL_TYPE_VIRTUAL) {
+ link->dc->hwss.disable_link_output(link, link_res, signal);
}
if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
@@ -2667,6 +2665,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
bool is_over_340mhz = false;
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
@@ -2706,11 +2705,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
display_color_depth = COLOR_DEPTH_888;
- link->link_enc->funcs->enable_tmds_output(
- link->link_enc,
+ dc->hwss.enable_tmds_link_output(
+ link,
+ &pipe_ctx->link_res,
+ pipe_ctx->stream->signal,
pipe_ctx->clock_source->id,
display_color_depth,
- pipe_ctx->stream->signal,
stream->phy_pix_clk);
if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
@@ -2721,15 +2721,16 @@ static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct dc *dc = stream->ctx->dc;
if (stream->phy_pix_clk == 0)
stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
memset(&stream->link->cur_link_settings, 0,
sizeof(struct dc_link_settings));
-
- link->link_enc->funcs->enable_lvds_output(
- link->link_enc,
+ dc->hwss.enable_lvds_link_output(
+ link,
+ &pipe_ctx->link_res,
pipe_ctx->clock_source->id,
stream->phy_pix_clk);
@@ -3145,7 +3146,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- if (allow_active && link->type == dc_connection_none) {
+ if (allow_active != NULL && *allow_active && link->type == dc_connection_none) {
// Don't enter PSR if panel is not connected
return false;
}
@@ -3377,8 +3378,8 @@ bool dc_link_setup_psr(struct dc_link *link,
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
case AMDGPU_FAMILY_GC_11_0_1:
- if(!dc->debug.disable_z10)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
+ if (dc->debug.disable_z10)
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
break;
default:
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
@@ -3567,6 +3568,35 @@ static void update_mst_stream_alloc_table(
work_table[i];
}
+static void remove_stream_from_alloc_table(
+ struct dc_link *link,
+ struct stream_encoder *dio_stream_enc,
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc)
+{
+ int i = 0;
+ struct link_mst_stream_allocation_table *table =
+ &link->mst_stream_alloc_table;
+
+ if (hpo_dp_stream_enc) {
+ for (; i < table->stream_count; i++)
+ if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc)
+ break;
+ } else {
+ for (; i < table->stream_count; i++)
+ if (dio_stream_enc == table->stream_allocations[i].stream_enc)
+ break;
+ }
+
+ if (i < table->stream_count) {
+ i++;
+ for (; i < table->stream_count; i++)
+ table->stream_allocations[i-1] = table->stream_allocations[i];
+ memset(&table->stream_allocations[table->stream_count-1], 0,
+ sizeof(struct link_mst_stream_allocation));
+ table->stream_count--;
+ }
+}
+
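remove_stream_from_alloc_table() above uses a find-then-shift compaction: locate the entry with the matching encoder, shift every later entry down one slot, zero the freed tail slot, and shrink the count. A self-contained sketch of the same pattern on a plain int array:

#include <stdio.h>
#include <string.h>

#define MAX_STREAMS 6

struct table {
	int entries[MAX_STREAMS];
	int count;
};

/* Same find-then-shift compaction as remove_stream_from_alloc_table(). */
static void remove_entry(struct table *t, int victim)
{
	int i = 0;

	while (i < t->count && t->entries[i] != victim)
		i++;

	if (i < t->count) {
		i++;
		for (; i < t->count; i++)
			t->entries[i - 1] = t->entries[i];
		memset(&t->entries[t->count - 1], 0, sizeof(t->entries[0]));
		t->count--;
	}
}

int main(void)
{
	struct table t = { .entries = {10, 20, 30, 40}, .count = 4 };

	remove_entry(&t, 20);
	for (int i = 0; i < t.count; i++)
		printf("%d ", t.entries[i]);   /* prints: 10 30 40 */
	printf("\n");
	return 0;
}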
static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
{
const uint32_t VCP_Y_PRECISION = 1000;
@@ -3984,26 +4014,32 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
&empty_link_settings,
avg_time_slots_per_mtp);
- /* TODO: which component is responsible for remove payload table? */
if (mst_mode) {
+ /* when link is in mst mode, rely on mst manager to remove
+ * payload
+ */
if (dm_helpers_dp_mst_write_payload_allocation_table(
stream->ctx,
stream,
&proposed_table,
- false)) {
+ false))
update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- }
- else {
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
- }
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ else
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ } else {
+ /* when link is no longer in mst mode (mst hub unplugged),
+ * remove payload with default dc logic
+ */
+ remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
}
DC_LOG_MST("%s"
@@ -4299,6 +4335,19 @@ void core_link_enable_stream(
if (pipe_ctx->stream->dpms_off)
return;
+ /* Have to setup DSC before DIG FE and BE are connected (which happens before the
+ * link training). This is to make sure the bandwidth sent to DIG BE won't be
+ * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
+ * will be automatically set at a later time when the video is enabled
+ * (DP_VID_STREAM_EN = 1).
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_enable(pipe_ctx, true);
+ }
+
status = enable_link(state, pipe_ctx);
if (status != DC_OK) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index d01d2eeed813..651231387043 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -35,6 +35,8 @@
#include "dc_link_ddc.h"
#include "dce/dce_aux.h"
#include "dmub/inc/dmub_cmd.h"
+#include "link_dpcd.h"
+#include "include/dal_asic_id.h"
#define DC_LOGGER_INIT(logger)
@@ -683,6 +685,21 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
bool result = false;
struct ddc *ddc_pin = ddc->ddc_pin;
+ if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa &&
+ ASICREV_IS_YELLOW_CARP(ddc->ctx->asic_id.hw_internal_rev)) {
+ /* Fixed VS workaround for AUX timeout */
+ const uint32_t fixed_vs_address = 0xF004F;
+ const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
+
+ core_link_write_dpcd(ddc->link,
+ fixed_vs_address,
+ fixed_vs_data,
+ sizeof(fixed_vs_data));
+
+ timeout = 3072;
+ }
+
/* Do not try to access nonexistent DDC pin. */
if (ddc->link->ep_type != DISPLAY_ENDPOINT_PHY)
return true;
@@ -691,6 +708,7 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout);
result = true;
}
+
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index e2413d2908c9..1254d38f1778 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -526,9 +526,9 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
return disable_scrabled_data_symbols;
}
-static inline bool is_repeater(struct dc_link *link, uint32_t offset)
+static inline bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset)
{
- return (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
+ return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
}
static void dpcd_set_lt_pattern_and_lane_settings(
@@ -545,7 +545,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
- if (is_repeater(link, offset))
+ if (is_repeater(lt_settings, offset))
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -561,7 +561,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
- if (is_repeater(link, offset)) {
+ if (is_repeater(lt_settings, offset)) {
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
__func__,
offset,
@@ -584,7 +584,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
lt_settings->dpcd_lane_settings,
size_in_bytes);
- if (is_repeater(link, offset)) {
+ if (is_repeater(lt_settings, offset)) {
if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
DP_128b_132b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -873,7 +873,7 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
uint32_t lane;
enum dc_status status;
- if (is_repeater(link, offset)) {
+ if (is_repeater(link_training_setting, offset)) {
lane01_status_address =
DP_LANE0_1_STATUS_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -906,7 +906,7 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
ln_align->raw = dpcd_buf[2];
- if (is_repeater(link, offset)) {
+ if (is_repeater(link_training_setting, offset)) {
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
" 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
__func__,
@@ -944,6 +944,23 @@ enum dc_status dp_get_lane_status_and_lane_adjust(
return status;
}
+static enum dc_status dpcd_128b_132b_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ enum dc_status status = core_link_write_dpcd(link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
+ sizeof(link_training_setting->dpcd_lane_settings));
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ return status;
+}
+
+
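Note the length difference between the two paths: the new dpcd_128b_132b_set_lane_settings() writes sizeof(dpcd_lane_settings) bytes, i.e. all four lane-set registers in one burst, while the 8b/10b path in dpcd_set_lane_settings() below writes only link_settings.lane_count bytes. A hedged sketch of the two write lengths, with a stub standing in for core_link_write_dpcd():

#include <stdint.h>
#include <stdio.h>

#define DP_TRAINING_LANE0_SET 0x103  /* DPCD address, per the DP spec */

/* Stub standing in for core_link_write_dpcd(). */
static void dpcd_write(uint32_t addr, const uint8_t *buf, uint32_t len)
{
	(void)buf;
	printf("DPCD write @0x%X, %u byte(s)\n", addr, (unsigned)len);
}

int main(void)
{
	uint8_t dpcd_lane_settings[4] = {0};  /* one byte per lane-set register */
	uint32_t lane_count = 2;

	/* 128b/132b: TX_FFE presets are written for all four registers at once. */
	dpcd_write(DP_TRAINING_LANE0_SET, dpcd_lane_settings,
		   sizeof(dpcd_lane_settings));          /* 4 bytes */

	/* 8b/10b: only the active lanes are written. */
	dpcd_write(DP_TRAINING_LANE0_SET, dpcd_lane_settings,
		   lane_count);                          /* 2 bytes */
	return 0;
}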
enum dc_status dpcd_set_lane_settings(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
@@ -954,7 +971,7 @@ enum dc_status dpcd_set_lane_settings(
lane0_set_address = DP_TRAINING_LANE0_SET;
- if (is_repeater(link, offset))
+ if (is_repeater(link_training_setting, offset))
lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
@@ -963,17 +980,7 @@ enum dc_status dpcd_set_lane_settings(
(uint8_t *)(link_training_setting->dpcd_lane_settings),
link_training_setting->link_settings.lane_count);
- if (is_repeater(link, offset)) {
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- offset,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING)
+ if (is_repeater(link_training_setting, offset)) {
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
@@ -985,14 +992,6 @@ enum dc_status dpcd_set_lane_settings(
link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
- if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
- DP_8b_10b_ENCODING)
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
lane0_set_address,
@@ -1172,7 +1171,7 @@ static enum link_training_result perform_channel_equalization_sequence(
/* Note: also check that TPS4 is a supported feature*/
tr_pattern = lt_settings->pattern_for_eq;
- if (is_repeater(link, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+ if (is_repeater(lt_settings, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
@@ -1198,7 +1197,7 @@ static enum link_training_result perform_channel_equalization_sequence(
/* 3. wait for receiver to lock-on*/
wait_time_microsec = lt_settings->eq_pattern_time;
- if (is_repeater(link, offset))
+ if (is_repeater(lt_settings, offset))
wait_time_microsec =
dp_translate_training_aux_read_interval(
link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
@@ -1469,7 +1468,6 @@ static inline void decide_8b_10b_training_settings(
*/
lt_settings->link_settings.link_spread = link->dp_ss_off ?
LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
- lt_settings->lttpr_mode = link->lttpr_mode;
lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
@@ -1478,6 +1476,7 @@ static inline void decide_8b_10b_training_settings(
lt_settings->should_set_fec_ready = true;
lt_settings->disallow_per_lane_settings = true;
lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
@@ -1501,9 +1500,8 @@ static inline void decide_128b_132b_training_settings(struct dc_link *link,
lt_settings->cds_pattern_time = 2500;
lt_settings->cds_wait_time_limit = (dp_convert_to_count(
link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
- lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ?
- LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT;
lt_settings->disallow_per_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link);
dp_hw_to_dpcd_lane_settings(lt_settings,
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
@@ -1543,7 +1541,7 @@ static void override_training_settings(
lt_settings->ffe_preset = overrides->ffe_preset;
/* Override HW lane settings with BIOS forced values if present */
if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
lt_settings->always_match_dpcd_with_hw_lane_settings = false;
@@ -1584,6 +1582,15 @@ static void override_training_settings(
if (link->preferred_training_settings.fec_enable != NULL)
lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Check DP tunnel LTTPR mode debug option. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr)
+ lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+
+#endif
+ dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
+
}
uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
@@ -1649,7 +1656,7 @@ static enum dc_status configure_lttpr_mode_non_transparent(
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
}
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
@@ -2015,7 +2022,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
result = DP_128b_132b_LT_FAILED;
} else {
dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
- dpcd_set_lane_settings(link, lt_settings, DPRX);
+ dpcd_128b_132b_set_lane_settings(link, lt_settings);
}
loop_count++;
}
@@ -2099,7 +2106,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, lt_settings);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -2216,7 +2223,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
link->vendor_specific_lttpr_link_rate_wa = target_rate;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -2288,7 +2295,7 @@ static enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
ASSERT(dp_get_link_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
return status;
}
@@ -2635,6 +2642,7 @@ enum link_training_result dc_link_dp_perform_link_training(
link,
link_settings,
&lt_settings);
+
override_training_settings(
link,
&link->preferred_training_settings,
@@ -2652,7 +2660,7 @@ enum link_training_result dc_link_dp_perform_link_training(
* Per DP specs starting from here, DPTX device shall not issue
* Non-LT AUX transactions inside training mode.
*/
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && encoding == DP_8b_10b_ENCODING)
status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
else if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
@@ -2758,8 +2766,14 @@ bool perform_link_training_with_retries(
skip_video_pattern);
/* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ /* Update verified link settings to the current ones,
+ * because DPIA LT might fall back to a lower link setting.
+ */
+ link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+ link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ }
} else {
status = dc_link_dp_perform_link_training(link,
&pipe_ctx->link_res,
@@ -3080,7 +3094,7 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
- if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) {
+ if (dp_is_lttpr_present(link)) {
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
lttpr_max_link_rate = get_lttpr_max_link_rate(link);
@@ -3234,7 +3248,7 @@ static bool dp_verify_link_cap(
cur_link_settings = max_link_settings;
/* Grant extended timeout request */
- if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
+ if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
@@ -4095,8 +4109,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
&dpcd_lane_adjustment[0].raw,
sizeof(dpcd_lane_adjustment));
+ /* prepare link training settings */
+ link_training_settings.link_settings = link->cur_link_settings;
+
+ link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
+
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
dp_fixed_vs_pe_read_lane_adjust(
link,
link_training_settings.dpcd_lane_settings);
@@ -4203,9 +4222,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
- /* prepare link training settings */
- link_training_settings.link_settings = link->cur_link_settings;
-
for (lane = 0; lane <
(unsigned int)(link->cur_link_settings.lane_count);
lane++) {
@@ -4518,25 +4534,15 @@ void dc_link_dp_handle_link_loss(struct dc_link *link)
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
- if (link->dc->hwss.update_phy_state)
- link->dc->hwss.update_phy_state(link->dc->current_state,
- pipe_ctx, TX_OFF_SYMCLK_OFF);
- else
- core_link_disable_stream(pipe_ctx);
- }
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
+ core_link_disable_stream(pipe_ctx);
}
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
- if (link->dc->hwss.update_phy_state)
- link->dc->hwss.update_phy_state(link->dc->current_state,
- pipe_ctx, TX_ON_SYMCLK_ON);
- else
- core_link_enable_stream(link->dc->current_state, pipe_ctx);
- }
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
+ core_link_enable_stream(link->dc->current_state, pipe_ctx);
}
}
@@ -5025,125 +5031,151 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return true;
}
-/* Logic to determine LTTPR mode */
-static void determine_lttpr_mode(struct dc_link *link)
+bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
- bool allow_lttpr_non_transparent_mode = 0;
- bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
+ uint8_t lttpr_dpcd_data[8];
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ bool is_lttpr_present = false;
+
+ /* Logic to determine LTTPR support */
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
- if (link->ctx->dc->debug.lttpr_mode_override != 0) {
- link->lttpr_mode = link->ctx->dc->debug.lttpr_mode_override;
- return;
- }
+ if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support)
+ return false;
- if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
- link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
- allow_lttpr_non_transparent_mode = 1;
- } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- allow_lttpr_non_transparent_mode = 1;
+ /* By reading LTTPR capability, RX assumes that we will enable
+ * LTTPR extended aux timeout if LTTPR is present.
+ */
+ status = core_link_read_dpcd(link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ /* If this chip cap is set, at least one retimer must exist in the chain.
+ * Override count to 1 if we receive a known bad count (0 or an invalid value).
+ */
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+ (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ ASSERT(0);
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
}
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- if (vbios_lttpr_enable && vbios_lttpr_interop)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
- if (allow_lttpr_non_transparent_mode)
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- else
- link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
- } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
- if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
- else
- link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
- }
+ /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
+ is_lttpr_present = dp_is_lttpr_present(link);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /* Check DP tunnel LTTPR mode debug option. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
- link->dc->debug.dpia_debug.bits.force_non_lttpr)
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-#endif
+ if (is_lttpr_present)
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+
+ DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
+ return is_lttpr_present;
}
-bool dp_retrieve_lttpr_cap(struct dc_link *link)
+bool dp_is_lttpr_present(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8];
- enum dc_status status = DC_ERROR_UNEXPECTED;
- bool is_lttpr_present = false;
+ return (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+}
- memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting)
+{
+ enum dp_link_encoding encoding = dp_get_link_encoding_format(link_setting);
- /* Logic to determine LTTPR mode*/
- determine_lttpr_mode(link);
+ if (encoding == DP_8b_10b_ENCODING)
+ return dp_decide_8b_10b_lttpr_mode(link);
+ else if (encoding == DP_128b_132b_ENCODING)
+ return dp_decide_128b_132b_lttpr_mode(link);
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- !link->dc->debug.disable_fixed_vs_aux_timeout_wa) {
- /* Fixed VS workaround for AUX timeout */
- const uint32_t fixed_vs_address = 0xF004F;
- const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
+ ASSERT(0);
+ return LTTPR_MODE_NON_LTTPR;
+}
- core_link_write_dpcd(
- link,
- fixed_vs_address,
- fixed_vs_data,
- sizeof(fixed_vs_data));
+void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
+{
+ if (!dp_is_lttpr_present(link))
+ return;
+
+ if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) {
+ *override = LTTPR_MODE_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) {
+ *override = LTTPR_MODE_NON_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
+ *override = LTTPR_MODE_NON_LTTPR;
+ }
+ DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override));
+}
+
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
+{
+ bool is_lttpr_present = dp_is_lttpr_present(link);
+ bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable;
+ bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware;
+
+ if (!is_lttpr_present)
+ return LTTPR_MODE_NON_LTTPR;
+
+ if (vbios_lttpr_aware) {
+ if (vbios_lttpr_force_non_transparent) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
+ return LTTPR_MODE_NON_TRANSPARENT;
+ } else {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
+ return LTTPR_MODE_TRANSPARENT;
}
+ }
- /* By reading LTTPR capability, RX assumes that we will enable
- * LTTPR extended aux timeout if LTTPR is present.
- */
- status = core_link_read_dpcd(
- link,
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
- lttpr_dpcd_data,
- sizeof(lttpr_dpcd_data));
-
- link->dpcd_caps.lttpr_caps.revision.raw =
- lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_link_rate =
- lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
- lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_lane_count =
- lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.mode =
- lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_ext_timeout =
- lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
- link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
- lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
- lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
- is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
- if (is_lttpr_present) {
- CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
- configure_lttpr_mode_transparent(link);
- } else
- link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
+ link->dc->caps.extended_aux_timeout_support) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n");
+ return LTTPR_MODE_NON_TRANSPARENT;
}
- return is_lttpr_present;
+
+ DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
+ return LTTPR_MODE_NON_LTTPR;
+}
+
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
+{
+ enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;
+
+ if (dp_is_lttpr_present(link))
+ mode = LTTPR_MODE_NON_TRANSPARENT;
+
+ DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
+ return mode;
}
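Taken together, the three deciders above reduce to a small decision tree. A standalone sketch of that logic, with the link/dc state reads flattened to plain booleans for illustration:

#include <stdbool.h>
#include <stdio.h>

enum lttpr_mode { NON_LTTPR, TRANSPARENT, NON_TRANSPARENT };

/* Mirrors dp_decide_8b_10b_lttpr_mode() above. */
static enum lttpr_mode decide_8b_10b(bool lttpr_present,
				     bool vbios_aware,
				     bool vbios_force_non_transparent,
				     bool allow_dp1_4a_non_transparent,
				     bool extended_aux_timeout)
{
	if (!lttpr_present)
		return NON_LTTPR;
	if (vbios_aware)
		return vbios_force_non_transparent ? NON_TRANSPARENT : TRANSPARENT;
	if (allow_dp1_4a_non_transparent && extended_aux_timeout)
		return NON_TRANSPARENT;
	return NON_LTTPR;
}

/* Mirrors dp_decide_128b_132b_lttpr_mode(): present implies non-transparent. */
static enum lttpr_mode decide_128b_132b(bool lttpr_present)
{
	return lttpr_present ? NON_TRANSPARENT : NON_LTTPR;
}

int main(void)
{
	printf("%d\n", decide_8b_10b(true, true, false, false, false)); /* 1 = TRANSPARENT */
	printf("%d\n", decide_128b_132b(true));                         /* 2 = NON_TRANSPARENT */
	return 0;
}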
static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
@@ -5161,9 +5193,10 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
link->dc, link->link_enc->transmitter);
if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
- cmd.cable_id.header.ret_status == 1)
+ cmd.cable_id.header.ret_status == 1) {
cable_id->raw = cmd.cable_id.data.output_raw;
-
+ DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
+ }
return cmd.cable_id.header.ret_status == 1;
}
@@ -5205,13 +5238,17 @@ static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout
uint64_t current_ts = 0;
uint64_t time_taken_ms = 0;
enum dc_connection_type type = dc_connection_none;
+ bool lttpr_present;
+ bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
- determine_lttpr_mode(link);
+ lttpr_present = dp_is_lttpr_present(link) ||
+ (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support);
+ DC_LOG_DC("lttpr_present = %d.\n", lttpr_present ? 1 : 0);
/* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported the first read is expected to
* be to determine LTTPR capabilities. Otherwise trying to read power state should be an innocuous AUX read.
*/
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ if (lttpr_present)
status = core_link_read_dpcd(
link,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
@@ -5341,6 +5378,10 @@ static bool retrieve_link_cap(struct dc_link *link)
}
is_lttpr_present = dp_retrieve_lttpr_cap(link);
+
+ if (is_lttpr_present)
+ configure_lttpr_mode_transparent(link);
+
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
@@ -5770,7 +5811,7 @@ void detect_edp_sink_caps(struct dc_link *link)
* Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
*/
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
- (link->dc->debug.optimize_edp_link_rate ||
+ (link->panel_config.ilr.optimize_edp_link_rate ||
link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
@@ -6092,7 +6133,7 @@ bool dc_link_dp_set_test_pattern(
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
dp_fixed_vs_pe_set_retimer_lane_settings(
link,
p_link_settings->dpcd_lane_settings,
@@ -6719,7 +6760,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
ASSERT(link || crtc_timing); // invalid input
if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
- !link->dc->debug.optimize_edp_link_rate)
+ !link->panel_config.ilr.optimize_edp_link_rate)
return false;
@@ -7069,68 +7110,16 @@ void dp_enable_link_phy(
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
- struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- struct pipe_ctx *pipes =
- link->dc->current_state->res_ctx.pipe_ctx;
- struct clock_source *dp_cs =
- link->dc->res_pool->dp_clock_source;
- const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
- unsigned int i;
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (!link->dc->config.edp_no_power_sequencing)
- link->dc->hwss.edp_power_control(link, true);
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
- }
-
- /* If the current pixel clock source is not DTO(happens after
- * switching from HDMI passive dongle to DP on the same connector),
- * switch the pixel clock source to DTO.
- */
- for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream != NULL &&
- pipes[i].stream->link == link) {
- if (pipes[i].clock_source != NULL &&
- pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
- pipes[i].clock_source = dp_cs;
- pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
- pipes[i].stream->timing.pix_clk_100hz;
- pipes[i].clock_source->funcs->program_pix_clk(
- pipes[i].clock_source,
- &pipes[i].stream_res.pix_clk_params,
- dp_get_link_encoding_format(link_settings),
- &pipes[i].pll_settings);
- }
- }
- }
-
link->cur_link_settings = *link_settings;
-
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
- if (dc->clk_mgr->funcs->notify_link_rate_change)
- dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
- }
-
- if (dmcu != NULL && dmcu->funcs->lock_phy)
- dmcu->funcs->lock_phy(dmcu);
-
- if (link_hwss->ext.enable_dp_link_output)
- link_hwss->ext.enable_dp_link_output(link, link_res, signal,
- clock_source, link_settings);
-
- if (dmcu != NULL && dmcu->funcs->unlock_phy)
- dmcu->funcs->unlock_phy(dmcu);
-
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ link->dc->hwss.enable_dp_link_output(link, link_res, signal,
+ clock_source, link_settings);
dp_receiver_power_ctrl(link, true);
}
void edp_add_delay_for_T9(struct dc_link *link)
{
- if (link->local_sink &&
- link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
- udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
+ if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
+ udelay(link->panel_config.pps.extra_delay_backlight_off * 1000);
}
bool edp_receiver_ready_T9(struct dc_link *link)
@@ -7186,9 +7175,8 @@ bool edp_receiver_ready_T7(struct dc_link *link)
} while (time_taken_in_ns < 50 * 1000000); // Max T7 is 50ms
}
- if (link->local_sink &&
- link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
- udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
+ if (link && link->panel_config.pps.extra_t7_ms > 0)
+ udelay(link->panel_config.pps.extra_t7_ms * 1000);
return result;
}
@@ -7197,29 +7185,11 @@ void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_
enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
if (!link->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(link, false);
- if (signal == SIGNAL_TYPE_EDP) {
- if (link->dc->hwss.edp_backlight_control)
- link->dc->hwss.edp_backlight_control(link, false);
- if (link_hwss->ext.disable_dp_link_output)
- link_hwss->ext.disable_dp_link_output(link, link_res, signal);
- link->dc->hwss.edp_power_control(link, false);
- } else {
- if (dmcu != NULL && dmcu->funcs->lock_phy)
- dmcu->funcs->lock_phy(dmcu);
- if (link_hwss->ext.disable_dp_link_output)
- link_hwss->ext.disable_dp_link_output(link, link_res, signal);
- if (dmcu != NULL && dmcu->funcs->unlock_phy)
- dmcu->funcs->unlock_phy(dmcu);
- }
-
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
-
+ dc->hwss.disable_link_output(link, link_res, signal);
/* Clear current link setting.*/
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
@@ -7285,7 +7255,7 @@ void dp_set_hw_lane_settings(
{
const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
- if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
+ if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
return;
if (link_hwss->ext.set_dp_lane_settings)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index 468e39589ed8..74e36b34d3f7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -115,12 +115,14 @@ static enum link_training_result dpia_configure_link(
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
- link->lttpr_mode);
+ lt_settings->lttpr_mode);
dp_decide_training_settings(link,
link_setting,
lt_settings);
+ dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
+
status = dpcd_configure_channel_coding(link, lt_settings);
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
@@ -178,7 +180,7 @@ static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
switch (type) {
case DPIA_SET_CFG_SET_LINK:
- data.set_link.mode = link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0;
+ data.set_link.mode = lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0;
break;
case DPIA_SET_CFG_SET_PHY_TEST_MODE:
break;
@@ -553,7 +555,7 @@ static enum link_training_result dpia_training_cr_phase(
{
enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop);
else
result = dpia_training_cr_transparent(link, link_res, lt_settings);
@@ -830,7 +832,7 @@ static enum link_training_result dpia_training_eq_phase(
{
enum link_training_result result;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop);
else
result = dpia_training_eq_transparent(link, link_res, lt_settings);
@@ -870,13 +872,14 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
* @param hop The Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_end(struct dc_link *link,
+ struct link_training_settings *lt_settings,
uint32_t hop)
{
enum link_training_result result = LINK_TRAINING_SUCCESS;
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
enum dc_status status;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
if (hop == repeater_cnt) { /* DPTX-to-DPIA */
@@ -916,7 +919,7 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
link->link_id.enum_id - ENUM_ID_1,
hop,
result,
- link->lttpr_mode);
+ lt_settings->lttpr_mode);
return result;
}
@@ -928,7 +931,9 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
* @param link DPIA link being trained.
* @param hop The Hop in display path. DPRX = 0.
*/
-static void dpia_training_abort(struct dc_link *link, uint32_t hop)
+static void dpia_training_abort(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
{
uint8_t data = 0;
uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
@@ -936,7 +941,7 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
- link->lttpr_mode,
+ lt_settings->lttpr_mode,
link->is_hpd_pending);
/* Abandon clean-up if sink unplugged. */
@@ -964,12 +969,16 @@ enum link_training_result dc_link_dpia_perform_link_training(
uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
int8_t repeater_id; /* Current hop. */
+ struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in
+
+ lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings);
+
/* Configure link as prescribed in link_setting and set LTTPR mode. */
result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
if (result != LINK_TRAINING_SUCCESS)
return result;
- if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
/* Train each hop in turn starting with the one closest to DPTX.
@@ -987,7 +996,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
break;
/* Stop training hop. */
- result = dpia_training_end(link, repeater_id);
+ result = dpia_training_end(link, &lt_settings, repeater_id);
if (result != LINK_TRAINING_SUCCESS)
break;
}
@@ -1001,9 +1010,9 @@ enum link_training_result dc_link_dpia_perform_link_training(
msleep(5);
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT) {
- dpia_training_abort(link, repeater_id);
+ dpia_training_abort(link, &lt_settings, repeater_id);
} else {
- dpia_training_end(link, repeater_id);
+ dpia_training_end(link, &lt_settings, repeater_id);
}
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 29f27e3fe3ac..fd8db482e56f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1747,7 +1747,6 @@ bool dc_remove_plane_from_context(
for (i = 0; i < stream_status->plane_count; i++) {
if (stream_status->plane_states[i] == plane_state) {
-
dc_plane_state_release(stream_status->plane_states[i]);
break;
}
@@ -3581,6 +3580,23 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
}
}
+void reset_sync_context_for_pipe(const struct dc *dc,
+ struct dc_state *context,
+ uint8_t pipe_idx)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx_reset;
+
+ /* reset the otg sync context for the pipe and its slave pipes if any */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_reset = &context->res_ctx.pipe_ctx[i];
+
+ if (((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_reset) == pipe_idx) &&
+ IS_PIPE_SYNCD_VALID(pipe_ctx_reset)) || (i == pipe_idx))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_reset, i);
+ }
+}
+
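reset_sync_context_for_pipe() above walks every pipe and re-points any pipe synced to the disabled master (and the master itself) back at its own index. A minimal model of the same loop, with the sync state flattened to a plain array instead of the GET/SET_PIPE_SYNCD macros:

#include <stdio.h>

#define PIPE_COUNT 4
#define SYNCD_INVALID (-1)

/* syncd[i] == j means pipe i is OTG-synced to master pipe j. */
static int syncd[PIPE_COUNT];

static void reset_sync_context_for_pipe(int pipe_idx)
{
	for (int i = 0; i < PIPE_COUNT; i++) {
		/* a slave of the disabled master, or the master itself */
		if ((syncd[i] == pipe_idx && syncd[i] != SYNCD_INVALID) ||
		    i == pipe_idx)
			syncd[i] = i;   /* each pipe becomes its own master */
	}
}

int main(void)
{
	syncd[0] = 0; syncd[1] = 0; syncd[2] = 0; syncd[3] = 3;
	reset_sync_context_for_pipe(0);            /* disable master pipe 0 */
	for (int i = 0; i < PIPE_COUNT; i++)
		printf("pipe %d -> master %d\n", i, syncd[i]); /* 0->0 1->1 2->2 3->3 */
	return 0;
}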
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
{
/* TODO - get transmitter to phy idx mapping from DMUB */
@@ -3645,3 +3661,77 @@ const struct link_hwss *get_link_hwss(const struct dc_link *link,
else
return get_virtual_link_hwss();
}
+
+bool is_h_timing_divisible_by_2(struct dc_stream_state *stream)
+{
+ bool divisible = false;
+ uint16_t h_blank_start = 0;
+ uint16_t h_blank_end = 0;
+
+ if (stream) {
+ h_blank_start = stream->timing.h_total - stream->timing.h_front_porch;
+ h_blank_end = h_blank_start - stream->timing.h_addressable;
+
+ /* HTOTAL, Hblank start/end, and Hsync start/end all must be
+ * divisible by 2 in order for the horizontal timing params
+ * to be considered divisible by 2. Hsync start is always 0.
+ */
+ divisible = (stream->timing.h_total % 2 == 0) &&
+ (h_blank_start % 2 == 0) &&
+ (h_blank_end % 2 == 0) &&
+ (stream->timing.h_sync_width % 2 == 0);
+ }
+ return divisible;
+}
+
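As a concrete check of is_h_timing_divisible_by_2() above, take standard 1080p60 CEA timing (h_total 2200, h_addressable 1920, h_front_porch 88, h_sync_width 44): h_blank_start = 2200 - 88 = 2112 and h_blank_end = 2112 - 1920 = 192, all even, so the timing qualifies. A runnable sketch of the same arithmetic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct h_timing {
	uint16_t h_total, h_addressable, h_front_porch, h_sync_width;
};

/* Same checks as is_h_timing_divisible_by_2(); Hsync start is always 0. */
static bool h_timing_divisible_by_2(const struct h_timing *t)
{
	uint16_t h_blank_start = t->h_total - t->h_front_porch;
	uint16_t h_blank_end = h_blank_start - t->h_addressable;

	return (t->h_total % 2 == 0) && (h_blank_start % 2 == 0) &&
	       (h_blank_end % 2 == 0) && (t->h_sync_width % 2 == 0);
}

int main(void)
{
	/* 1080p60 CEA-861 horizontal timing */
	struct h_timing t = { 2200, 1920, 88, 44 };

	printf("divisible: %d\n", h_timing_divisible_by_2(&t)); /* prints 1 */
	return 0;
}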
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ const struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm)
+{
+ int pipe_idx = sec_pipe->pipe_idx;
+ struct pipe_ctx *sec_top, *sec_bottom, *sec_next, *sec_prev;
+ const struct resource_pool *pool = dc->res_pool;
+
+ sec_top = sec_pipe->top_pipe;
+ sec_bottom = sec_pipe->bottom_pipe;
+ sec_next = sec_pipe->next_odm_pipe;
+ sec_prev = sec_pipe->prev_odm_pipe;
+
+ *sec_pipe = *pri_pipe;
+
+ sec_pipe->top_pipe = sec_top;
+ sec_pipe->bottom_pipe = sec_bottom;
+ sec_pipe->next_odm_pipe = sec_next;
+ sec_pipe->prev_odm_pipe = sec_prev;
+
+ sec_pipe->pipe_idx = pipe_idx;
+ sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+ sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+ sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+ sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+ sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+ sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+ sec_pipe->stream_res.dsc = NULL;
+ if (odm) {
+ if (!sec_pipe->top_pipe)
+ sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+ else
+ sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+ if (sec_pipe->stream->timing.flags.DSC == 1) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dcn20_acquire_dsc(dc, &state->res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+#endif
+ ASSERT(sec_pipe->stream_res.dsc);
+ if (sec_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dcn20_build_mapped_resource(dc, state, sec_pipe->stream);
+#endif
+ }
+
+ return true;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f62d50901d92..38d71b5c1f2d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -30,6 +30,7 @@
#include "resource.h"
#include "ipp.h"
#include "timing_generator.h"
+#include "dc_dmub_srv.h"
#define DC_LOGGER dc->ctx->logger
@@ -275,6 +276,8 @@ static void program_cursor_attributes(
}
dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
@@ -329,7 +332,7 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
- if (attributes->height * attributes->width * 4 > 16384)
+ if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384)
if (stream->mall_stream_config.type == SUBVP_MAIN)
return false;
@@ -381,6 +384,8 @@ static void program_cursor_position(
}
dc->hwss.set_cursor_position(pipe_ctx);
+
+ dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
if (pipe_to_program)
@@ -521,7 +526,7 @@ bool dc_stream_remove_writeback(struct dc *dc,
/* remove writeback info for disabled writeback pipes from stream */
for (i = 0, j = 0; i < stream->num_wb_info; i++) {
if (stream->writeback_info[i].wb_enabled) {
- if (i != j)
+ if (j < i)
/* trim the array */
stream->writeback_info[j] = stream->writeback_info[i];
j++;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 75dbc665f435..bfc5474c0f4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.201"
+#define DC_VER "3.2.207"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -406,6 +406,9 @@ struct dc_config {
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
bool use_default_clock_table;
+ bool force_bios_enable_lttpr;
+ uint8_t force_bios_fixed_vs;
+
};
enum visual_confirm {
@@ -818,7 +821,6 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool disable_fams;
- bool optimize_edp_link_rate; /* eDP ILR */
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
bool enable_driver_sequence_debug;
@@ -830,6 +832,10 @@ struct dc_debug_options {
bool disable_fixed_vs_aux_timeout_wa;
bool force_disable_subvp;
bool force_subvp_mclk_switch;
+ bool allow_sw_cursor_fallback;
+ unsigned int force_subvp_num_ways;
+ unsigned int force_mall_ss_num_ways;
+ bool alloc_extra_way_for_cursor;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
@@ -843,6 +849,7 @@ struct dc_debug_options {
bool use_legacy_soc_bb_mechanism;
bool exit_idle_opt_for_cursor_updates;
bool enable_single_display_2to1_odm_policy;
+ bool enable_double_buffered_dsc_pg_support;
bool enable_dp_dig_pixel_rate_div_policy;
enum lttpr_mode lttpr_mode_override;
};
@@ -1114,6 +1121,7 @@ union surface_update_flags {
uint32_t clock_change:1;
uint32_t stereo_format_change:1;
uint32_t lut_3d:1;
+ uint32_t tmz_changed:1;
uint32_t full_update:1;
} bits;
@@ -1183,6 +1191,8 @@ struct dc_plane_state {
enum dc_irq_source irq_source;
struct kref refcount;
struct tg_color visual_confirm_color;
+
+ bool is_statically_allocated;
};
struct dc_plane_info {
@@ -1602,6 +1612,9 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint8_t mst_alloc_slots,
uint8_t *mst_slots_in_use);
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable);
+
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 7b765efe0825..0541e87e4f38 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -30,6 +30,7 @@
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
+#include "cursor_reg_cache.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -450,44 +451,42 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
- int16_t drr_frame_us = 0;
- int16_t min_drr_supported_us = 0;
- int16_t max_drr_supported_us = 0;
- int16_t max_drr_vblank_us = 0;
- int16_t max_drr_mallregion_us = 0;
- int16_t mall_region_us = 0;
- int16_t prefetch_us = 0;
- int16_t subvp_active_us = 0;
- int16_t drr_active_us = 0;
- int16_t min_vtotal_supported = 0;
- int16_t max_vtotal_supported = 0;
+ uint16_t drr_frame_us = 0;
+ uint16_t min_drr_supported_us = 0;
+ uint16_t max_drr_supported_us = 0;
+ uint16_t max_drr_vblank_us = 0;
+ uint16_t max_drr_mallregion_us = 0;
+ uint16_t mall_region_us = 0;
+ uint16_t prefetch_us = 0;
+ uint16_t subvp_active_us = 0;
+ uint16_t drr_active_us = 0;
+ uint16_t min_vtotal_supported = 0;
+ uint16_t max_vtotal_supported = 0;
pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
- drr_frame_us = div64_s64(drr_timing->v_total * drr_timing->h_total,
- (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
+ drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
+ (((uint64_t)drr_timing->pix_clk_100hz * 100)));
// P-State allow width and FW delays already included phantom_timing->v_addressable
- mall_region_us = div64_s64(phantom_timing->v_addressable * phantom_timing->h_total,
- (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000);
+ mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
+ (((uint64_t)phantom_timing->pix_clk_100hz * 100)));
min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
- min_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 *
- (div64_s64((int64_t)min_drr_supported_us, 1000000)),
- (int64_t)drr_timing->h_total);
-
- prefetch_us = div64_s64((phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total,
- (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
- dc->caps.subvp_prefetch_end_to_mall_start_us);
- subvp_active_us = div64_s64(main_timing->v_addressable * main_timing->h_total,
- (int64_t)(main_timing->pix_clk_100hz * 100) * 1000000);
- drr_active_us = div64_s64(drr_timing->v_addressable * drr_timing->h_total,
- (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
- max_drr_vblank_us = div64_s64((int64_t)(subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
+ min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
+ (((uint64_t)drr_timing->h_total * 1000000)));
+
+ prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
+ (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+ subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
+ (((uint64_t)main_timing->pix_clk_100hz * 100)));
+ drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
+ (((uint64_t)drr_timing->pix_clk_100hz * 100)));
+ max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us;
max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
- max_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 * (div64_s64((int64_t)max_drr_supported_us, 1000000)),
- (int64_t)drr_timing->h_total);
+ max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
+ (((uint64_t)drr_timing->h_total * 1000000)));
pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
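The conversions in this hunk all follow the same shape: time_us = pixels * 1000000 / pixel_clock_hz, computed in unsigned 64-bit math before truncating. For example, with 1080p60 (v_total 1125, h_total 2200, pix_clk_100hz 1485000): drr_frame_us = 1125 * 2200 * 1000000 / 148500000 = 16666 us. A sketch using the same div64_u64-style arithmetic, with a plain stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Plain stand-in for the kernel's div64_u64(). */
static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	uint32_t v_total = 1125, h_total = 2200;   /* 1080p60 */
	uint32_t pix_clk_100hz = 1485000;          /* 148.5 MHz */

	/* frame period in microseconds, widened to u64 before multiplying */
	uint16_t drr_frame_us = div64_u64((uint64_t)v_total * h_total * 1000000,
					  (uint64_t)pix_clk_100hz * 100);

	printf("drr_frame_us = %u\n", (unsigned)drr_frame_us); /* prints 16666 */
	return 0;
}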
@@ -581,10 +580,12 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
- subvp0_prefetch_us = div64_s64((phantom_timing0->v_total - phantom_timing0->v_front_porch) * phantom_timing0->h_total,
- (int64_t)(phantom_timing0->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
- subvp1_prefetch_us = div64_s64((phantom_timing1->v_total - phantom_timing1->v_front_porch) * phantom_timing1->h_total,
- (int64_t)(phantom_timing1->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
+ subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
+ (uint64_t)phantom_timing0->h_total * 1000000),
+ (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+ subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
+ (uint64_t)phantom_timing1->h_total * 1000000),
+ (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
// should increase its prefetch time to match the other
@@ -592,16 +593,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
- (phantom_timing1->pix_clk_100hz * 100) + phantom_timing1->h_total - 1),
- (int64_t)phantom_timing1->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+ ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
+ ((uint64_t)phantom_timing1->h_total * 1000000));
+
} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
- (phantom_timing0->pix_clk_100hz * 100) + phantom_timing0->h_total - 1),
- (int64_t)phantom_timing0->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+ ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
+ ((uint64_t)phantom_timing0->h_total * 1000000));
}
}
@@ -668,19 +670,33 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
// Round up
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)dc->caps.subvp_prefetch_end_to_mall_start_us, 1000000)) *
- (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
- (int64_t)phantom_timing->h_total);
+ div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+ ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
pipe_data->pipe_config.subvp_data.processing_delay_lines =
- div64_s64(((div64_s64((int64_t)dc->caps.subvp_fw_processing_delay_us, 1000000)) *
- (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
- (int64_t)phantom_timing->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+ ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
+
+ if (subvp_pipe->bottom_pipe) {
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
+ } else if (subvp_pipe->next_odm_pipe) {
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
+ } else {
+ pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0;
+ }
+
// Find phantom pipe index based on phantom stream
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->pipe_idx;
+ if (phantom_pipe->bottom_pipe) {
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->pipe_idx;
+ } else if (phantom_pipe->next_odm_pipe) {
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->pipe_idx;
+ } else {
+ pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
+ }
break;
}
}
@@ -725,7 +741,9 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (!pipe->stream)
continue;
- if (pipe->plane_state && !pipe->top_pipe &&
+	/* For the SubVP pipe count, only count the top-most (ODM / MPC) pipe */
+ if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipes[subvp_count++] = pipe;
}
@@ -738,7 +756,12 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (!pipe->stream)
continue;
+	/* When populating subvp cmd info, only pass in the top-most (ODM / MPC) pipe.
+	 * Any ODM or MPC splits being used in SubVP will be handled internally in
+	 * populate_subvp_cmd_pipe_info.
+	 */
if (pipe->plane_state && pipe->stream->mall_stream_config.paired_stream &&
+ !pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
} else if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
@@ -758,7 +781,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
// Store the original watermark value for this SubVP config so we can lower it when the
// MCLK switch starts
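+	// refclk cycles = ns * (refclk in KHz / 1000) / 1000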
wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
- dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 / 1000;
+ (dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;
cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
}
@@ -858,3 +881,147 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
diag_data.is_cw0_enabled,
diag_data.is_cw6_enabled);
}
+
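+/* Cursor updates are offloaded to DMUB only for PSR-capable panels (PSR1 or
+ * PSR-SU) on DCN3.1+, and never for video-progressive plane addresses.
+ */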
+static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state != NULL) {
+ if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ return false;
+ }
+
+ if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+ pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
+ pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
+ return true;
+
+ return false;
+}
+
+static void dc_build_cursor_update_payload0(
+ struct pipe_ctx *pipe_ctx, uint8_t p_idx,
+ struct dmub_cmd_update_cursor_payload0 *payload)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ unsigned int panel_inst = 0;
+
+ if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
+ pipe_ctx->stream->link, &panel_inst))
+ return;
+
+	/* Payload: Cursor Rect is built from position & attribute
+	 * x & y are obtained from position
+	 */
+ payload->cursor_rect.x = hubp->cur_rect.x;
+ payload->cursor_rect.y = hubp->cur_rect.y;
+ /* w & h are obtained from attribute */
+ payload->cursor_rect.width = hubp->cur_rect.w;
+ payload->cursor_rect.height = hubp->cur_rect.h;
+
+ payload->enable = hubp->pos.cur_ctl.bits.cur_enable;
+ payload->pipe_idx = p_idx;
+ payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ payload->panel_inst = panel_inst;
+}
+
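+/* Queue the command, kick the DMUB inbox, and wait for it to go idle. */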
+static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv,
+ union dmub_rb_cmd *cmd)
+{
+ dc_dmub_srv_cmd_queue(dmub_srv, cmd);
+ dc_dmub_srv_cmd_execute(dmub_srv);
+ dc_dmub_srv_wait_idle(dmub_srv);
+}
+
+static void dc_build_cursor_position_update_payload0(
+ struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
+ const struct hubp *hubp, const struct dpp *dpp)
+{
+ /* Hubp */
+ pl->position_cfg.pHubp.cur_ctl.raw = hubp->pos.cur_ctl.raw;
+ pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
+ pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
+ pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;
+
+ /* dpp */
+ pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
+ pl->position_cfg.pipe_idx = p_idx;
+}
+
+static void dc_build_cursor_attribute_update_payload1(
+ struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
+ const struct hubp *hubp, const struct dpp *dpp)
+{
+ /* Hubp */
+ pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
+ pl_A->aHubp.cur_ctl.raw = hubp->att.cur_ctl.raw;
+ pl_A->aHubp.size.raw = hubp->att.size.raw;
+ pl_A->aHubp.settings.raw = hubp->att.settings.raw;
+
+ /* dpp */
+ pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
+}
+
+/**
+ * dc_send_update_cursor_info_to_dmu - Populate the DMCUB cursor update info command
+ * @pCtx: pipe context
+ * @pipe_idx: pipe index
+ *
+ * Store the cursor-related information and pass it to the DMUB.
+ */
+
+void dc_send_update_cursor_info_to_dmu(
+ struct pipe_ctx *pCtx, uint8_t pipe_idx)
+{
+ union dmub_rb_cmd cmd = { 0 };
+ union dmub_cmd_update_cursor_info_data *update_cursor_info =
+ &cmd.update_cursor_info.update_cursor_info_data;
+
+ if (!dc_dmub_should_update_cursor_data(pCtx))
+ return;
+	/*
+	 * Since we use multi_cmd_pending for the dmub command, the 2nd command is
+	 * only assigned to store the cursor attributes info.
+	 * The 1st command can be viewed as 2 parts: the first carries the PSR/Replay
+	 * data, the other stores the cursor position info.
+	 *
+	 * The command header type must be the same if using multi_cmd_pending.
+	 * Besides, while processing the 2nd command in the DMU, the sub type is unused,
+	 * so it is meaningless to pass a sub type header with a different type.
+	 */
+
+ {
+ /* Build Payload#0 Header */
+ cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd.update_cursor_info.header.payload_bytes =
+ sizeof(cmd.update_cursor_info.update_cursor_info_data);
+		cmd.update_cursor_info.header.multi_cmd_pending = 1; /* 1st of the combined DMU commands */
+
+ /* Prepare Payload */
+ dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0);
+
+ dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx,
+ pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+		/* Send update_cursor_info to the queue */
+ dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd);
+ }
+ {
+ /* Build Payload#1 Header */
+ memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data));
+ cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
+ cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */
+
+ dc_build_cursor_attribute_update_payload1(
+ &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
+ pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
+
+		/* Send the 2nd, combined update_cursor_info command to the DMU */
+ dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 9f5b47b9a83d..d34f5563df2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -85,6 +85,8 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);
+void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable);
void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv);
+void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 3f64b3092692..caf0c7af2d0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -117,11 +117,31 @@ struct psr_settings {
* Add a struct dc_panel_config under dc_link
*/
struct dc_panel_config {
+ // extra panel power sequence parameters
+ struct pps {
+ unsigned int extra_t3_ms;
+ unsigned int extra_t7_ms;
+ unsigned int extra_delay_backlight_off;
+ unsigned int extra_post_t7_ms;
+ unsigned int extra_pre_t11_ms;
+ unsigned int extra_t12_ms;
+ unsigned int extra_post_OUI_ms;
+ } pps;
+ // ABM
+ struct varib {
+ unsigned int varibright_feature_enable;
+ unsigned int def_varibright_level;
+ unsigned int abm_config_setting;
+ } varib;
// edp DSC
struct dsc {
bool disable_dsc_edp;
unsigned int force_dsc_edp_policy;
} dsc;
+ /* eDP ILR */
+ struct ilr {
+ bool optimize_edp_link_rate; /* eDP ILR */
+ } ilr;
};
/*
* A link contains one or more sinks and their connected status.
@@ -141,7 +161,6 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
- enum lttpr_mode lttpr_mode;
bool is_internal_display;
/* TODO: Rename. Flag an endpoint as having a programmable mapping to a
@@ -244,7 +263,7 @@ struct dc_link {
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
struct dc_panel_config panel_config;
- enum phy_state phy_state;
+ struct phy_state phy_state;
};
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 9fcf9dc5bce4..9e6025c98db9 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -212,8 +212,7 @@ struct dc_stream_state {
/* DMCU info */
unsigned int abm_level;
- struct periodic_interrupt_config periodic_interrupt0;
- struct periodic_interrupt_config periodic_interrupt1;
+ struct periodic_interrupt_config periodic_interrupt;
/* from core_stream struct */
struct dc_context *ctx;
@@ -281,8 +280,7 @@ struct dc_stream_update {
struct dc_info_packet *hdr_static_metadata;
unsigned int *abm_level;
- struct periodic_interrupt_config *periodic_interrupt0;
- struct periodic_interrupt_config *periodic_interrupt1;
+ struct periodic_interrupt_config *periodic_interrupt;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index bdb6bac8dd97..c94a966c6612 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -300,7 +300,7 @@ static void set_high_bit_rate_capable(
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, value);
}
-/* set video latency in in ms/2+1 */
+/* set video latency in ms/2+1 */
static void set_video_latency(
struct audio *audio,
int latency_in_ms)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 32782ef9ef77..140297c8ff55 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -942,10 +942,6 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_RET_ERROR_ENGINE_ACQUIRE:
case AUX_RET_ERROR_UNKNOWN:
default:
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
- LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: Failure: operation_result=%d",
- (int)operation_result);
goto fail;
}
}
@@ -953,14 +949,11 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
fail:
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
LOG_FLAG_Error_I2cAux,
- "dce_aux_transfer_with_retries: FAILURE");
+ "%s: Failure: operation_result=%d",
+ __func__,
+ (int)operation_result);
if (!payload_reply)
payload->reply = NULL;
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
- WPP_BIT_FLAG_DC_ERROR,
- "AUX transaction failed. Result: %d",
- operation_result);
-
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 0df06740ec39..bec5e9f787fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -393,17 +393,18 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
if (copy_settings_data->dsc_enable_status &&
link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 &&
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
- sizeof(link->dpcd_caps.sink_dev_id_str)))
+ sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
else
link->psr_settings.force_ffu_mode = 0;
copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
if (link->fec_state == dc_link_fec_enabled &&
+ link->dpcd_caps.sink_dev_id == DP_DEVICE_ID_38EC11 &&
(!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
- sizeof(link->dpcd_caps.sink_dev_id_str)) ||
+ sizeof(DP_SINK_DEVICE_STR_ID_1)) ||
!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
- sizeof(link->dpcd_caps.sink_dev_id_str))))
+ sizeof(DP_SINK_DEVICE_STR_ID_2))))
copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1;
else
copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index fe346e96c2d1..d260eaa1509e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -722,7 +722,6 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_context *ctx = link->ctx;
struct graphics_object_id connector = link->link_enc->connector;
struct gpio *hpd;
- struct dc_sink *sink = link->local_sink;
bool edp_hpd_high = false;
uint32_t time_elapsed = 0;
uint32_t timeout = power_up ?
@@ -755,9 +754,9 @@ void dce110_edp_wait_for_hpd_ready(
return;
}
- if (sink != NULL) {
- if (sink->edid_caps.panel_patch.extra_t3_ms > 0) {
- int extra_t3_in_ms = sink->edid_caps.panel_patch.extra_t3_ms;
+ if (link != NULL) {
+ if (link->panel_config.pps.extra_t3_ms > 0) {
+ int extra_t3_in_ms = link->panel_config.pps.extra_t3_ms;
msleep(extra_t3_in_ms);
}
@@ -842,7 +841,7 @@ void dce110_edp_power_control(
/* add time defined by a patch, if any (usually patch extra_t12_ms is 0) */
if (link->local_sink != NULL)
remaining_min_edp_poweroff_time_ms +=
- link->local_sink->edid_caps.panel_patch.extra_t12_ms;
+ link->panel_config.pps.extra_t12_ms;
/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
if (dp_trace_get_edp_poweroff_timestamp(link) != 0) {
@@ -946,7 +945,7 @@ void dce110_edp_wait_for_T12(
current_ts,
dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
- t12_duration += link->local_sink->edid_caps.panel_patch.extra_t12_ms; // Add extra T12
+ t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12
if (time_since_edp_poweroff_ms < t12_duration)
msleep(t12_duration - time_since_edp_poweroff_ms);
@@ -965,6 +964,8 @@ void dce110_edp_backlight_control(
struct dc_context *ctx = link->ctx;
struct bp_transmitter_control cntl = { 0 };
uint8_t panel_instance;
+ unsigned int pre_T11_delay = OLED_PRE_T11_DELAY;
+ unsigned int post_T7_delay = OLED_POST_T7_DELAY;
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@@ -1043,8 +1044,10 @@ void dce110_edp_backlight_control(
link_transmitter_control(ctx->dc_bios, &cntl);
- if (enable && link->dpcd_sink_ext_caps.bits.oled)
- msleep(OLED_POST_T7_DELAY);
+ if (enable && link->dpcd_sink_ext_caps.bits.oled) {
+ post_T7_delay += link->panel_config.pps.extra_post_t7_ms;
+ msleep(post_T7_delay);
+ }
if (link->dpcd_sink_ext_caps.bits.oled ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
@@ -1066,8 +1069,10 @@ void dce110_edp_backlight_control(
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
- if (!enable && link->dpcd_sink_ext_caps.bits.oled)
- msleep(OLED_PRE_T11_DELAY);
+ if (!enable && link->dpcd_sink_ext_caps.bits.oled) {
+ pre_T11_delay += link->panel_config.pps.extra_pre_t11_ms;
+ msleep(pre_T11_delay);
+ }
}
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -1441,6 +1446,14 @@ static enum dc_status dce110_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
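+	/* TMDS keeps SYMCLK running while the OTG is on: take the OTG reference
+	 * and promote the PHY SYMCLK state accordingly.
+	 */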
+ if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ stream->link->phy_state.symclk_ref_cnts.otg = 1;
+ if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ else
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+ }
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -1577,25 +1590,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
- /* Have to setup DSC before DIG FE and BE are connected (which happens before the
- * link training). This is to make sure the bandwidth sent to DIG BE won't be
- * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
- * will be automatically set at a later time when the video is enabled
- * (DP_VID_STREAM_EN = 1).
- */
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
-
- }
-
- if (!stream->dpms_off) {
- if (dc->hwss.update_phy_state)
- dc->hwss.update_phy_state(context, pipe_ctx, TX_ON_SYMCLK_ON);
- else
- core_link_enable_stream(context, pipe_ctx);
- }
+ if (!stream->dpms_off)
+ core_link_enable_stream(context, pipe_ctx);
/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
@@ -2131,6 +2127,7 @@ static void dce110_reset_hw_ctx_wrap(
BREAK_TO_DEBUGGER();
}
pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
+ pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0;
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
@@ -3009,6 +3006,124 @@ void dce110_set_pipe(struct pipe_ctx *pipe_ctx)
abm->funcs->set_pipe(abm, otg_inst, panel_cntl->inst);
}
+void dce110_enable_lvds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum clock_source_id clock_source,
+ uint32_t pixel_clock)
+{
+ link->link_enc->funcs->enable_lvds_output(
+ link->link_enc,
+ clock_source,
+ pixel_clock);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+}
+
+void dce110_enable_tmds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ uint32_t pixel_clock)
+{
+ link->link_enc->funcs->enable_tmds_output(
+ link->link_enc,
+ clock_source,
+ color_depth,
+ signal,
+ pixel_clock);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+}
+
+void dce110_enable_dp_link_output(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct pipe_ctx *pipes =
+ link->dc->current_state->res_ctx.pipe_ctx;
+ struct clock_source *dp_cs =
+ link->dc->res_pool->dp_clock_source;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ unsigned int i;
+
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ if (!link->dc->config.edp_no_power_sequencing)
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
+	/* If the current pixel clock source is not DTO (happens after
+ * switching from HDMI passive dongle to DP on the same connector),
+ * switch the pixel clock source to DTO.
+ */
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream != NULL &&
+ pipes[i].stream->link == link) {
+ if (pipes[i].clock_source != NULL &&
+ pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ pipes[i].clock_source = dp_cs;
+ pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
+ pipes[i].stream->timing.pix_clk_100hz;
+ pipes[i].clock_source->funcs->program_pix_clk(
+ pipes[i].clock_source,
+ &pipes[i].stream_res.pix_clk_params,
+ dp_get_link_encoding_format(link_settings),
+ &pipes[i].pll_settings);
+ }
+ }
+ }
+
+ if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (dc->clk_mgr->funcs->notify_link_rate_change)
+ dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
+ }
+
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ if (link_hwss->ext.enable_dp_link_output)
+ link_hwss->ext.enable_dp_link_output(link, link_res, signal,
+ clock_source, link_settings);
+
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+}
+
+void dce110_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
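+	/* For eDP, drop the backlight before the PHY goes down; other signals
+	 * serialize PHY access through the DMCU lock instead.
+	 */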
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ link_hwss->disable_link_output(link, link_res, signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_power_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+}
+
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
@@ -3048,6 +3163,10 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
};
static const struct hwseq_private_funcs dce110_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index b6f3843d3d05..758f4b3b0087 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -90,6 +90,24 @@ bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
uint32_t frame_ramp);
void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
void dce110_set_pipe(struct pipe_ctx *pipe_ctx);
-
+void dce110_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+void dce110_enable_lvds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum clock_source_id clock_source,
+ uint32_t pixel_clock);
+void dce110_enable_tmds_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ uint32_t pixel_clock);
+void dce110_enable_dp_link_output(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 897f412f539e..b9765b3899e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -469,6 +469,7 @@ void dpp1_set_cursor_position(
REG_UPDATE(CURSOR0_CONTROL,
CUR0_ENABLE, cur_en);
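+	/* Cache the enable bit so the DMUB cursor payload can be rebuilt later. */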
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp1_cnv_set_optional_cursor_attributes(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index f26e08032da0..11e4c4e46947 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -899,6 +899,14 @@ enum dc_status dcn10_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ stream->link->phy_state.symclk_ref_cnts.otg = 1;
+ if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ else
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+ }
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -1017,6 +1025,7 @@ static void dcn10_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
+ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -2235,6 +2244,9 @@ void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Setting up OTG reset trigger\n");
for (i = 1; i < group_size; i++) {
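+		/* SubVP phantom pipes are excluded from OTG reset/synchronization. */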
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -2245,13 +2257,21 @@ void dcn10_enable_timing_synchronization(
for (i = 0; i < group_size; i++) {
if (grouped_pipes[i]->stream == NULL)
continue;
+
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream->vblank_synchronized = false;
}
- for (i = 1; i < group_size; i++)
+ for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
grouped_pipes[i]->stream_res.tg,
grouped_pipes[0]->stream_res.tg->inst);
+ }
DC_SYNC_INFO("Waiting for trigger\n");
@@ -2259,12 +2279,21 @@ void dcn10_enable_timing_synchronization(
* synchronized. Look at last pipe programmed to reset.
*/
- wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
- for (i = 1; i < group_size; i++)
+ if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
+
+ for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
grouped_pipes[i]->stream_res.tg);
+ }
for (i = 1; i < group_size; i++) {
+ if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+
opp = grouped_pipes[i]->stream_res.opp;
tg = grouped_pipes[i]->stream_res.tg;
tg->funcs->get_otg_active_size(tg, &width, &height);
@@ -2996,6 +3025,7 @@ void dcn10_prepare_bandwidth(
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3018,8 +3048,11 @@ void dcn10_prepare_bandwidth(
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
@@ -3032,6 +3065,7 @@ void dcn10_optimize_bandwidth(
{
struct dce_hwseq *hws = dc->hwseq;
struct hubbub *hubbub = dc->res_pool->hubbub;
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3055,8 +3089,11 @@ void dcn10_optimize_bandwidth(
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
DC_FP_START();
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
if (dc->debug.sanity_checks)
@@ -3335,127 +3372,6 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
return false;
}
-static bool dcn10_dmub_should_update_cursor_data(
- struct pipe_ctx *pipe_ctx,
- struct dc_debug_options *debug)
-{
- if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
- return false;
-
- if (dcn10_can_pipe_disable_cursor(pipe_ctx))
- return false;
-
- if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- && pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
- return true;
-
- return false;
-}
-
-static void dcn10_dmub_update_cursor_data(
- struct pipe_ctx *pipe_ctx,
- struct hubp *hubp,
- const struct dc_cursor_mi_param *param,
- const struct dc_cursor_position *cur_pos,
- const struct dc_cursor_attributes *cur_attr)
-{
- union dmub_rb_cmd cmd;
- struct dmub_cmd_update_cursor_info_data *update_cursor_info;
- const struct dc_cursor_position *pos;
- const struct dc_cursor_attributes *attr;
- int src_x_offset = 0;
- int src_y_offset = 0;
- int x_hotspot = 0;
- int cursor_height = 0;
- int cursor_width = 0;
- uint32_t cur_en = 0;
- unsigned int panel_inst = 0;
-
- struct dc_debug_options *debug = &hubp->ctx->dc->debug;
-
- if (!dcn10_dmub_should_update_cursor_data(pipe_ctx, debug))
- return;
- /**
- * if cur_pos == NULL means the caller is from cursor_set_attribute
- * then driver use previous cursor position data
- * if cur_attr == NULL means the caller is from cursor_set_position
- * then driver use previous cursor attribute
- * if cur_pos or cur_attr is not NULL then update it
- */
- if (cur_pos != NULL)
- pos = cur_pos;
- else
- pos = &hubp->curs_pos;
-
- if (cur_attr != NULL)
- attr = cur_attr;
- else
- attr = &hubp->curs_attr;
-
- if (!dc_get_edp_link_panel_inst(hubp->ctx->dc, pipe_ctx->stream->link, &panel_inst))
- return;
-
- src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
- x_hotspot = pos->x_hotspot;
- cursor_height = (int)attr->height;
- cursor_width = (int)attr->width;
- cur_en = pos->enable ? 1:0;
-
- // Rotated cursor width/height and hotspots tweaks for offset calculation
- if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- swap(cursor_height, cursor_width);
- if (param->rotation == ROTATION_ANGLE_90) {
- src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
- src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
- }
- } else if (param->rotation == ROTATION_ANGLE_180) {
- src_x_offset = pos->x - param->viewport.x;
- src_y_offset = pos->y - param->viewport.y;
- }
-
- if (param->mirror) {
- x_hotspot = param->viewport.width - x_hotspot;
- src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
- }
-
- if (src_x_offset >= (int)param->viewport.width)
- cur_en = 0; /* not visible beyond right edge*/
-
- if (src_x_offset + cursor_width <= 0)
- cur_en = 0; /* not visible beyond left edge*/
-
- if (src_y_offset >= (int)param->viewport.height)
- cur_en = 0; /* not visible beyond bottom edge*/
-
- if (src_y_offset + cursor_height <= 0)
- cur_en = 0; /* not visible beyond top edge*/
-
- // Cursor bitmaps have different hotspot values
- // There's a possibility that the above logic returns a negative value, so we clamp them to 0
- if (src_x_offset < 0)
- src_x_offset = 0;
- if (src_y_offset < 0)
- src_y_offset = 0;
-
- memset(&cmd, 0x0, sizeof(cmd));
- cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
- cmd.update_cursor_info.header.payload_bytes =
- sizeof(cmd.update_cursor_info.update_cursor_info_data);
- update_cursor_info = &cmd.update_cursor_info.update_cursor_info_data;
- update_cursor_info->cursor_rect.x = src_x_offset + param->viewport.x;
- update_cursor_info->cursor_rect.y = src_y_offset + param->viewport.y;
- update_cursor_info->cursor_rect.width = attr->width;
- update_cursor_info->cursor_rect.height = attr->height;
- update_cursor_info->enable = cur_en;
- update_cursor_info->pipe_idx = pipe_ctx->pipe_idx;
- update_cursor_info->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
- update_cursor_info->panel_inst = panel_inst;
- dc_dmub_srv_cmd_queue(pipe_ctx->stream->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(pipe_ctx->stream->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(pipe_ctx->stream->ctx->dmub_srv);
-}
-
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -3690,7 +3606,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
}
- dcn10_dmub_update_cursor_data(pipe_ctx, hubp, &param, &pos_cpy, NULL);
hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}
@@ -3698,25 +3613,6 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
- struct dc_cursor_mi_param param = { 0 };
-
- /**
- * If enter PSR without cursor attribute update
- * the cursor attribute of dmub_restore_plane
- * are initial value. call dmub to exit PSR and
- * restore plane then update cursor attribute to
- * avoid override with initial value
- */
- if (pipe_ctx->plane_state != NULL) {
- param.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- param.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz;
- param.viewport = pipe_ctx->plane_res.scl_data.viewport;
- param.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz;
- param.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert;
- param.rotation = pipe_ctx->plane_state->rotation;
- param.mirror = pipe_ctx->plane_state->horizontal_mirror;
- dcn10_dmub_update_cursor_data(pipe_ctx, pipe_ctx->plane_res.hubp, &param, NULL, attributes);
- }
pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
pipe_ctx->plane_res.hubp, attributes);
@@ -3801,81 +3697,56 @@ void dcn10_calc_vupdate_position(
uint32_t *start_line,
uint32_t *end_line)
{
- const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
- int vline_int_offset_from_vupdate =
- pipe_ctx->stream->periodic_interrupt0.lines_offset;
- int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- int start_position;
-
- if (vline_int_offset_from_vupdate > 0)
- vline_int_offset_from_vupdate--;
- else if (vline_int_offset_from_vupdate < 0)
- vline_int_offset_from_vupdate++;
-
- start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
+ const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
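+	/* Wrap the (possibly negative) vupdate offset back into the frame; the
+	 * negative branch keeps the historical "- 1" adjustment.
+	 */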
- if (start_position >= 0)
- *start_line = start_position;
+ if (vupdate_pos >= 0)
+ *start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
else
- *start_line = dc_crtc_timing->v_total + start_position - 1;
-
- *end_line = *start_line + 2;
-
- if (*end_line >= dc_crtc_timing->v_total)
- *end_line = 2;
+ *start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
+ *end_line = (*start_line + 2) % timing->v_total;
}
static void dcn10_cal_vline_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
- enum vline_select vline,
uint32_t *start_line,
uint32_t *end_line)
{
- enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
+ const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
- if (vline == VLINE0)
- ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
- else if (vline == VLINE1)
- ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
+ if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
+ if (vline_pos > 0)
+ vline_pos--;
+ else if (vline_pos < 0)
+ vline_pos++;
- switch (ref_point) {
- case START_V_UPDATE:
- dcn10_calc_vupdate_position(
- dc,
- pipe_ctx,
- start_line,
- end_line);
- break;
- case START_V_SYNC:
- // Suppose to do nothing because vsync is 0;
- break;
- default:
+ vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+ if (vline_pos >= 0)
+ *start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
+ else
+ *start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
+ *end_line = (*start_line + 2) % timing->v_total;
+ } else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
+ // vsync is line 0 so start_line is just the requested line offset
+ *start_line = vline_pos;
+ *end_line = (*start_line + 2) % timing->v_total;
+ } else
ASSERT(0);
- break;
- }
}
void dcn10_setup_periodic_interrupt(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline)
+ struct pipe_ctx *pipe_ctx)
{
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ uint32_t start_line = 0;
+ uint32_t end_line = 0;
- if (vline == VLINE0) {
- uint32_t start_line = 0;
- uint32_t end_line = 0;
-
- dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
+ dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
- tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
-
- } else if (vline == VLINE1) {
- pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
- tg,
- pipe_ctx->stream->periodic_interrupt1.lines_offset);
- }
+ tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
}
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 9ae07c77fdc0..0ef7bf7ddb75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -175,8 +175,7 @@ void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx);
void dcn10_setup_periodic_interrupt(
struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline);
+ struct pipe_ctx *pipe_ctx);
enum dc_status dcn10_set_clock(struct dc *dc,
enum dc_clock_type clock_type,
uint32_t clk_khz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 10e613ec7d24..f2371c948822 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -82,6 +82,10 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 294827906c69..33d780218790 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -207,10 +207,7 @@ void optc1_program_timing(
	/* In case V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and
* OTG_V_TOTAL_MIN are equal to V_TOTAL.
*/
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, v_total);
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, v_total);
+ optc->funcs->set_vtotal_min_max(optc, v_total, v_total);
/* v_sync_start = 0, v_sync_end = v_sync_width */
v_sync_end = patched_crtc_timing.v_sync_width;
@@ -649,13 +646,6 @@ uint32_t optc1_get_vblank_counter(struct timing_generator *optc)
void optc1_lock(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t regval = 0;
-
- regval = REG_READ(OTG_CONTROL);
-
- /* otg is not running, do not need to be locked */
- if ((regval & 0x1) == 0x0)
- return;
REG_SET(OTG_GLOBAL_CONTROL0, 0,
OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
@@ -663,12 +653,10 @@ void optc1_lock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 1);
/* Should be fast, status does not update on maximus */
- if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) {
-
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
- }
}
void optc1_unlock(struct timing_generator *optc)
@@ -679,16 +667,6 @@ void optc1_unlock(struct timing_generator *optc)
OTG_MASTER_UPDATE_LOCK, 0);
}
-bool optc1_is_locked(struct timing_generator *optc)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t locked;
-
- REG_GET(OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, &locked);
-
- return (locked == 1);
-}
-
void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
@@ -941,11 +919,7 @@ void optc1_set_drr(
}
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, params->vertical_total_max - 1);
-
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, params->vertical_total_min - 1);
+ optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
OTG_V_TOTAL_MIN_SEL, 1,
@@ -964,11 +938,7 @@ void optc1_set_drr(
OTG_V_TOTAL_MAX_SEL, 0,
OTG_FORCE_LOCK_ON_EVENT, 0);
- REG_SET(OTG_V_TOTAL_MIN, 0,
- OTG_V_TOTAL_MIN, 0);
-
- REG_SET(OTG_V_TOTAL_MAX, 0,
- OTG_V_TOTAL_MAX, 0);
+ optc->funcs->set_vtotal_min_max(optc, 0, 0);
}
}
@@ -1393,6 +1363,12 @@ void optc1_read_otg_state(struct optc *optc1,
REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+ REG_GET(OTG_VERTICAL_INTERRUPT1_CONTROL,
+ OTG_VERTICAL_INTERRUPT1_INT_ENABLE, &s->vertical_interrupt1_en);
+
+ REG_GET(OTG_VERTICAL_INTERRUPT1_POSITION,
+ OTG_VERTICAL_INTERRUPT1_LINE_START, &s->vertical_interrupt1_line);
+
REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &s->vertical_interrupt2_en);
@@ -1577,11 +1553,11 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc1_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = NULL,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.set_test_pattern = optc1_set_test_pattern,
.program_stereo = optc1_program_stereo,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 3fe5882ed018..88ac5f6f4c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -583,6 +583,8 @@ struct dcn_otg_state {
uint32_t underflow_occurred_status;
uint32_t otg_enabled;
uint32_t blank_enabled;
+ uint32_t vertical_interrupt1_en;
+ uint32_t vertical_interrupt1_line;
uint32_t vertical_interrupt2_en;
uint32_t vertical_interrupt2_line;
};
@@ -652,7 +654,6 @@ void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking);
bool optc1_is_blanked(struct timing_generator *optc);
-bool optc1_is_locked(struct timing_generator *optc);
void optc1_program_blank_color(
struct timing_generator *optc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 831080b9eb87..56d30baf12df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1336,6 +1336,21 @@ static noinline void dcn10_resource_construct_fp(
}
}
+static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+{
+ int i;
+
+ if (clks->num_levels == 0)
+ return false;
+
+ for (i = 0; i < clks->num_levels; i++)
+ /* Ensure that the result is sane */
+ if (clks->data[i].clocks_in_khz == 0)
+ return false;
+
+ return true;
+}
+
static bool dcn10_resource_construct(
uint8_t num_virtual_links,
struct dc *dc,
@@ -1345,6 +1360,9 @@ static bool dcn10_resource_construct(
int j;
struct dc_context *ctx = dc->ctx;
uint32_t pipe_fuses = read_pipe_fuses(ctx);
+ struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
+ int min_fclk_khz, min_dcfclk_khz, socclk_khz;
+ bool res;
ctx->dc_bios->regs = &bios_regs;
@@ -1523,15 +1541,53 @@ static bool dcn10_resource_construct(
&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
dc->debug.az_endpoint_mute_only = false;
- DC_FP_START();
- if (!dc->debug.disable_pplib_clock_request)
- dcn_bw_update_from_pplib(dc);
+
+ if (!dc->debug.disable_pplib_clock_request) {
+ /*
+ * TODO: This is not the proper way to obtain
+ * fabric_and_dram_bandwidth, should be min(fclk, memclk).
+ */
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+
+ DC_FP_START();
+
+ if (res)
+ res = verify_clock_values(&fclks);
+
+ if (res)
+ dcn_bw_update_from_pplib_fclks(dc, &fclks);
+ else
+ BREAK_TO_DEBUGGER();
+
+ DC_FP_END();
+
+ res = dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+
+ DC_FP_START();
+
+ if (res)
+ res = verify_clock_values(&dcfclks);
+
+ if (res)
+ dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks);
+ else
+ BREAK_TO_DEBUGGER();
+
+ DC_FP_END();
+ }
+
dcn_bw_sync_calcs_and_dml(dc);
if (!dc->debug.disable_pplib_wm_range) {
dc->res_pool = &pool->base;
- dcn_bw_notify_pplib_of_wm_ranges(dc);
+ DC_FP_START();
+ dcn_get_soc_clks(
+ dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
+ DC_FP_END();
+ dcn_bw_notify_pplib_of_wm_ranges(
+ dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
}
- DC_FP_END();
{
struct irq_service_init_data init_data;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index 2b9d3e63191b..915a20461c77 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -274,6 +274,7 @@ struct dccg_registers {
uint32_t DSCCLK2_DTO_PARAM;
uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
uint32_t DPSTREAMCLK_GATE_DISABLE;
+ uint32_t DCCG_GATE_DISABLE_CNTL;
uint32_t DCCG_GATE_DISABLE_CNTL2;
uint32_t DCCG_GATE_DISABLE_CNTL3;
uint32_t HDMISTREAMCLK0_DTO_PARAM;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
index cd2671161ef1..7ce64a3c1b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
@@ -445,226 +445,6 @@
type DSCRM_DSC_FORWARD_EN; \
type DSCRM_DSC_OPP_PIPE_SOURCE
-#define DSC_REG_LIST_DCN314(id) \
- SRI(DSC_TOP_CONTROL, DSC_TOP, id),\
- SRI(DSC_DEBUG_CONTROL, DSC_TOP, id),\
- SRI(DSCC_CONFIG0, DSCC, id),\
- SRI(DSCC_CONFIG1, DSCC, id),\
- SRI(DSCC_STATUS, DSCC, id),\
- SRI(DSCC_INTERRUPT_CONTROL_STATUS, DSCC, id),\
- SRI(DSCC_PPS_CONFIG0, DSCC, id),\
- SRI(DSCC_PPS_CONFIG1, DSCC, id),\
- SRI(DSCC_PPS_CONFIG2, DSCC, id),\
- SRI(DSCC_PPS_CONFIG3, DSCC, id),\
- SRI(DSCC_PPS_CONFIG4, DSCC, id),\
- SRI(DSCC_PPS_CONFIG5, DSCC, id),\
- SRI(DSCC_PPS_CONFIG6, DSCC, id),\
- SRI(DSCC_PPS_CONFIG7, DSCC, id),\
- SRI(DSCC_PPS_CONFIG8, DSCC, id),\
- SRI(DSCC_PPS_CONFIG9, DSCC, id),\
- SRI(DSCC_PPS_CONFIG10, DSCC, id),\
- SRI(DSCC_PPS_CONFIG11, DSCC, id),\
- SRI(DSCC_PPS_CONFIG12, DSCC, id),\
- SRI(DSCC_PPS_CONFIG13, DSCC, id),\
- SRI(DSCC_PPS_CONFIG14, DSCC, id),\
- SRI(DSCC_PPS_CONFIG15, DSCC, id),\
- SRI(DSCC_PPS_CONFIG16, DSCC, id),\
- SRI(DSCC_PPS_CONFIG17, DSCC, id),\
- SRI(DSCC_PPS_CONFIG18, DSCC, id),\
- SRI(DSCC_PPS_CONFIG19, DSCC, id),\
- SRI(DSCC_PPS_CONFIG20, DSCC, id),\
- SRI(DSCC_PPS_CONFIG21, DSCC, id),\
- SRI(DSCC_PPS_CONFIG22, DSCC, id),\
- SRI(DSCC_MEM_POWER_CONTROL, DSCC, id),\
- SRI(DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC, id),\
- SRI(DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC, id),\
- SRI(DSCC_MAX_ABS_ERROR0, DSCC, id),\
- SRI(DSCC_MAX_ABS_ERROR1, DSCC, id),\
- SRI(DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC, id),\
- SRI(DSCCIF_CONFIG0, DSCCIF, id),\
- SRI(DSCCIF_CONFIG1, DSCCIF, id),\
- SRI(DSCRM_DSC_FORWARD_CONFIG, DSCRM, id)
-
-#define DSC_REG_LIST_SH_MASK_DCN314(mask_sh)\
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_CLOCK_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DISPCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_TOP_CONTROL, DSC_DSCCLK_R_GATE_DIS, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_DBG_EN, mask_sh), \
- DSC_SF(DSC_TOP0_DSC_DEBUG_CONTROL, DSC_TEST_CLOCK_MUX_SEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_PER_LINE, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \
- DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \
- /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \
- DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER2_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER3_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL1_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL2_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_CONTROL_BUFFER_MODEL3_OVERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, DSC_VERSION_MAJOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, PPS_IDENTIFIER, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG0, LINEBUF_DEPTH, mask_sh), \
- DSC2_SF(DSCC0, DSCC_PPS_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BITS_PER_PIXEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, VBR_ENABLE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, SIMPLE_422, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CONVERT_RGB, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_422, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, NATIVE_420, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG1, CHUNK_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_WIDTH, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG2, PIC_HEIGHT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_WIDTH, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG3, SLICE_HEIGHT, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_XMIT_DELAY, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG4, INITIAL_DEC_DELAY, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG5, INITIAL_SCALE_VALUE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG5, SCALE_INCREMENT_INTERVAL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SCALE_DECREMENT_INTERVAL, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, FIRST_LINE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG6, SECOND_LINE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG7, NFL_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG8, NSL_BPG_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG8, SECOND_LINE_OFFSET_ADJ, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG9, INITIAL_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG9, FINAL_OFFSET, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MIN_QP, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, FLATNESS_MAX_QP, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG10, RC_MODEL_SIZE, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_EDGE_FACTOR, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_QUANT_INCR_LIMIT1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_LO, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG11, RC_TGT_OFFSET_HI, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG12, RC_BUF_THRESH3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG13, RC_BUF_THRESH7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG14, RC_BUF_THRESH11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RC_BUF_THRESH13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MIN_QP0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_MAX_QP0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG15, RANGE_BPG_OFFSET0, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET1, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MIN_QP2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_MAX_QP2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG16, RANGE_BPG_OFFSET2, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET3, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MIN_QP4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_MAX_QP4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG17, RANGE_BPG_OFFSET4, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET5, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MIN_QP6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_MAX_QP6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG18, RANGE_BPG_OFFSET6, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET7, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MIN_QP8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_MAX_QP8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG19, RANGE_BPG_OFFSET8, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET9, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MIN_QP10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_MAX_QP10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG20, RANGE_BPG_OFFSET10, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET11, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MIN_QP12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_MAX_QP12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG21, RANGE_BPG_OFFSET12, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET13, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MIN_QP14, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_MAX_QP14, mask_sh), \
- DSC_SF(DSCC0_DSCC_PPS_CONFIG22, RANGE_BPG_OFFSET14, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_DEFAULT_MEM_LOW_POWER_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_FORCE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_DIS, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_MEM_PWR_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_FORCE, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_DIS, mask_sh), \
- DSC_SF(DSCC0_DSCC_MEM_POWER_CONTROL, DSCC_NATIVE_422_MEM_PWR_STATE, mask_sh), \
- DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_LOWER, DSCC_R_Y_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_R_Y_SQUARED_ERROR_UPPER, DSCC_R_Y_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_LOWER, DSCC_G_CB_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_G_CB_SQUARED_ERROR_UPPER, DSCC_G_CB_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_LOWER, DSCC_B_CR_SQUARED_ERROR_LOWER, mask_sh), \
- DSC_SF(DSCC0_DSCC_B_CR_SQUARED_ERROR_UPPER, DSCC_B_CR_SQUARED_ERROR_UPPER, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_R_Y_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR0, DSCC_G_CB_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_MAX_ABS_ERROR1, DSCC_B_CR_MAX_ABS_ERROR, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER0_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER1_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_RECOVERY_EN, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_INT_EN, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_INTERFACE_UNDERFLOW_OCCURRED_STATUS, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, INPUT_PIXEL_FORMAT, mask_sh), \
- DSC2_SF(DSCCIF0, DSCCIF_CONFIG0__BITS_PER_COMPONENT, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG0, DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_WIDTH, mask_sh), \
- DSC_SF(DSCCIF0_DSCCIF_CONFIG1, PIC_HEIGHT, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, mask_sh), \
- DSC_SF(DSCRM0_DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_OPP_PIPE_SOURCE, mask_sh)
-
-
struct dcn20_dsc_registers {
uint32_t DSC_TOP_CONTROL;
uint32_t DSC_DEBUG_CONTROL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index b1ec0e6f7f58..4996d2810edb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -617,6 +617,17 @@ void hubp2_cursor_set_attributes(
CURSOR0_DST_Y_OFFSET, 0,
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
}
void hubp2_dmdata_set_attributes(
@@ -1033,6 +1044,25 @@ void hubp2_cursor_set_position(
REG_SET(CURSOR_DST_OFFSET, 0,
CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
+ /* Cursor Position Register Config */
+ hubp->pos.cur_ctl.bits.cur_enable = cur_en;
+ hubp->pos.position.bits.x_pos = pos->x;
+ hubp->pos.position.bits.y_pos = pos->y;
+ hubp->pos.hot_spot.bits.x_hot = x_hotspot;
+ hubp->pos.hot_spot.bits.y_hot = y_hotspot;
+ hubp->pos.dst_offset.bits.dst_x_offset = dst_x_offset;
+ /* Cursor Rectangle Cache
+ * Cursor bitmaps have different hotspot values
+ * There's a possibility that the above logic returns negative values,
+ * so we clamp them to 0
+ */
+ if (src_x_offset < 0)
+ src_x_offset = 0;
+ if (src_y_offset < 0)
+ src_y_offset = 0;
+ /* Save the necessary cursor info: x, y position here; w, h are saved in the attribute func. */
+ hubp->cur_rect.x = src_x_offset + param->viewport.x;
+ hubp->cur_rect.y = src_y_offset + param->viewport.y;
}
void hubp2_clk_cntl(struct hubp *hubp, bool enable)
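
The new cursor-rect caching above clamps negative source offsets (a hotspot that pushes the cursor past the left/top edge) to zero before adding the viewport origin. The arithmetic, as a self-contained sketch with plain ints standing in for the hubp fields:

static void cache_cursor_rect(int src_x_offset, int src_y_offset,
                              int viewport_x, int viewport_y,
                              int *rect_x, int *rect_y)
{
        /* Hotspot math can go negative; the cache stores clamped values. */
        if (src_x_offset < 0)
                src_x_offset = 0;
        if (src_y_offset < 0)
                src_y_offset = 0;
        *rect_x = src_x_offset + viewport_x;
        *rect_y = src_y_offset + viewport_y;
}
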
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 6271caca4d9a..d732b6f031a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -706,6 +706,14 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc_is_hdmi_tmds_signal(stream->signal)) {
+ stream->link->phy_state.symclk_ref_cnts.otg = 1;
+ if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ else
+ stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
+ }
+
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
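
The hunk above begins tracking PHY symclk state per link for HDMI/TMDS: enabling stream timing marks the OTG as a symclk user and moves the clock out of the fully-off state. A minimal C sketch of that bookkeeping, with trimmed stand-in types rather than the real DC structs:

enum symclk_state {
        SYMCLK_OFF_TX_OFF,      /* clock gated, transmitter off */
        SYMCLK_ON_TX_OFF,       /* clock running, transmitter still off */
        SYMCLK_ON_TX_ON,        /* clock running, transmitter driving */
};

struct phy_state_sketch {
        enum symclk_state symclk_state;
        int otg_ref_cnt;        /* stands in for symclk_ref_cnts.otg */
};

/* Called when an OTG starts driving an HDMI/TMDS stream: the OTG now
 * depends on symclk, so it must be on even while the TX stays off. */
static void otg_acquire_symclk(struct phy_state_sketch *p)
{
        p->otg_ref_cnt = 1;
        if (p->symclk_state == SYMCLK_OFF_TX_OFF)
                p->symclk_state = SYMCLK_ON_TX_OFF;
        else
                p->symclk_state = SYMCLK_ON_TX_ON;
}
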
@@ -1565,6 +1573,7 @@ static void dcn20_update_dchubp_dpp(
/* Any updates are handled in the dc interface; here we just need
 * to apply the existing state for plane enable / opp change */
if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed
+ || pipe_ctx->update_flags.bits.plane_changed
|| pipe_ctx->stream->update_flags.bits.gamut_remap
|| pipe_ctx->stream->update_flags.bits.out_csc) {
/* dpp/cm gamut remap*/
@@ -1853,24 +1862,6 @@ void dcn20_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpcc_pipe;
-
- if (pipe->vtp_locked) {
- dc->hwseq->funcs.wait_for_blank_complete(pipe->stream_res.opp);
- pipe->plane_res.hubp->funcs->set_blank(pipe->plane_res.hubp, true);
- pipe->vtp_locked = false;
-
- for (mpcc_pipe = pipe->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
- mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, true);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* If an active, non-phantom pipe is being transitioned into a phantom
@@ -1898,8 +1889,14 @@ void dcn20_post_unlock_program_front_end(
* can underflow due to HUBP_VTG_SEL programming if done in the regular front end
* programming sequence).
*/
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ while (pipe) {
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc->hwss.update_phantom_vp_position)
+ dc->hwss.update_phantom_vp_position(dc, context, pipe);
dcn20_program_pipe(dc, pipe, context);
+ }
+ pipe = pipe->bottom_pipe;
+ }
}
}
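
The restructured loop above walks the whole MPC chain instead of checking only the top pipe, so every phantom pipe gets the viewport-position update before being reprogrammed. A trimmed sketch of the traversal, with stand-in types:

struct pipe_node {
        int is_phantom;                 /* stream is SUBVP_PHANTOM */
        struct pipe_node *bottom_pipe;  /* next pipe in the MPC chain */
};

static void program_phantom_chain(struct pipe_node *pipe,
                                  void (*program)(struct pipe_node *))
{
        while (pipe) {
                if (pipe->is_phantom)
                        program(pipe);  /* update VP position, then program */
                pipe = pipe->bottom_pipe;
        }
}
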
@@ -2003,6 +2000,10 @@ void dcn20_optimize_bandwidth(
context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+ /* increase compbuf size */
+ if (hubbub->funcs->program_compbuf_size)
+ hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
+
dc->clk_mgr->funcs->update_clocks(
dc->clk_mgr,
context,
@@ -2018,9 +2019,6 @@ void dcn20_optimize_bandwidth(
pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
}
}
- /* increase compbuf size */
- if (hubbub->funcs->program_compbuf_size)
- hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
}
bool dcn20_update_bandwidth(
@@ -2346,7 +2344,9 @@ static void dcn20_reset_back_end_for_pipe(
struct dc_state *context)
{
int i;
- struct dc_link *link;
+ struct dc_link *link = pipe_ctx->stream->link;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+
DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
@@ -2354,19 +2354,15 @@ static void dcn20_reset_back_end_for_pipe(
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- link = pipe_ctx->stream->link;
/* DPMS may already be disabled, or the */
/* dpms_off status may be stale due to the fastboot
 * feature. When the system resumes from S4 with the second
 * screen only, dpms_off would be true even though
 * VBIOS lit up the eDP, so check the link status too.
*/
- if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) {
- if (dc->hwss.update_phy_state)
- dc->hwss.update_phy_state(dc->current_state, pipe_ctx, TX_OFF_SYMCLK_OFF);
- else
- core_link_disable_stream(pipe_ctx);
- } else if (pipe_ctx->stream_res.audio)
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ core_link_disable_stream(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
/* free acquired resources */
@@ -2406,6 +2402,16 @@ static void dcn20_reset_back_end_for_pipe(
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
pipe_ctx->stream_res.tg, NULL);
+ /* TODO - convert symclk_ref_cnts for otg to a bit map to solve
+ * the case where the same symclk is shared across multiple otg
+ * instances
+ */
+ link->phy_state.symclk_ref_cnts.otg = 0;
+ if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) {
+ link_hwss->disable_link_output(link,
+ &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++)
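
The teardown hunk above is the counterpart to the enable-path bookkeeping: once the OTG reference drops, a symclk that is on with the TX already off can be gated. A sketch reusing struct phy_state_sketch from the earlier note; disable_link_output() stands in for link_hwss->disable_link_output():

static void otg_release_symclk(struct phy_state_sketch *p,
                               void (*disable_link_output)(void))
{
        p->otg_ref_cnt = 0;     /* TODO in the diff: make this a bitmap */
        if (p->symclk_state == SYMCLK_ON_TX_OFF) {
                disable_link_output();
                p->symclk_state = SYMCLK_OFF_TX_OFF;
        }
}
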
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 91e4885b743e..7c5817c426fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -96,6 +96,10 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
index 694260c10a01..ccd91792991b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.c
@@ -215,7 +215,8 @@ void mmhubbub2_config_mcif_irq(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, params->sw_overrun_int_en);
REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, params->vce_int_en);
- REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, params->vce_slice_int_en);
+ if (mcif_wb20->mcif_wb_mask->MCIF_WB_BUFMGR_VCE_SLICE_INT_EN)
+ REG_UPDATE(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, params->vce_slice_int_en);
}
void mmhubbub2_enable_mcif(struct mcif_wb *mcif_wb)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 0340fdd3f5fb..a08c335b7383 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -529,6 +529,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
index 05b3fba9ccce..61bcfa03c4e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -82,7 +82,7 @@ static bool patch_address_for_sbs_tb_stereo(
return false;
}
-static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
+static bool gpu_addr_to_uma(struct dce_hwseq *hwseq,
PHYSICAL_ADDRESS_LOC *addr)
{
bool is_in_uma;
@@ -98,6 +98,7 @@ static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
} else {
is_in_uma = false;
}
+ return is_in_uma;
}
static void plane_address_in_gpu_space_to_uma(struct dce_hwseq *hwseq,
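
The signature change above lets callers learn whether the address actually fell inside the UMA aperture instead of discarding that result. A sketch of the shape of such a helper; the bounds check and offset translation here are illustrative assumptions, not the dcn201 register math:

#include <stdbool.h>
#include <stdint.h>

static bool gpu_addr_to_uma_sketch(uint64_t *addr, uint64_t fb_base,
                                   uint64_t fb_top, uint64_t fb_offset)
{
        bool is_in_uma = (*addr >= fb_base && *addr < fb_top);

        if (is_in_uma)
                *addr = *addr - fb_base + fb_offset; /* assumed translation */
        return is_in_uma;       /* new: report the result to the caller */
}
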
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
index 1826dd7f3da1..9c16633e473a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
@@ -86,6 +86,10 @@ static const struct hw_sequencer_funcs dcn201_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 5752271f22df..c5e200d09038 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -67,15 +67,9 @@ static uint32_t convert_and_clamp(
void dcn21_dchvm_init(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
- uint32_t riommu_active, prefetch_done;
+ uint32_t riommu_active;
int i;
- REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
-
- if (prefetch_done) {
- hubbub->riommu_active = true;
- return;
- }
//Init DCHVM block
REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index b270f0b194dc..fe1a8e2e08ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -99,6 +99,10 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
#ifndef TRIM_FSFT
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 7cb35bb1c0f1..887081472c0d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -657,7 +657,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
- .optimize_edp_link_rate = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -677,6 +676,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
@@ -1367,6 +1372,11 @@ static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
+static void dcn21_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
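
Across dcn21/dcn31/dcn314 in this diff, the eDP ILR optimization moves from a global debug default to per-ASIC panel-config defaults exposed through a resource hook. The pattern, sketched with a trimmed struct (the real dc_panel_config carries more fields):

#include <stdbool.h>

struct panel_config_sketch {
        struct {
                bool optimize_edp_link_rate;
        } ilr;
};

static const struct panel_config_sketch panel_config_defaults = {
        .ilr = { .optimize_edp_link_rate = true },
};

static void get_panel_config_defaults(struct panel_config_sketch *out)
{
        *out = panel_config_defaults;   /* plain struct copy, no deep state */
}
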
#define CTX ctx
#define REG(reg_name) \
@@ -1408,6 +1418,7 @@ static const struct resource_funcs dcn21_res_pool_funcs = {
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = dcn21_update_bw_bounding_box,
+ .get_panel_config_defaults = dcn21_get_panel_config_defaults,
};
static bool dcn21_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 4a668d6563df..e5b7ef7422b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -372,6 +372,10 @@ void dpp3_set_cursor_attributes(
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
+
+ dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
+ dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en;
+ dpp_base->att.cur0_ctl.bits.mode = color_format;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index fb59fed8f425..8c5045711264 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -939,13 +939,32 @@ bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, s
void dcn30_hardware_release(struct dc *dc)
{
+ bool subvp_in_use = false;
+ uint32_t i;
+
dc_dmub_srv_p_state_delegate(dc, false, NULL);
+ dc_dmub_setup_subvp_dmub_command(dc, dc->current_state, false);
+
+ /* SubVP is treated the same way as FPO. If the driver is disabled
+ * while a SubVP config is in use, disable SubVP and force P-state
+ * support on the DCN side to prevent a P-State hang on driver enable.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ subvp_in_use = true;
+ break;
+ }
+ }
/* If pstate unsupported, or still supported
* by firmware, force it supported by dcn
*/
if (dc->current_state)
- if ((!dc->clk_mgr->clks.p_state_change_support ||
+ if ((!dc->clk_mgr->clks.p_state_change_support || subvp_in_use ||
dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) &&
dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
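
The scan above mirrors the diff: any pipe whose stream is a SubVP main stream forces the P-state workaround on hardware release, the same way fw-based mclk switching (FPO) already did. As a standalone sketch with trimmed types:

#include <stdbool.h>
#include <stddef.h>

enum mall_stream_type_sketch { SUBVP_NONE, SUBVP_MAIN, SUBVP_PHANTOM };

struct pipe_sketch {
        bool has_stream;
        enum mall_stream_type_sketch type;
};

static bool subvp_in_use(const struct pipe_sketch *pipes, size_t count)
{
        for (size_t i = 0; i < count; i++) {
                if (!pipes[i].has_stream)
                        continue;
                if (pipes[i].type == SUBVP_MAIN)
                        return true;    /* one main stream is enough */
        }
        return false;
}
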
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 4c06e6e1ba4a..3216d10c58ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -100,6 +100,10 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
index f2580e65196c..7446e54bf5aa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h
@@ -227,11 +227,7 @@
SF(MCIF_WB0_MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
- SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
SF(MCIF_WB0_MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
@@ -363,11 +359,7 @@
SF(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 1782b9c26cf4..892d3c4d01a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -319,13 +319,13 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc1_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
+ .set_vtotal_min_max = optc3_set_vtotal_min_max,
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
@@ -366,4 +366,3 @@ void dcn30_timing_generator_init(struct optc *optc1)
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 64320e0ca446..020f512e9690 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -724,7 +724,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
.disable_psr = false,
- .use_max_lb = true
+ .use_max_lb = true,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1654,6 +1655,9 @@ noinline bool dcn30_internal_validate_bw(
if (!pipes)
return false;
+ context->bw_ctx.dml.vba.maxMpcComb = 0;
+ context->bw_ctx.dml.vba.VoltageLevel = 0;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
@@ -1872,6 +1876,7 @@ noinline bool dcn30_internal_validate_bw(
if (repopulate_pipes)
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ context->bw_ctx.dml.vba.VoltageLevel = vlevel;
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
@@ -1916,7 +1921,7 @@ static int get_refresh_rate(struct dc_state *context)
*/
#define V_SCALE (10000 / MAX_STRETCHED_V_BLANK)
-int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
+static int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
{
struct dc_crtc_timing *timing = NULL;
uint32_t sec_per_100_lines;
@@ -1946,7 +1951,7 @@ int get_frame_rate_at_max_stretch_100hz(struct dc_state *context)
return scaled_refresh_rate;
}
-bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(struct dc_state *context)
+static bool is_refresh_rate_support_mclk_switch_using_fw_based_vblank_stretch(struct dc_state *context)
{
int refresh_rate_max_stretch_100hz;
int min_refresh_100hz;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index 3d42a1a337ec..6192851c59ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -99,6 +99,10 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.optimize_pwr_state = dcn21_optimize_pwr_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index db172677d613..f04595b750ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -634,7 +634,7 @@ static const struct dcn20_vmid_mask vmid_masks = {
DCN20_VMID_MASK_SH_LIST(_MASK)
};
-static const struct resource_caps res_cap_dcn301 = {
+static struct resource_caps res_cap_dcn301 = {
.num_timing_generator = 4,
.num_opp = 4,
.num_video_plane = 4,
@@ -700,6 +700,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.dwb_fi_phase = -1, // -1 = disable
.dmub_command_table = true,
.use_max_lb = false,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -851,7 +852,7 @@ static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)
vmid->masks = &vmid_masks;
}
- hubbub3->num_vmid = res_cap_dcn301.num_vmid;
+ hubbub3->num_vmid = res_cap_dcn301.num_vmid;
return &hubbub3->base;
}
@@ -1429,6 +1430,8 @@ static bool dcn301_resource_construct(
ctx->dc_bios->regs = &bios_regs;
+ if (dc->ctx->asic_id.chip_id == DEVICE_ID_VGH_1435)
+ res_cap_dcn301.num_pll = 2;
pool->base.res_cap = &res_cap_dcn301;
pool->base.funcs = &dcn301_res_pool_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 4fab537e822f..b925b6ddde5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -93,7 +93,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
- .use_max_lb = true
+ .use_max_lb = true,
+ .exit_idle_opt_for_cursor_updates = true
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index d97076648acb..527d5c902878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -77,6 +77,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
+ .exit_idle_opt_for_cursor_updates = true,
.disable_idle_power_optimizations = false,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 23621ff08c90..814f401db3b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -150,9 +150,9 @@ static void dcn31_hpo_dp_stream_enc_dp_blank(
* 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
* a little more because we may not trust delay accuracy.
*/
- //REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
- // VID_STREAM_STATUS, 0,
- // 10, 5000);
+ REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_STATUS, 0,
+ 10, 5000);
/* Disable SDP transmission */
REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
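
Re-enabling the REG_WAIT above restores a bounded poll: 10 us per try, 5000 tries, so about 50 ms total, which covers one full frame of the slowest (24 Hz) mode plus margin. The equivalent loop, with read_vid_stream_status() and udelay() as stand-ins for the register read and delay primitives:

unsigned int read_vid_stream_status(void);      /* stand-in register read */
void udelay(unsigned long usecs);               /* stand-in busy delay */

static int wait_vid_stream_off(void)
{
        for (int tries = 0; tries < 5000; tries++) {
                if (read_vid_stream_status() == 0)
                        return 0;       /* stream blanked */
                udelay(10);
        }
        return -1;      /* timed out after ~50 ms */
}
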
@@ -197,7 +197,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
uint32_t h_back_porch;
uint32_t h_width;
uint32_t v_height;
- unsigned long long v_freq;
+ uint64_t v_freq;
uint8_t misc0 = 0;
uint8_t misc1 = 0;
uint8_t hsp;
@@ -360,7 +360,7 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;
vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;
- v_freq = hw_crtc_timing.pix_clk_100hz * 100;
+ v_freq = (uint64_t)hw_crtc_timing.pix_clk_100hz * 100;
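
The cast above matters because pix_clk_100hz is 32-bit: multiplying by 100 in 32-bit arithmetic wraps once the product exceeds UINT32_MAX, i.e. for pixel clocks above roughly 4.29 GHz, which very high resolution DP 2.x-class timings can approach. The fix widens one operand before the multiply:

#include <stdint.h>

static uint64_t pix_clk_hz(uint32_t pix_clk_100hz)
{
        /* Cast first so the multiply itself is done in 64 bits. */
        return (uint64_t)pix_clk_100hz * 100;
}
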
/* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
*
@@ -436,32 +436,28 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
{
struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
uint32_t dmdata_packet_enabled = 0;
- bool sdp_stream_enable = false;
- if (info_frame->vsc.valid) {
+ if (info_frame->vsc.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
0, /* packetIndex */
&info_frame->vsc,
true);
- sdp_stream_enable = true;
- }
- if (info_frame->spd.valid) {
+
+ if (info_frame->spd.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
2, /* packetIndex */
&info_frame->spd,
true);
- sdp_stream_enable = true;
- }
- if (info_frame->hdrsmd.valid) {
+
+ if (info_frame->hdrsmd.valid)
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
3, /* packetIndex */
&info_frame->hdrsmd,
true);
- sdp_stream_enable = true;
- }
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 51c5f3685470..6360dc9502e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -876,7 +876,7 @@ static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub,
return true;
}
-static int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
+int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
index e3a654bf04e8..70c60de448ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
@@ -122,6 +122,8 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
+ struct dcn_hubbub_phys_addr_config *pa_config);
void hubbub31_construct(struct dcn20_hubbub *hubbub3,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 8d83b611507a..bdf101547484 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -535,11 +535,11 @@ static void dcn31_reset_back_end_for_pipe(
pipe_ctx->stream_res.tg,
OPTC_DSC_DISABLED, 0, 0);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
-
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
if (pipe_ctx->stream_res.tg->funcs->set_drr)
pipe_ctx->stream_res.tg->funcs->set_drr(
@@ -553,12 +553,9 @@ static void dcn31_reset_back_end_for_pipe(
* screen only, dpms_off would be true even though
* VBIOS lit up the eDP, so check the link status too.
*/
- if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) {
- if (dc->hwss.update_phy_state)
- dc->hwss.update_phy_state(dc->current_state, pipe_ctx, TX_OFF_SYMCLK_OFF);
- else
- core_link_disable_stream(pipe_ctx);
- } else if (pipe_ctx->stream_res.audio)
+ if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
+ core_link_disable_stream(pipe_ctx);
+ else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
/* free acquired resources */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index e708f07fe75a..3a32810bbe38 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -100,6 +100,10 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index 2f7404a97479..63a677c8ee27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -201,7 +201,6 @@ void optc31_set_drr(
// Setup manual flow control for EOF via TRIG_A
optc->funcs->setup_manual_trigger(optc);
-
} else {
REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
OTG_SET_V_TOTAL_MIN_MASK, 0,
@@ -260,7 +259,6 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.enable_crtc_reset = optc1_enable_crtc_reset,
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
- .is_locked = optc1_is_locked,
.unlock = optc1_unlock,
.lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 8745132d6374..fddc21a5a04c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -888,7 +888,6 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.disable_z10 = true,
- .optimize_edp_link_rate = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
};
@@ -911,6 +910,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1803,6 +1808,11 @@ validate_out:
return out;
}
+static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1829,6 +1839,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn31_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn31_get_panel_config_defaults,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
index 232cc15979dd..1bd7e0f327d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
@@ -45,6 +45,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg314_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg314_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -52,6 +94,11 @@ static void dccg314_set_pixel_rate_div(
enum pixel_rate_div k2)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+ dccg314_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
+ return;
switch (otg_inst) {
case 0:
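
The guard added above reads the current divider programming first and bails out on invalid or redundant requests, avoiding a register write that could glitch a live pixel clock. The control flow, with the register access abstracted into stand-in helpers:

enum pixel_rate_div_sketch { DIV_BY_1, DIV_BY_2, DIV_BY_4, DIV_NA };

void read_pixel_rate_div(int otg_inst, enum pixel_rate_div_sketch *k1,
                         enum pixel_rate_div_sketch *k2);       /* stand-in */
void write_pixel_rate_div(int otg_inst, enum pixel_rate_div_sketch k1,
                          enum pixel_rate_div_sketch k2);       /* stand-in */

static void set_pixel_rate_div(int otg_inst, enum pixel_rate_div_sketch k1,
                               enum pixel_rate_div_sketch k2)
{
        enum pixel_rate_div_sketch cur_k1, cur_k2;

        read_pixel_rate_div(otg_inst, &cur_k1, &cur_k2);
        if (k1 == DIV_NA || k2 == DIV_NA || (k1 == cur_k1 && k2 == cur_k2))
                return; /* invalid request or nothing to change */
        write_pixel_rate_div(otg_inst, k1, k2);
}
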
@@ -137,7 +184,7 @@ static void dccg314_set_dtbclk_p_src(
}
/* Controls the generation of pixel valid for OTG in the (OTG -> HPO) case */
-void dccg314_set_dtbclk_dto(
+static void dccg314_set_dtbclk_dto(
struct dccg *dccg,
const struct dtbclk_dto_params *params)
{
@@ -181,7 +228,7 @@ void dccg314_set_dtbclk_dto(
}
}
-void dccg314_set_dpstreamclk(
+static void dccg314_set_dpstreamclk(
struct dccg *dccg,
enum streamclk_source src,
int otg_inst,
@@ -220,7 +267,7 @@ void dccg314_set_dpstreamclk(
}
}
-void dccg314_set_valid_pixel_rate(
+static void dccg314_set_valid_pixel_rate(
struct dccg *dccg,
int ref_dtbclk_khz,
int otg_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
index 9a4a9efc0203..6a35986307af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.h
@@ -63,34 +63,28 @@
DCCG_SRII(PHASE, DTBCLK_DTO, 3),\
SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
+ SR(DCCG_AUDIO_DTO_SOURCE),\
+ SR(DENTIST_DISPCLK_CNTL),\
+ SR(DSCCLK0_DTO_PARAM),\
+ SR(DSCCLK1_DTO_PARAM),\
+ SR(DSCCLK2_DTO_PARAM),\
+ SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL2),\
+ SR(DCCG_GATE_DISABLE_CNTL3),\
+ SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
SR(DTBCLK_P_CNTL),\
SR(DCCG_AUDIO_DTO_SOURCE)
-
-#define DCCG_MASK_SH_LIST_DCN314(mask_sh) \
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
+#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 1, mask_sh),\
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 2, mask_sh),\
- DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 3, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\
DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\
DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_SRC_SEL, mask_sh),\
- DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, mask_sh),\
- DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_SRC_SEL, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN, mask_sh),\
@@ -100,7 +94,6 @@
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK2_SRC_SEL, mask_sh),\
DCCG_SF(DPSTREAMCLK_CNTL, DPSTREAMCLK3_SRC_SEL, mask_sh),\
DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_EN, mask_sh),\
- DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, mask_sh),\
@@ -148,7 +141,48 @@
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_SRC_SEL, mask_sh),\
DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
- DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh)
+ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh),\
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh)
+
+#define DCCG_MASK_SH_LIST_DCN314(mask_sh) \
+ DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 2, mask_sh),\
+ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 3, mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, mask_sh),\
+ DCCG_SF(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_SRC_SEL, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK_CNTL, HDMISTREAMCLK0_DTO_FORCE_DIS, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh)
struct dccg *dccg314_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index e3351ddc566c..7e773bf7b895 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -56,7 +56,8 @@ static void enc314_enable_fifo(struct stream_encoder *enc)
/* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000);
@@ -67,8 +68,7 @@ static void enc314_disable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0,
- DIG_FIFO_READ_START_LEVEL, 0);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
}
static void enc314_dp_set_odm_combine(
@@ -81,7 +81,7 @@ static void enc314_dp_set_odm_combine(
}
/* setup stream encoder in dvi mode */
-void enc314_stream_encoder_dvi_set_stream_attribute(
+static void enc314_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
@@ -262,6 +262,16 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+static void enc314_stream_encoder_dp_blank(
+ struct dc_link *link,
+ struct stream_encoder *enc)
+{
+ /* New to DCN314 - disable the FIFO before VID stream disable. */
+ enc314_disable_fifo(enc);
+
+ enc1_stream_encoder_dp_blank(link, enc);
+}
+
static void enc314_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -317,15 +327,11 @@ static void enc314_stream_encoder_dp_unblank(
/* switch DP encoder to CRTC data, but reset the fifo first. It may happen
* that it overflows during mode transition, and sometimes doesn't recover.
*/
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
udelay(10);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
- /* DIG Resync FIFO now needs to be explicitly enabled. */
- enc314_enable_fifo(enc);
-
/* wait 100us for DIG/DP logic to prime
* (i.e. a few video lines)
*/
@@ -341,6 +347,12 @@ static void enc314_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+ /*
+ * DIG Resync FIFO now needs to be explicitly enabled.
+ * This should come after DP_VID_STREAM_ENABLE per HW docs.
+ */
+ enc314_enable_fifo(enc);
+
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
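
The net effect of this hunk is a strict ordering change in the unblank path: the redundant read-start-level write goes away, and the resync-FIFO enable moves to after DP_VID_STREAM_ENABLE, which the comment attributes to the HW docs. The sequence, with each helper standing in for the REG_UPDATE/udelay steps in the diff:

void steer_fifo_reset_pulse(void);      /* DP_STEER_FIFO_RESET 1 -> 0 */
void wait_us(unsigned int usecs);       /* stand-in delay */
void set_vid_stream_enable(int on);     /* DP_VID_STREAM_ENABLE */
void enable_resync_fifo(void);          /* enc314_enable_fifo() */

static void unblank_sequence_sketch(void)
{
        steer_fifo_reset_pulse();       /* clear any mode-change overflow */
        wait_us(100);                   /* let DIG/DP logic prime */
        set_vid_stream_enable(1);
        enable_resync_fifo();           /* explicitly last, per HW docs */
}
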
@@ -409,7 +421,7 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
.dp_blank =
- enc1_stream_encoder_dp_blank,
+ enc314_stream_encoder_dp_blank,
.dp_unblank =
enc314_stream_encoder_dp_unblank,
.audio_mute_control = enc3_audio_mute_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 39931d48f385..588c1c71241f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -343,12 +343,14 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
bool two_pix_per_container = false;
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ return odm_combine_factor;
+
if (is_dp_128b_132b_signal(pipe_ctx)) {
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
@@ -364,7 +366,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ if (odm_combine_factor == 2)
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -384,21 +386,10 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
return;
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1
- || dcn314_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
+ if (optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing) || odm_combine_factor > 1)
pix_per_cycle = 2;
if (pipe_ctx->stream_res.stream_enc->funcs->set_input_mode)
pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
pix_per_cycle);
}
-
-bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
- dc->debug.enable_dp_dig_pixel_rate_div_policy)
- return true;
- return false;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index d014580592ac..244280298212 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -41,6 +41,4 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
-bool dcn314_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
-
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index fcf67eb3478f..5b6c2d94ec71 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -102,6 +102,10 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dce110_disable_link_output,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
@@ -146,7 +150,6 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn314_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn314_set_pixels_per_cycle,
- .is_dp_dig_pixel_rate_div_policy = dcn314_is_dp_dig_pixel_rate_div_policy,
};
void dcn314_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 38aa28ec6b13..47eb162f1a75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -150,7 +150,7 @@ static bool optc314_disable_crtc(struct timing_generator *optc)
return true;
}
-void optc314_phantom_crtc_post_enable(struct timing_generator *optc)
+static void optc314_phantom_crtc_post_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 49b7e256d4ea..d0ad72caead2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -87,6 +87,9 @@
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH__SHIFT 0x10
#define DCHUBBUB_DEBUG_CTRL_0__DET_DEPTH_MASK 0x01FF0000L
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE__SHIFT 0x0
+#define DSCC0_DSCC_CONFIG0__ICH_RESET_AT_END_OF_LINE_MASK 0x0000000FL
+
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
@@ -579,7 +582,7 @@ static const struct dcn30_mmhubbub_mask mcif_wb30_mask = {
#define dsc_regsDCN314(id)\
[id] = {\
- DSC_REG_LIST_DCN314(id)\
+ DSC_REG_LIST_DCN20(id)\
}
static const struct dcn20_dsc_registers dsc_regs[] = {
@@ -590,11 +593,11 @@ static const struct dcn20_dsc_registers dsc_regs[] = {
};
static const struct dcn20_dsc_shift dsc_shift = {
- DSC_REG_LIST_SH_MASK_DCN314(__SHIFT)
+ DSC_REG_LIST_SH_MASK_DCN20(__SHIFT)
};
static const struct dcn20_dsc_mask dsc_mask = {
- DSC_REG_LIST_SH_MASK_DCN314(_MASK)
+ DSC_REG_LIST_SH_MASK_DCN20(_MASK)
};
static const struct dcn30_mpc_registers mpc_regs = {
@@ -844,7 +847,7 @@ static const struct resource_caps res_cap_dcn314 = {
.num_ddc = 5,
.num_vmid = 16,
.num_mpc_3dlut = 2,
- .num_dsc = 4,
+ .num_dsc = 3,
};
static const struct dc_plane_cap plane_cap = {
@@ -878,7 +881,8 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_z10 = true, /*hw not support it*/
+ .disable_z10 = false,
+ .enable_z9_disable_interface = true,
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = false,
@@ -911,7 +915,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
.seamless_boot_odm_combine = true
};
@@ -933,6 +936,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1672,6 +1681,11 @@ static void dcn314_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
DC_FP_END();
}
+static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct resource_funcs dcn314_res_pool_funcs = {
.destroy = dcn314_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1694,6 +1708,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn314_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn314_get_panel_config_defaults,
};
static struct clock_source *dcn30_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index eebb42c9ddd6..58746c437554 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
.psr_power_use_phy_fsm = 0,
};
@@ -907,6 +906,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1708,6 +1713,11 @@ static int dcn315_populate_dml_pipes_from_context(
return pipe_cnt;
}
+static void dcn315_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1721,7 +1731,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.panel_cntl_create = dcn31_panel_cntl_create,
.validate_bandwidth = dcn31_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
- .update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ .update_soc_for_wm_a = dcn315_update_soc_for_wm_a,
.populate_dml_pipes = dcn315_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1734,6 +1744,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn315_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn315_get_panel_config_defaults,
};
static bool dcn315_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index f4b52a35ad84..6b40a11ac83a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -885,7 +885,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .optimize_edp_link_rate = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -906,6 +905,12 @@ static const struct dc_debug_options debug_defaults_diags = {
.use_max_lb = true
};
+static const struct dc_panel_config panel_config_defaults = {
+ .ilr = {
+ .optimize_edp_link_rate = true,
+ },
+};
+
static void dcn31_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1710,6 +1715,11 @@ static int dcn316_populate_dml_pipes_from_context(
return pipe_cnt;
}
+static void dcn316_get_panel_config_defaults(struct dc_panel_config *panel_config)
+{
+ *panel_config = panel_config_defaults;
+}
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1736,6 +1746,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
.update_bw_bounding_box = dcn316_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .get_panel_config_defaults = dcn316_get_panel_config_defaults,
};
static bool dcn316_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index 0d5e8a441512..e4daed44ef5f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -42,6 +42,48 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg32_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div *k1,
+ enum pixel_rate_div *k2)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t val_k1 = PIXEL_RATE_DIV_NA, val_k2 = PIXEL_RATE_DIV_NA;
+
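+	/* Default to NA so an invalid OTG instance reads back as unprogrammed */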
+ *k1 = PIXEL_RATE_DIV_NA;
+ *k2 = PIXEL_RATE_DIV_NA;
+
+ switch (otg_inst) {
+ case 0:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, &val_k1,
+ OTG0_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 1:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, &val_k1,
+ OTG1_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 2:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, &val_k1,
+ OTG2_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ case 3:
+ REG_GET_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, &val_k1,
+ OTG3_PIXEL_RATE_DIVK2, &val_k2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ *k1 = (enum pixel_rate_div)val_k1;
+ *k2 = (enum pixel_rate_div)val_k2;
+}
+
static void dccg32_set_pixel_rate_div(
struct dccg *dccg,
uint32_t otg_inst,
@@ -50,6 +92,17 @@ static void dccg32_set_pixel_rate_div(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ enum pixel_rate_div cur_k1 = PIXEL_RATE_DIV_NA, cur_k2 = PIXEL_RATE_DIV_NA;
+
+	// Don't program 0xF into the register field. It is not a valid value:
+	// the K1 field is only 1 bit wide and the K2 field only 2 bits wide.
+ if (k1 == PIXEL_RATE_DIV_NA || k2 == PIXEL_RATE_DIV_NA)
+ return;
+
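+	/* Read back the current dividers and skip the write if they already match */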
+ dccg32_get_pixel_rate_div(dccg, otg_inst, &cur_k1, &cur_k2);
+ if (k1 == cur_k1 && k2 == cur_k2)
+ return;
+
switch (otg_inst) {
case 0:
REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
@@ -133,7 +186,7 @@ static void dccg32_set_dtbclk_p_src(
}
/* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */
-void dccg32_set_dtbclk_dto(
+static void dccg32_set_dtbclk_dto(
struct dccg *dccg,
const struct dtbclk_dto_params *params)
{
@@ -208,7 +261,7 @@ static void dccg32_get_dccg_ref_freq(struct dccg *dccg,
return;
}
-void dccg32_set_dpstreamclk(
+static void dccg32_set_dpstreamclk(
struct dccg *dccg,
enum streamclk_source src,
int otg_inst,
@@ -245,7 +298,7 @@ void dccg32_set_dpstreamclk(
}
}
-void dccg32_otg_add_pixel(struct dccg *dccg,
+static void dccg32_otg_add_pixel(struct dccg *dccg,
uint32_t otg_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
@@ -254,7 +307,7 @@ void dccg32_otg_add_pixel(struct dccg *dccg,
OTG_ADD_PIXEL[otg_inst], 1);
}
-void dccg32_otg_drop_pixel(struct dccg *dccg,
+static void dccg32_otg_drop_pixel(struct dccg *dccg,
uint32_t otg_inst)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index d6855d4f749b..076969d928af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -118,7 +118,7 @@ void dcn32_link_encoder_enable_dp_output(
}
}
-bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
uint32_t dp_alt_mode_disable = 0;
@@ -133,7 +133,7 @@ bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
return is_usb_c_alt_mode;
}
-void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
@@ -150,12 +150,6 @@ void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
}
-void enc32_set_dig_output_mode(struct link_encoder *enc, uint8_t pix_per_container)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container);
-}
-
static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
@@ -186,7 +180,6 @@ static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.is_in_alt_mode = dcn32_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn32_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
- .set_dig_output_mode = enc32_set_dig_output_mode,
};
void dcn32_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index 749a1e8cb811..bbcfce06bec0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -53,8 +53,4 @@ void dcn32_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
-void enc32_set_dig_output_mode(
- struct link_encoder *enc,
- uint8_t pix_per_container);
-
#endif /* __DC_LINK_ENCODER__DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 38a48983f663..d19fc93dbc75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -60,7 +60,7 @@ static void enc32_dp_set_odm_combine(
}
/* setup stream encoder in dvi mode */
-void enc32_stream_encoder_dvi_set_stream_attribute(
+static void enc32_stream_encoder_dvi_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
bool is_dual_link)
@@ -243,6 +243,39 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
return two_pix;
}
+static bool is_h_timing_divisible_by_2(const struct dc_crtc_timing *timing)
+{
+	/* Math borrowed from the function of the same name in inc/resource;
+	 * checks whether the horizontal timing is divisible by 2.
+	 */
+
+ bool divisible = false;
+ uint16_t h_blank_start = 0;
+ uint16_t h_blank_end = 0;
+
+ if (timing) {
+ h_blank_start = timing->h_total - timing->h_front_porch;
+ h_blank_end = h_blank_start - timing->h_addressable;
+
+ /* HTOTAL, Hblank start/end, and Hsync start/end all must be
+ * divisible by 2 in order for the horizontal timing params
+ * to be considered divisible by 2. Hsync start is always 0.
+ */
+ divisible = (timing->h_total % 2 == 0) &&
+ (h_blank_start % 2 == 0) &&
+ (h_blank_end % 2 == 0) &&
+ (timing->h_sync_width % 2 == 0);
+ }
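+	/* Illustrative check with CEA 1080p60 timing (h_total 2200, front
+	 * porch 88, sync width 44, active 1920): blank start is 2112 and
+	 * blank end 192, all even, so the timing is divisible by 2.
+	 */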
+ return divisible;
+}
+
+static bool is_dp_dig_pixel_rate_div_policy(struct dc *dc, const struct dc_crtc_timing *timing)
+{
+	/* Should be functionally the same as dcn32_is_dp_dig_pixel_rate_div_policy for DP encoders */
+ return is_h_timing_divisible_by_2(timing) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy;
+}
+
static void enc32_stream_encoder_dp_unblank(
struct dc_link *link,
struct stream_encoder *enc,
@@ -259,7 +292,7 @@ static void enc32_stream_encoder_dp_unblank(
/* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt > 1
- || dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ || is_dp_dig_pixel_rate_div_policy(dc, &param->timing)) {
/*this logic should be the same in get_pixel_clock_parameters() */
n_multiply = 1;
}
@@ -355,7 +388,7 @@ static void enc32_dp_set_dsc_config(struct stream_encoder *enc,
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode);
+ REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
}
/* this function read dsc related register fields to be logged later in dcn10_log_hw_state
@@ -378,24 +411,6 @@ static void enc32_read_state(struct stream_encoder *enc, struct enc_state *s)
}
}
-static void enc32_stream_encoder_reset_fifo(struct stream_encoder *enc)
-{
- struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t fifo_enabled;
-
- REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &fifo_enabled);
-
- if (fifo_enabled == 0) {
- /* reset DIG resync FIFO */
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
- /* TODO: fix timeout when wait for DIG_FIFO_RESET_DONE */
- //REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 1, 100);
- udelay(1);
- REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0);
- REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 1, 100);
- }
-}
-
static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pix_per_container)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -425,8 +440,6 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc32_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
index 250d9a341cf6..ecd041a446d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.h
@@ -71,7 +71,9 @@
SRI(DP_MSE_RATE_UPDATE, DP, id), \
SRI(DP_PIXEL_FORMAT, DP, id), \
SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_SEC_CNTL2, DP, id), \
+ SRI(DP_SEC_CNTL5, DP, id), \
SRI(DP_SEC_CNTL6, DP, id), \
SRI(DP_STEER_FIFO, DP, id), \
SRI(DP_VID_M, DP, id), \
@@ -93,7 +95,7 @@
SRI(DIG_FIFO_CTRL0, DIG, id)
-#define SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)\
+#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_PER_CYCLE_PROCESSING_MODE, mask_sh),\
@@ -106,6 +108,7 @@
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh),\
SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
@@ -244,15 +247,6 @@
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, mask_sh),\
SE_SF(DIG0_DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, mask_sh)
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
- SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh),\
- SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_ACP_SEND, mask_sh)
-#else
-#define SE_COMMON_MASK_SH_LIST_DCN32(mask_sh)\
- SE_COMMON_MASK_SH_LIST_DCN32_BASE(mask_sh)
-#endif
-
void dcn32_dio_stream_encoder_construct(
struct dcn10_stream_encoder *enc1,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
index f349cbe2a0f0..dcf12a0b031c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
@@ -31,7 +31,7 @@
#include "dcn30/dcn30_cm_common.h"
/* Compute the maximum number of lines that we can fit in the line buffer */
-void dscl32_calc_lb_num_partitions(
+static void dscl32_calc_lb_num_partitions(
const struct scaler_data *scl_data,
enum lb_memory_config lb_config,
int *num_part_y,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
index 9db1323e1933..176b1537d2a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hpo_dp_link_encoder.h
@@ -47,6 +47,7 @@
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 99eb239bbc7b..9fbb72369c10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -68,7 +68,7 @@ static void dcn32_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
-static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
+void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -98,9 +98,13 @@ static void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
default:
break;
}
- /* Should never be hit, if it is we have an erroneous hw config*/
- ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
- + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
+ if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
+		/* This may happen during a seamless transition from ODM 2:1 to ODM 4:1 */
+ DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n",
+ hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
+ hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
+ }
}
static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
@@ -140,7 +144,7 @@ static uint32_t convert_and_clamp(
return ret_val;
}
-static bool hubbub32_program_urgent_watermarks(
+bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -330,7 +334,7 @@ static bool hubbub32_program_urgent_watermarks(
return wm_pending;
}
-static bool hubbub32_program_stutter_watermarks(
+bool hubbub32_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -476,7 +480,7 @@ static bool hubbub32_program_stutter_watermarks(
}
-static bool hubbub32_program_pstate_watermarks(
+bool hubbub32_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -629,7 +633,7 @@ static bool hubbub32_program_pstate_watermarks(
}
-static bool hubbub32_program_usr_watermarks(
+bool hubbub32_program_usr_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -769,7 +773,7 @@ static bool hubbub32_program_watermarks(
}
/* Copy values from WM set A to all other sets */
-void hubbub32_init_watermarks(struct hubbub *hubbub)
+static void hubbub32_init_watermarks(struct hubbub *hubbub)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
uint32_t reg;
@@ -820,7 +824,7 @@ void hubbub32_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg);
}
-void hubbub32_wm_read_state(struct hubbub *hubbub,
+static void hubbub32_wm_read_state(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -932,6 +936,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.program_watermarks = hubbub32_program_watermarks,
.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
+ .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub32_init_watermarks,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index 3bae6e558971..cda94e0e31bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -161,6 +161,35 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+bool hubbub32_program_urgent_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+bool hubbub32_program_stutter_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+bool hubbub32_program_pstate_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+bool hubbub32_program_usr_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
+
+void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow);
+
+void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub);
+
+void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte);
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 6ec1c52535b9..ac1c6458dd55 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -79,6 +79,8 @@ void hubp32_phantom_hubp_post_enable(struct hubp *hubp)
uint32_t reg_val;
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ /* For phantom pipe enable, disable GSL */
+ REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, 0);
REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, 1);
reg_val = REG_READ(DCHUBP_CNTL);
if (reg_val) {
@@ -103,6 +105,11 @@ void hubp32_cursor_set_attributes(
enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk(
attr->width, attr->color_format);
+	// Round cursor width up to the next multiple of 64
+ uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
+ uint32_t cursor_height = attr->height;
+ uint32_t cursor_size = cursor_width * cursor_height;
+
hubp->curs_attr = *attr;
REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
@@ -126,7 +133,24 @@ void hubp32_cursor_set_attributes(
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
- if (attr->width * attr->height * 4 > 16384)
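+	/* Scale the pixel count to bytes: mono halves it, 32-bit color formats use 4 Bpp, 64-bit FP formats 8 Bpp */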
+ switch (attr->color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
else
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
@@ -157,12 +181,12 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_init = hubp3_init,
.set_unbounded_requesting = hubp31_set_unbounded_requesting,
.hubp_soft_reset = hubp31_soft_reset,
+ .hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.hubp_update_force_pstate_disallow = hubp32_update_force_pstate_disallow,
.phantom_hubp_post_enable = hubp32_phantom_hubp_post_enable,
.hubp_update_mall_sel = hubp32_update_mall_sel,
- .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
- .hubp_set_flip_int = hubp1_set_flip_int
+ .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 769171ab8ef6..cf5bd9713f54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -49,6 +49,7 @@
#include "dcn20/dcn20_optc.h"
#include "dmub_subvp_state.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "dcn32_resource.h"
#include "dc_link_dp.h"
#include "dmub/inc/dmub_subvp_state.h"
@@ -198,42 +199,6 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
return false;
}
-/* This function takes in the start address and surface size to be cached in CAB
- * and calculates the total number of cache lines required to store the surface.
- * The number of cache lines used for each surface is calculated independently of
- * one another. For example, if there is a primary surface(1), meta surface(2), and
- * cursor(3), this function should be called 3 times to calculate the number of cache
- * lines used for each of those surfaces.
- */
-static uint32_t dcn32_cache_lines_for_surface(struct dc *dc, uint32_t surface_size, uint64_t start_address)
-{
- uint32_t lines_used = 1;
- uint32_t num_cached_bytes = 0;
- uint32_t remaining_size = 0;
- uint32_t cache_line_size = dc->caps.cache_line_size;
- uint32_t remainder = 0;
-
- /* 1. Calculate surface size minus the number of bytes stored
- * in the first cache line (all bytes in first cache line might
- * not be fully used).
- */
- div_u64_rem(start_address, cache_line_size, &remainder);
- num_cached_bytes = cache_line_size - remainder;
- remaining_size = surface_size - num_cached_bytes;
-
- /* 2. Calculate number of cache lines that will be fully used with
- * the remaining number of bytes to be stored.
- */
- lines_used += (remaining_size / cache_line_size);
-
- /* 3. Check if we need an extra line due to the remaining size not being
- * a multiple of CACHE_LINE_SIZE.
- */
- if (remaining_size % cache_line_size > 0)
- lines_used++;
-
- return lines_used;
-}
/* This function loops through every surface that needs to be cached in CAB for SS,
* and calculates the total number of ways required to store all surfaces (primary,
@@ -241,94 +206,115 @@ static uint32_t dcn32_cache_lines_for_surface(struct dc *dc, uint32_t surface_si
*/
static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
- uint8_t i, j;
+ int i, j;
struct dc_stream_state *stream = NULL;
struct dc_plane_state *plane = NULL;
- uint32_t surface_size = 0;
uint32_t cursor_size = 0;
- uint32_t cache_lines_used = 0;
uint32_t total_lines = 0;
uint32_t lines_per_way = 0;
- uint32_t num_ways = 0;
- uint32_t prev_addr_low = 0;
+ uint8_t num_ways = 0;
+ uint8_t bytes_per_pixel = 0;
+ uint8_t cursor_bpp = 0;
+ uint16_t mblk_width = 0;
+ uint16_t mblk_height = 0;
+ uint16_t mall_alloc_width_blk_aligned = 0;
+ uint16_t mall_alloc_height_blk_aligned = 0;
+ uint16_t num_mblks = 0;
+ uint32_t bytes_in_mall = 0;
+ uint32_t cache_lines_used = 0;
+ uint32_t cache_lines_per_plane = 0;
- for (i = 0; i < ctx->stream_count; i++) {
- stream = ctx->streams[i];
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- // Don't include PSR surface in the total surface size for CAB allocation
- if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ if (!pipe->stream || !pipe->plane_state ||
+ pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED ||
+ pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
continue;
- if (ctx->stream_status[i].plane_count == 0)
- continue;
+ bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
+ mblk_width = DCN3_2_MBLK_WIDTH;
+ mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
- // For each stream, loop through each plane to calculate the number of cache
- // lines required to store the surface in CAB
- for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
- plane = ctx->stream_status[i].plane_states[j];
+ /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+ * FLOOR(vp_x_start, blk_width)
+ *
+ * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c
+ */
+ mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+ pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
+ (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+ /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+ * FLOOR(vp_y_start, blk_height)
+ *
+ * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c
+ */
+ mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+ pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) -
+ (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
- // Calculate total surface size
- if (prev_addr_low != plane->address.grph.addr.u.low_part) {
- /* if plane address are different from prev FB, then userspace allocated separate FBs*/
- surface_size += plane->plane_size.surface_pitch *
- plane->plane_size.surface_size.height *
- (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4);
+ num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+ ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
- prev_addr_low = plane->address.grph.addr.u.low_part;
- } else {
- /* We have the same fb for all the planes.
- * Xorg always creates one giant fb that holds all surfaces,
- * so allocating it once is sufficient.
- * */
- continue;
- }
- // Convert surface size + starting address to number of cache lines required
- // (alignment accounted for)
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
- plane->address.grph.addr.quad_part);
-
- if (plane->address.grph.meta_addr.quad_part) {
- // Meta surface
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
- plane->address.grph.meta_addr.quad_part);
- }
- }
+ /* For DCC:
+ * meta_num_mblk = CEILING(full_mblk_width_ub_l*full_mblk_height_ub_l*Bpe/256/mblk_bytes, 1)
+ */
+ if (pipe->plane_state->dcc.enable)
+		num_mblks += (mall_alloc_width_blk_aligned * mall_alloc_height_blk_aligned * bytes_per_pixel +
+			(256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
- // Include cursor size for CAB allocation
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
- struct hubp *hubp = pipe->plane_res.hubp;
+ bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
- if (pipe->stream && pipe->plane_state && hubp)
- /* Find the cursor plane and use the exact size instead of
- * using the max for calculation
- */
- if (hubp->curs_attr.width > 0) {
- cursor_size = hubp->curs_attr.width * hubp->curs_attr.height;
- break;
- }
- }
+		/* Cache lines used = total bytes / cache line size; add 2 for
+		 * worst-case alignment (MALL is 64-byte aligned).
+		 */
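+		/* Illustrative sizing (assuming 128x128 mblks at 4 Bpe and
+		 * 64 KiB per mblk): a 3840x2160 viewport aligns to 3840x2176,
+		 * i.e. 30 * 17 = 510 mblks, 510 * 64 KiB in MALL, and
+		 * 522240 + 2 cache lines at 64 bytes per line.
+		 */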
+ cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
+ cache_lines_used += cache_lines_per_plane;
+ }
- switch (stream->cursor_attributes.color_format) {
- case CURSOR_MODE_MONO:
- cursor_size /= 2;
- break;
- case CURSOR_MODE_COLOR_1BIT_AND:
- case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
- case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
- cursor_size *= 4;
- break;
+ // Include cursor size for CAB allocation
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
+ struct hubp *hubp = pipe->plane_res.hubp;
- case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
- case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
- cursor_size *= 8;
- break;
- }
+ if (pipe->stream && pipe->plane_state && hubp)
+		/* Find the cursor plane and use the exact size instead of
+		 * using the max for calculation
+		 */
- if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
- cache_lines_used += dcn32_cache_lines_for_surface(dc, cursor_size,
- plane->address.grph.cursor_cache_addr.quad_part);
- }
+ if (hubp->curs_attr.width > 0) {
+ // Round cursor width to next multiple of 64
+ cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height;
+
+ switch (pipe->stream->cursor_attributes.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ cursor_bpp = 4;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ cursor_bpp = 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ cursor_bpp = 8;
+ break;
+ }
+
+ if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor &&
+ cursor_size > 16384) {
+			/* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_height*cursor_Bpe/mblk_bytes, 1)
+			 */
+ cache_lines_used += (((hubp->curs_attr.width * hubp->curs_attr.height * cursor_bpp +
+ DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES) *
+ DCN3_2_MALL_MBLK_SIZE_BYTES) / dc->caps.cache_line_size + 2;
+ }
+ break;
+ }
}
// Convert number of cache lines required to number of ways
@@ -345,8 +331,8 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
plane = ctx->stream_status[i].plane_states[j];
if (stream->cursor_position.enable && plane &&
- !plane->address.grph.cursor_cache_addr.quad_part &&
- cursor_size > 16384) {
+ dc->debug.alloc_extra_way_for_cursor &&
+ cursor_size > 16384) {
/* Cursor caching is not supported since it won't be on the same line.
* So we need an extra line to accommodate it. With large cursors and a single 4k monitor
* this case triggers corruption. If we're at the edge, then don't trigger display refresh
@@ -358,7 +344,9 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
}
}
}
-
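+	/* Debug override: force the MALL way count regardless of the calculation */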
+ if (dc->debug.force_mall_ss_num_ways > 0) {
+ num_ways = dc->debug.force_mall_ss_num_ways;
+ }
return num_ways;
}
@@ -367,7 +355,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
union dmub_rb_cmd cmd;
uint8_t ways, i;
int j;
- bool stereo_in_use = false;
+ bool mall_ss_unsupported = false;
struct dc_plane_state *plane = NULL;
if (!dc->ctx->dmub_srv)
@@ -398,22 +386,23 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
*/
ways = dcn32_calculate_cab_allocation(dc, dc->current_state);
- /* MALL not supported with Stereo3D. If any plane is using stereo,
- * don't try to enter MALL.
+ /* MALL not supported with Stereo3D or TMZ surface. If any plane is using stereo,
+ * or TMZ surface, don't try to enter MALL.
*/
for (i = 0; i < dc->current_state->stream_count; i++) {
for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
plane = dc->current_state->stream_status[i].plane_states[j];
- if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO) {
- stereo_in_use = true;
+ if (plane->address.type == PLN_ADDR_TYPE_GRPH_STEREO ||
+ plane->address.tmz_surface) {
+ mall_ss_unsupported = true;
break;
}
}
- if (stereo_in_use)
+ if (mall_ss_unsupported)
break;
}
- if (ways <= dc->caps.cache_num_ways && !stereo_in_use) {
+ if (ways <= dc->caps.cache_num_ways && !mall_ss_unsupported) {
memset(&cmd, 0, sizeof(cmd));
cmd.cab.header.type = DMUB_CMD__CAB_FOR_SS;
cmd.cab.header.sub_type = DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB;
@@ -451,7 +440,6 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
*/
void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
{
-/*
int i;
bool enable_subvp = false;
@@ -469,7 +457,6 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
}
}
dc_dmub_setup_subvp_dmub_command(dc, context, enable_subvp);
-*/
}
/* Sub-Viewport DMUB lock needs to be acquired by driver whenever SubVP is active and:
@@ -642,10 +629,9 @@ bool dcn32_set_input_transfer_func(struct dc *dc,
params = &dpp_base->degamma_params;
}
- result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
+ dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
- if (result &&
- pipe_ctx->stream_res.opp &&
+ if (pipe_ctx->stream_res.opp &&
pipe_ctx->stream_res.opp->ctx &&
hws->funcs.set_mcm_luts)
result = hws->funcs.set_mcm_luts(pipe_ctx, plane_state);
@@ -741,7 +727,29 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
struct hubp *hubp = pipe->plane_res.hubp;
if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
- if (hubp->curs_attr.width * hubp->curs_attr.height * 4 > 16384)
+			// Round cursor width up to the next multiple of 64
+ int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
+ int cursor_height = hubp->curs_attr.height;
+ int cursor_size = cursor_width * cursor_height;
+
+ switch (hubp->curs_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
cache_cursor = true;
if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
@@ -751,7 +759,8 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
hubp->funcs->hubp_update_mall_sel(hubp,
num_ways <= dc->caps.cache_num_ways &&
pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
- pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO ? 2 : 0,
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
+ !pipe->plane_state->address.tmz_surface ? 2 : 0,
cache_cursor);
}
}
@@ -861,6 +870,7 @@ void dcn32_init_hw(struct dc *dc)
if (link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
+ link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
if (link->link_enc->funcs->fec_is_active &&
link->link_enc->funcs->fec_is_active(link->link_enc))
link->fec_state = dc_link_fec_enabled;
@@ -979,6 +989,10 @@ void dcn32_init_hw(struct dc *dc)
dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
}
+
+ /* Enable support for ODM and windowed MPO if policy flag is set */
+ if (dc->debug.enable_single_display_2to1_odm_policy)
+ dc->config.enable_windowed_mpo_odm = true;
}
static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
@@ -1133,23 +1147,25 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
true);
}
- // Don't program pixel clock after link is already enabled
-/* if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
- pipe_ctx->clock_source,
- &pipe_ctx->stream_res.pix_clk_params,
- &pipe_ctx->pll_settings)) {
- BREAK_TO_DEBUGGER();
- }*/
+ if (pipe_ctx->stream_res.dsc) {
+ struct pipe_ctx *current_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
- if (pipe_ctx->stream_res.dsc)
update_dsc_on_stream(pipe_ctx, pipe_ctx->stream->timing.flags.DSC);
+
+ /* Check if no longer using pipe for ODM, then need to disconnect DSC for that pipe */
+ if (!pipe_ctx->next_odm_pipe && current_pipe_ctx->next_odm_pipe &&
+ current_pipe_ctx->next_odm_pipe->stream_res.dsc) {
+ struct display_stream_compressor *dsc = current_pipe_ctx->next_odm_pipe->stream_res.dsc;
+ /* disconnect DSC block from stream */
+ dsc->funcs->dsc_disconnect(dsc);
+ }
+ }
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
{
struct dc_stream_state *stream = pipe_ctx->stream;
unsigned int odm_combine_factor = 0;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
bool two_pix_per_container = false;
// For phantom pipes, use the same programming as the main pipes
@@ -1159,6 +1175,9 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ return odm_combine_factor;
+
if (is_dp_128b_132b_signal(pipe_ctx)) {
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
@@ -1174,7 +1193,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
} else {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_4;
- if ((odm_combine_factor == 2) || dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ if ((odm_combine_factor == 2) || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx))
*k2_div = PIXEL_RATE_DIV_BY_2;
}
}
@@ -1211,7 +1230,6 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
struct pipe_ctx *odm_pipe;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
uint32_t pix_per_cycle = 1;
params.opp_cnt = 1;
@@ -1230,7 +1248,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
pipe_ctx->stream_res.tg->inst);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1
- || dc->debug.enable_dp_dig_pixel_rate_div_policy) {
+ || dcn32_is_dp_dig_pixel_rate_div_policy(pipe_ctx)) {
params.timing.pix_clk_100hz /= 2;
pix_per_cycle = 2;
}
@@ -1247,35 +1265,163 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+ return false;
+
if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy)
return true;
return false;
}
-void dcn32_update_phy_state(struct dc_state *state, struct pipe_ctx *pipe_ctx,
- enum phy_state target_state)
+static void apply_symclk_on_tx_off_wa(struct dc_link *link)
{
- enum phy_state current_state = pipe_ctx->stream->link->phy_state;
-
- if (target_state == TX_OFF_SYMCLK_OFF) {
- core_link_disable_stream(pipe_ctx);
- pipe_ctx->stream->link->phy_state = TX_OFF_SYMCLK_OFF;
- } else if (target_state == TX_ON_SYMCLK_ON) {
- core_link_enable_stream(state, pipe_ctx);
- pipe_ctx->stream->link->phy_state = TX_ON_SYMCLK_ON;
- } else if (target_state == TX_OFF_SYMCLK_ON) {
- if (current_state == TX_ON_SYMCLK_ON) {
- core_link_disable_stream(pipe_ctx);
- pipe_ctx->stream->link->phy_state = TX_OFF_SYMCLK_OFF;
+	/* There are use cases where SYMCLK is referenced by OTG. For instance,
+	 * for a TMDS signal, OTG relies on SYMCLK even if TX video output is
+	 * off. However, the current link interface powers off the PHY when
+	 * disabling link output, which also turns off the SYMCLK generated by
+	 * the PHY. The workaround is to detect the case where SYMCLK is still
+	 * in use by OTG when we power off the PHY: we then temporarily power
+	 * the PHY back on and move its SYMCLK state to SYMCLK_ON_TX_OFF via
+	 * the program_pix_clk interface. Once OTG is disabled, we power off
+	 * the PHY by calling disable link output again.
+	 *
+	 * In future DCN generations, we plan to rework the transmitter control
+	 * interface so that SYMCLK ON TX OFF can be set in one step without
+	 * this workaround.
+	 */
+
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
+ uint8_t i;
+
+ if (link->phy_state.symclk_ref_cnts.otg > 0) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->top_pipe == NULL) {
+ pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ &pipe_ctx->pll_settings);
+ link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
+ break;
+ }
}
+ }
+}
- pipe_ctx->clock_source->funcs->program_pix_clk(
- pipe_ctx->clock_source,
- &pipe_ctx->stream_res.pix_clk_params,
- dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
- &pipe_ctx->pll_settings);
- pipe_ctx->stream->link->phy_state = TX_OFF_SYMCLK_ON;
- } else
+void dcn32_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
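+	/* Order matters: blank the eDP backlight (or lock the PHY) first, power
+	 * down the link output, then release power/locks and re-apply SYMCLK if
+	 * the OTG still needs it.
+	 */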
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
+ link_hwss->disable_link_output(link, link_res, signal);
+ link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
+
+ if (signal == SIGNAL_TYPE_EDP &&
+ link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_power_control(link, false);
+ else if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+
+ apply_symclk_on_tx_off_wa(link);
+}
+
+/* For SubVP the main pipe can have a viewport position change
+ * without a full update. In this case we must also update the
+ * viewport positions for the phantom pipe accordingly.
+ */
+void dcn32_update_phantom_vp_position(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe)
+{
+ uint32_t i;
+ struct dc_plane_state *phantom_plane = phantom_pipe->plane_state;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
+ pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) {
+ if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) {
+
+ phantom_plane->src_rect.x = pipe->plane_state->src_rect.x;
+ phantom_plane->src_rect.y = pipe->plane_state->src_rect.y;
+ phantom_plane->clip_rect.x = pipe->plane_state->clip_rect.x;
+ phantom_plane->dst_rect.x = pipe->plane_state->dst_rect.x;
+ phantom_plane->dst_rect.y = pipe->plane_state->dst_rect.y;
+
+ phantom_pipe->plane_state->update_flags.bits.position_change = 1;
+ resource_build_scaling_params(phantom_pipe);
+ return;
+ }
+ }
+ }
+}
+
+bool dcn32_dsc_pg_status(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst)
+{
+ uint32_t pwr_status = 0;
+
+ switch (dsc_inst) {
+ case 0: /* DSC0 */
+ REG_GET(DOMAIN16_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 1: /* DSC1 */
+
+ REG_GET(DOMAIN17_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 2: /* DSC2 */
+ REG_GET(DOMAIN18_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ case 3: /* DSC3 */
+ REG_GET(DOMAIN19_PG_STATUS,
+ DOMAIN_PGFSM_PWR_STATUS, &pwr_status);
+ break;
+ default:
BREAK_TO_DEBUGGER();
+ break;
+ }
+
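+	/* PWR_STATUS == 0 means the power-gating domain is up, i.e. the DSC is ungated */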
+ return pwr_status == 0;
+}
+
+void dcn32_update_dsc_pg(struct dc *dc,
+ struct dc_state *context,
+ bool safe_to_disable)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
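+	/* Ungate DSC instances acquired in the new context; gate idle ones only when safe_to_disable is set */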
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
+ struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
+ bool is_dsc_ungated = hws->funcs.dsc_pg_status(hws, dsc->inst);
+
+ if (context->res_ctx.is_dsc_acquired[i]) {
+ if (!is_dsc_ungated) {
+ hws->funcs.dsc_pg_control(hws, dsc->inst, true);
+ }
+ } else if (safe_to_disable) {
+ if (is_dsc_ungated) {
+ hws->funcs.dsc_pg_control(hws, dsc->inst, false);
+ }
+ }
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 221e31144d50..ac3657a5b9ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -84,7 +84,20 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
-void dcn32_update_phy_state(struct dc_state *state, struct pipe_ctx *pipe_ctx,
- enum phy_state target_state);
+void dcn32_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+
+void dcn32_update_phantom_vp_position(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe);
+
+bool dcn32_dsc_pg_status(
+ struct dce_hwseq *hws,
+ unsigned int dsc_inst);
+
+void dcn32_update_dsc_pg(struct dc *dc,
+ struct dc_state *context,
+ bool safe_to_disable);
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index 28d220218133..45a949ba6f3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -99,12 +99,17 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dcn32_disable_link_output,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
- .update_phy_state = dcn32_update_phy_state,
+ .update_phantom_vp_position = dcn32_update_phantom_vp_position,
+ .update_dsc_pg = dcn32_update_dsc_pg,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
@@ -134,6 +139,7 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn32_update_odm,
.dsc_pg_control = dcn32_dsc_pg_control,
+ .dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
index adf93cc8359c..41b0baf8e183 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.c
@@ -100,7 +100,7 @@ static void mmhubbub32_warmup_mcif(struct mcif_wb *mcif_wb,
REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false);
}
-void mmhubbub32_config_mcif_buf(struct mcif_wb *mcif_wb,
+static void mmhubbub32_config_mcif_buf(struct mcif_wb *mcif_wb,
struct mcif_buf_params *params,
unsigned int dest_height)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h
index 22355051f5f7..e460cf8d9041 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mmhubbub.h
@@ -90,7 +90,6 @@
SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_OVERRUN_INT_EN, mask_sh),\
SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, mask_sh),\
SF(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUF_ADDR_FENCE_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_VCE_INT_STATUS, mask_sh),\
SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_INT_STATUS, mask_sh),\
SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_SW_OVERRUN_INT_STATUS, mask_sh),\
SF(MCIF_WB_BUFMGR_STATUS, MCIF_WB_BUFMGR_CUR_BUF, mask_sh),\
@@ -101,7 +100,6 @@
SF(MCIF_WB_BUF_PITCH, MCIF_WB_BUF_CHROMA_PITCH, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_1_STATUS, MCIF_WB_BUF_1_MODE, mask_sh),\
@@ -116,7 +114,6 @@
SF(MCIF_WB_BUF_1_STATUS2, MCIF_WB_BUF_1_C_OVERRUN, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_2_STATUS, MCIF_WB_BUF_2_MODE, mask_sh),\
@@ -131,7 +128,6 @@
SF(MCIF_WB_BUF_2_STATUS2, MCIF_WB_BUF_2_C_OVERRUN, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_3_STATUS, MCIF_WB_BUF_3_MODE, mask_sh),\
@@ -146,7 +142,6 @@
SF(MCIF_WB_BUF_3_STATUS2, MCIF_WB_BUF_3_C_OVERRUN, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_ACTIVE, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_SW_LOCKED, mask_sh),\
- SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_VCE_LOCKED, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_OVERFLOW, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_DISABLE, mask_sh),\
SF(MCIF_WB_BUF_4_STATUS, MCIF_WB_BUF_4_MODE, mask_sh),\
@@ -172,11 +167,6 @@
SF(MCIF_WB_BUF_3_ADDR_C, MCIF_WB_BUF_3_ADDR_C, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_Y, MCIF_WB_BUF_4_ADDR_Y, mask_sh),\
SF(MCIF_WB_BUF_4_ADDR_C, MCIF_WB_BUF_4_ADDR_C, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK_IGNORE, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_INT_ACK, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_SLICE_INT_EN, mask_sh),\
- SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_VCE_LOCK, mask_sh),\
SF(MCIF_WB_BUFMGR_VCE_CONTROL, MCIF_WB_BUFMGR_SLICE_SIZE, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_REFRESH_WATERMARK, mask_sh),\
SF(MCIF_WB_NB_PSTATE_LATENCY_WATERMARK, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 357bd2461bc9..4edd0655965b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -701,7 +701,7 @@ static void mpc32_power_on_shaper_3dlut(
}
-bool mpc32_program_shaper(
+static bool mpc32_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
uint32_t mpcc_id)
@@ -726,7 +726,7 @@ bool mpc32_program_shaper(
else
next_mode = LUT_RAM_A;
- mpc32_configure_shaper_lut(mpc, next_mode == LUT_RAM_A ? true:false, mpcc_id);
+ mpc32_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, mpcc_id);
if (next_mode == LUT_RAM_A)
mpc32_program_shaper_luta_settings(mpc, params, mpcc_id);
@@ -897,7 +897,7 @@ static void mpc32_set_3dlut_mode(
}
-bool mpc32_program_3dlut(
+static bool mpc32_program_3dlut(
struct mpc *mpc,
const struct tetrahedral_params *params,
int mpcc_id)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index 1fad7b48bd5b..2b33eeb213e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -151,12 +151,12 @@ static bool optc32_disable_crtc(struct timing_generator *optc)
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
- 1, 100000);
+ 1, 150000);
return true;
}
-void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
+static void optc32_phantom_crtc_post_enable(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -190,7 +190,7 @@ static void optc32_set_odm_bypass(struct timing_generator *optc,
optc1->opp_count = 1;
}
-void optc32_setup_manual_trigger(struct timing_generator *optc)
+static void optc32_setup_manual_trigger(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
struct dc *dc = optc->ctx->dc;
@@ -215,7 +215,7 @@ void optc32_setup_manual_trigger(struct timing_generator *optc)
}
}
-void optc32_set_drr(
+static void optc32_set_drr(
struct timing_generator *optc,
const struct drr_params *params)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index ef0a6d468a10..a88dd7b3d1c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -90,29 +90,6 @@
#include "dcn20/dcn20_vmid.h"
#include "dml/dcn32/dcn32_fpu.h"
-#define DCN_BASE__INST0_SEG1 0x000000C0
-#define DCN_BASE__INST0_SEG2 0x000034C0
-#define DCN_BASE__INST0_SEG3 0x00009000
-#define NBIO_BASE__INST0_SEG1 0x00000014
-
-#define MAX_INSTANCE 6
-#define MAX_SEGMENT 6
-
-struct IP_BASE_INSTANCE {
- unsigned int segment[MAX_SEGMENT];
-};
-
-struct IP_BASE {
- struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
-
-static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-
#define DC_LOGGER_INIT(logger)
enum dcn32_clk_src_array_id {
@@ -152,6 +129,13 @@ enum dcn32_clk_src_array_id {
REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
+#define SR_ARR_I2C(reg_name, id) \
+ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SRI_ARR_I2C(reg_name, block, id)\
+ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
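A note on the new _I2C macro variants above: they index REG_STRUCT[id-1] rather than REG_STRUCT[id] because the DC I2C engines are numbered from 1, and that 1-based-to-0-based mapping is what lets the i2c_hw_regs arrays later in this patch shrink from 6 entries to 5. A minimal standalone sketch of the indexing idea, with all names hypothetical:

```c
#include <assert.h>

/* Hypothetical stand-in for the driver's per-engine register struct. */
struct i2c_regs { unsigned int setup; };

/* DC I2C engine ids are 1-based (engines 1..5), so an id-1 mapping lets
 * the backing array hold exactly five entries instead of six with a
 * wasted slot 0 -- the same idea as the SR_ARR_I2C/SRI_ARR_I2C macros.
 */
#define NUM_I2C_ENGINES 5
static struct i2c_regs i2c_hw_regs[NUM_I2C_ENGINES];

static struct i2c_regs *i2c_regs_for_engine(unsigned int id)
{
	assert(id >= 1 && id <= NUM_I2C_ENGINES);
	return &i2c_hw_regs[id - 1];
}

int main(void)
{
	i2c_regs_for_engine(1)->setup = 0x1234; /* engine 1 -> slot 0 */
	i2c_regs_for_engine(5)->setup = 0x5678; /* engine 5 -> slot 4 */
	return 0;
}
```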
@@ -461,22 +445,17 @@ static const struct dcn20_dsc_mask dsc_mask = {
};
static struct dcn30_mpc_registers mpc_regs;
-#define dcn_mpc_regs_init()\
- ( \
- MPC_REG_LIST_DCN3_0_RI(0),\
- MPC_REG_LIST_DCN3_0_RI(1),\
- MPC_REG_LIST_DCN3_0_RI(2),\
- MPC_REG_LIST_DCN3_0_RI(3),\
- MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
- MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
- MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
- MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
- MPC_MCM_REG_LIST_DCN32_RI(0),\
- MPC_MCM_REG_LIST_DCN32_RI(1),\
- MPC_MCM_REG_LIST_DCN32_RI(2),\
- MPC_MCM_REG_LIST_DCN32_RI(3),\
- MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)\
- )
+
+#define dcn_mpc_regs_init() \
+ MPC_REG_LIST_DCN3_2_RI(0),\
+ MPC_REG_LIST_DCN3_2_RI(1),\
+ MPC_REG_LIST_DCN3_2_RI(2),\
+ MPC_REG_LIST_DCN3_2_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -739,7 +718,12 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
+
+	/* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions */
+ .enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
+ .allow_sw_cursor_fallback = false,
+ .alloc_extra_way_for_cursor = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -792,7 +776,7 @@ static struct dce_aux *dcn32_aux_engine_create(
#define i2c_inst_regs_init(id)\
I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
-static struct dce_i2c_registers i2c_hw_regs[6];
+static struct dce_i2c_registers i2c_hw_regs[5];
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -916,10 +900,10 @@ static struct hubp *dcn32_hubp_create(
#undef REG_STRUCT
#define REG_STRUCT hubp_regs
- hubp_regs_init(0),
- hubp_regs_init(1),
- hubp_regs_init(2),
- hubp_regs_init(3);
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
if (hubp32_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
@@ -1696,6 +1680,8 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
+ phantom_plane->is_phantom = true;
+
dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
curr_pipe = curr_pipe->bottom_pipe;
@@ -1765,6 +1751,10 @@ bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
pipe->stream->mall_stream_config.type = SUBVP_NONE;
pipe->stream->mall_stream_config.paired_stream = NULL;
}
+
+ if (pipe->plane_state) {
+ pipe->plane_state->is_phantom = false;
+ }
}
return removed_pipe;
}
@@ -1814,14 +1804,39 @@ bool dcn32_validate_bandwidth(struct dc *dc,
int vlevel = 0;
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ struct mall_temp_config mall_temp_config;
+
+	/* To handle FreeSync properly, set the FreeSync DML parameters
+	 * to their default state for the first stage of validation.
+ */
+ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false;
+ context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
+
DC_LOGGER_INIT(dc->ctx->logger);
+	/* For fast validation, there are situations where a shallow copy
+	 * of the dc->current_state is created for the validation. In this case
+ * we want to save and restore the mall config because we always
+ * teardown subvp at the beginning of validation (and don't attempt
+ * to add it back if it's fast validation). If we don't restore the
+ * subvp config in cases of fast validation + shallow copy of the
+ * dc->current_state, the dc->current_state will have a partially
+ * removed subvp state when we did not intend to remove it.
+ */
+ if (fast_validate) {
+ memset(&mall_temp_config, 0, sizeof(mall_temp_config));
+ dcn32_save_mall_state(dc, context, &mall_temp_config);
+ }
+
BW_VAL_TRACE_COUNT();
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
+ if (fast_validate)
+ dcn32_restore_mall_state(dc, context, &mall_temp_config);
+
if (pipe_cnt == 0)
goto validate_out;
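The hunk above follows a general pattern: when a trial validation mutates state that a shallow copy still shares with dc->current_state, the teardown must be snapshotted beforehand and restored afterwards, or the live state is left partially torn down. A condensed sketch of that save/teardown/validate/restore flow, using hypothetical stand-in types rather than the real dc structs:

```c
#include <stdbool.h>
#include <string.h>

#define MAX_PIPES 6

struct mall_config { int type; };          /* hypothetical stand-ins */
struct state { struct mall_config mall[MAX_PIPES]; };

static void save_mall(const struct state *s, struct mall_config *tmp)
{
	memcpy(tmp, s->mall, sizeof(s->mall));
}

static void restore_mall(struct state *s, const struct mall_config *tmp)
{
	memcpy(s->mall, tmp, sizeof(s->mall));
}

static bool validate(struct state *s, bool fast)
{
	struct mall_config tmp[MAX_PIPES];

	/* Validation always tears down SubVP first; for fast validation it
	 * is never re-added, so snapshot the config before mutating. */
	if (fast)
		save_mall(s, tmp);

	memset(s->mall, 0, sizeof(s->mall));   /* teardown for the trial */
	bool ok = true;                        /* ... run the checks ... */

	/* Undo the teardown so a shallow-copied current_state is intact. */
	if (fast)
		restore_mall(s, tmp);
	return ok;
}

int main(void)
{
	struct state s = { .mall = { { .type = 1 } } };

	validate(&s, true);
	return s.mall[0].type != 1;	/* 0: teardown was undone */
}
```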
@@ -1856,12 +1871,6 @@ validate_out:
return out;
}
-
-static bool is_dual_plane(enum surface_pixel_format format)
-{
- return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
-}
-
int dcn32_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1870,12 +1879,37 @@ int dcn32_populate_dml_pipes_from_context(
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
- bool subvp_in_use = false, is_pipe_split_expected[MAX_PIPES];
- int plane_count = 0;
+ bool subvp_in_use = false;
+ uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
struct dc_crtc_timing *timing;
dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ /* Determine whether we will apply ODM 2to1 policy:
+	 * Applies to single-display configurations where the number of planes is less than 3.
+	 * For the 3-plane case (2 MPO planes), we will not set the policy for the MPO pipes.
+ *
+ * Apply pipe split policy first so we can predict the pipe split correctly
+ * (dcn32_predict_pipe_split).
+ */
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ if (context->stream_count == 1 &&
+ context->stream_status[0].plane_count <= 1 &&
+ !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
+ is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
+ pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ }
+ pipe_cnt++;
+ }
+
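The conditions in the loop above collapse into a single per-stream predicate: exactly one stream, at most one plane, not HDMI, a horizontal timing that splits evenly across two pipes, and a pixel clock above DCN3_2_VMIN_DISPCLK_HZ so that combining is actually worthwhile. A hedged restatement as a standalone predicate; the struct below is a simplified stand-in for the driver's stream state, and the plain modulo check approximates is_h_timing_divisible_by_2:

```c
#include <stdbool.h>
#include <stdint.h>

#define DCN3_2_VMIN_DISPCLK_HZ 717000000ULL

struct stream_info {            /* simplified stand-in for dc_stream_state */
	int      stream_count;  /* streams in the whole context */
	int      plane_count;   /* planes on this stream */
	bool     is_hdmi;
	uint32_t h_active;      /* must split evenly across 2 pipes */
	uint64_t pix_clk_hz;
};

static bool use_odm_2to1(const struct stream_info *s, bool policy_enabled)
{
	return policy_enabled &&
	       s->stream_count == 1 &&
	       s->plane_count <= 1 &&
	       !s->is_hdmi &&
	       (s->h_active % 2) == 0 &&
	       s->pix_clk_hz > DCN3_2_VMIN_DISPCLK_HZ;
}
```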
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
if (!res_ctx->pipe_ctx[i].stream)
@@ -1928,57 +1962,18 @@ int dcn32_populate_dml_pipes_from_context(
}
}
- /* Calculate the number of planes we have so we can determine
- * whether to apply ODM 2to1 policy or not
- */
- if (pipe->stream && !pipe->prev_odm_pipe &&
- (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
- ++plane_count;
-
DC_FP_START();
- is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, pipes[i].pipe, i);
+ is_pipe_split_expected[i] = dcn32_predict_pipe_split(context, &pipes[pipe_cnt]);
DC_FP_END();
pipe_cnt++;
}
- /* Determine whether we will apply ODM 2to1 policy
- * Applies to single display and where the number of planes is less than 3
- * For 3 plane case ( 2 MPO planes ), we will not set the policy for the MPO pipes
- */
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
- pipe = &res_ctx->pipe_ctx[i];
- timing = &pipe->stream->timing;
-
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
- if (context->stream_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal)) {
- if (dc->debug.enable_single_display_2to1_odm_policy) {
- if (!((plane_count > 2) && pipe->top_pipe))
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
- }
- }
- pipe_cnt++;
- }
-
/* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
* the DET available for each pipe). Use the DET override input to maintain our driver
* policy.
*/
- if (pipe_cnt == 1 && !is_pipe_split_expected[0]) {
- pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
- if (pipe->plane_state && !dc->debug.disable_z9_mpc) {
- if (!is_dual_plane(pipe->plane_state->format)) {
- pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
- pipes[0].pipe.src.unbounded_req_mode = true;
- if (pipe->plane_state->src_rect.width >= 5120 &&
- pipe->plane_state->src_rect.height >= 2880)
- pipes[0].pipe.src.det_size_override = 320; // 5K or higher
- }
- }
- } else
- dcn32_determine_det_override(context, pipes, is_pipe_split_expected, dc->res_pool->pipe_count);
+ dcn32_set_det_allocations(dc, context, pipes);
// In general cases we want to keep the dram clock change requirement
// (prefer configs that support MCLK switch). Only override to false
@@ -2108,7 +2103,8 @@ static bool dcn32_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
- dc->caps.max_cursor_size = 256;
+	/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed */
+ dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 60d8fad16eee..f76120e67c16 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -28,8 +28,16 @@
#include "core_types.h"
+#define DCN3_2_DEFAULT_DET_SIZE 256
+#define DCN3_2_MAX_DET_SIZE 1152
+#define DCN3_2_MIN_DET_SIZE 128
+#define DCN3_2_MIN_COMPBUF_SIZE_KB 128
#define DCN3_2_DET_SEG_SIZE 64
#define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024
+#define DCN3_2_MBLK_WIDTH 128
+#define DCN3_2_MBLK_HEIGHT_4BPE 128
+#define DCN3_2_MBLK_HEIGHT_8BPE 64
+#define DCN3_2_VMIN_DISPCLK_HZ 717000000
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -37,6 +45,17 @@
extern struct _vcs_dpi_ip_params_st dcn3_2_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc;
+/* Temp struct used to save and restore MALL config
+ * during validation.
+ *
+ * TODO: Move MALL config into dc_state instead of stream struct
+ * to avoid needing to save/restore.
+ */
+struct mall_temp_config {
+ struct mall_stream_config mall_stream_config[MAX_PIPES];
+ bool is_phantom_plane[MAX_PIPES];
+};
+
struct dcn32_resource_pool {
struct resource_pool base;
};
@@ -100,14 +119,28 @@ bool dcn32_subvp_in_use(struct dc *dc,
bool dcn32_mpo_in_use(struct dc_state *context);
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
+
struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_state *state,
const struct resource_pool *pool,
struct dc_stream_state *stream,
struct pipe_ctx *head_pipe);
-void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_params_st *pipes,
- bool *is_pipe_split_expected, int pipe_cnt);
+void dcn32_determine_det_override(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes);
+
+void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes);
+
+void dcn32_save_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config);
+
+void dcn32_restore_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config);
/* definitions for run time init of reg offsets */
@@ -222,7 +255,8 @@ void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_par
SRI_ARR(DP_MSA_TIMING_PARAM4, DP, id), \
SRI_ARR(DP_MSE_RATE_CNTL, DP, id), SRI_ARR(DP_MSE_RATE_UPDATE, DP, id), \
SRI_ARR(DP_PIXEL_FORMAT, DP, id), SRI_ARR(DP_SEC_CNTL, DP, id), \
- SRI_ARR(DP_SEC_CNTL2, DP, id), SRI_ARR(DP_SEC_CNTL6, DP, id), \
+ SRI_ARR(DP_SEC_CNTL1, DP, id), SRI_ARR(DP_SEC_CNTL2, DP, id), \
+ SRI_ARR(DP_SEC_CNTL5, DP, id), SRI_ARR(DP_SEC_CNTL6, DP, id), \
SRI_ARR(DP_STEER_FIFO, DP, id), SRI_ARR(DP_VID_M, DP, id), \
SRI_ARR(DP_VID_N, DP, id), SRI_ARR(DP_VID_STREAM_CNTL, DP, id), \
SRI_ARR(DP_VID_TIMING, DP, id), SRI_ARR(DP_SEC_AUD_N, DP, id), \
@@ -735,75 +769,6 @@ void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_par
#define MPC_DWB_MUX_REG_LIST_DCN3_0_RI(inst) \
SRII_DWB(DWB_MUX, MUX, MPC_DWB, inst)
-#define MPC_MCM_REG_LIST_DCN32_RI(inst) \
- ( \
- SRII(MPCC_MCM_SHAPER_CONTROL, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_OFFSET_R, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_OFFSET_G, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_OFFSET_B, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_SCALE_R, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_SCALE_G_B, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_LUT_INDEX, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_LUT_DATA, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_B, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_G, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_R, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_B, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_G, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_R, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_0_1, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_2_3, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_4_5, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_6_7, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_8_9, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_10_11, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_12_13, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_14_15, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_16_17, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_18_19, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_20_21, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_22_23, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_24_25, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_26_27, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_28_29, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_30_31, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMA_REGION_32_33, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_B, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_G, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_R, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_B, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_G, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_R, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_0_1, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_2_3, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_4_5, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_6_7, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_8_9, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_10_11, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_12_13, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_14_15, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_16_17, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_18_19, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_20_21, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_22_23, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_24_25, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_26_27, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_28_29, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_30_31, MPCC_MCM, inst), \
- SRII(MPCC_MCM_SHAPER_RAMB_REGION_32_33, MPCC_MCM, inst), \
- SRII(MPCC_MCM_3DLUT_MODE, MPCC_MCM, inst), \
- SRII(MPCC_MCM_3DLUT_INDEX, MPCC_MCM, inst), \
- SRII(MPCC_MCM_3DLUT_DATA, MPCC_MCM, inst), \
- SRII(MPCC_MCM_3DLUT_DATA_30BIT, MPCC_MCM, inst), \
- SRII(MPCC_MCM_3DLUT_READ_WRITE_CONTROL, MPCC_MCM, inst), \
- SRII(MPCC_MCM_3DLUT_OUT_NORM_FACTOR, MPCC_MCM, inst), \
- SRII(MPCC_MCM_3DLUT_OUT_OFFSET_R, MPCC_MCM, inst), \
- SRII(MPCC_MCM_3DLUT_OUT_OFFSET_G, MPCC_MCM, inst), \
- SRII(MPCC_MCM_3DLUT_OUT_OFFSET_B, MPCC_MCM, inst), \
- SRII(MPCC_MCM_MEM_PWR_CTRL, MPCC_MCM, inst) \
- )
-
#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0_RI(inst) \
( \
SRII(MUX, MPC_OUT, inst), VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) \
@@ -887,6 +852,149 @@ void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_par
SRII(MPCC_OGAM_LUT_CONTROL, MPCC_OGAM, inst) \
)
+#define MPC_REG_LIST_DCN3_2_RI(inst) \
+ MPC_REG_LIST_DCN3_0_RI(inst),\
+ SRII(MPCC_MOVABLE_CM_LOCATION_CONTROL, MPCC, inst),\
+ SRII(MPCC_MCM_SHAPER_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_SCALE_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_SCALE_G_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_LUT_INDEX, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_LUT_DATA, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_END_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMA_REGION_32_33, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_END_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_SHAPER_RAMB_REGION_32_33, MPCC_MCM, inst),\
+	SRII(MPCC_MCM_3DLUT_MODE, MPCC_MCM, inst), /* TODO: may need to add other 3DLUT regs */\
+ SRII(MPCC_MCM_3DLUT_INDEX, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_DATA, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_DATA_30BIT, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_READ_WRITE_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_NORM_FACTOR, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_3DLUT_OUT_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_LUT_INDEX, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_LUT_DATA, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_LUT_CONTROL, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_SLOPE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_START_BASE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL1_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_END_CNTL2_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMA_REGION_32_33, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_SLOPE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_START_BASE_CNTL_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL1_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_END_CNTL2_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_B, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_G, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_OFFSET_R, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_0_1, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_2_3, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_4_5, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_6_7, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_8_9, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_10_11, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_12_13, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_14_15, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_16_17, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_18_19, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_20_21, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_22_23, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_24_25, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_26_27, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_28_29, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_30_31, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_1DLUT_RAMB_REGION_32_33, MPCC_MCM, inst),\
+ SRII(MPCC_MCM_MEM_PWR_CTRL, MPCC_MCM, inst)
+
/* OPTC */
#define OPTC_COMMON_REG_LIST_DCN3_2_RI(inst) \
@@ -1121,6 +1229,7 @@ void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_par
SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D), SR(DCHUBBUB_DET0_CTRL), \
SR(DCHUBBUB_DET1_CTRL), SR(DCHUBBUB_DET2_CTRL), SR(DCHUBBUB_DET3_CTRL), \
SR(DCHUBBUB_COMPBUF_CTRL), SR(COMPBUF_RESERVED_SPACE), \
+ SR(DCHUBBUB_DEBUG_CTRL_0), \
SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL), \
SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A), \
SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B), \
@@ -1175,18 +1284,19 @@ void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_par
#define I2C_HW_ENGINE_COMMON_REG_LIST_RI(id) \
( \
- SRI_ARR(SETUP, DC_I2C_DDC, id), SRI_ARR(SPEED, DC_I2C_DDC, id), \
- SRI_ARR(HW_STATUS, DC_I2C_DDC, id), SR_ARR(DC_I2C_ARBITRATION, id), \
- SR_ARR(DC_I2C_CONTROL, id), SR_ARR(DC_I2C_SW_STATUS, id), \
- SR_ARR(DC_I2C_TRANSACTION0, id), SR_ARR(DC_I2C_TRANSACTION1, id), \
- SR_ARR(DC_I2C_TRANSACTION2, id), SR_ARR(DC_I2C_TRANSACTION3, id), \
- SR_ARR(DC_I2C_DATA, id), SR_ARR(MICROSECOND_TIME_BASE_DIV, id) \
+ SRI_ARR_I2C(SETUP, DC_I2C_DDC, id), SRI_ARR_I2C(SPEED, DC_I2C_DDC, id), \
+ SRI_ARR_I2C(HW_STATUS, DC_I2C_DDC, id), \
+ SR_ARR_I2C(DC_I2C_ARBITRATION, id), \
+ SR_ARR_I2C(DC_I2C_CONTROL, id), SR_ARR_I2C(DC_I2C_SW_STATUS, id), \
+ SR_ARR_I2C(DC_I2C_TRANSACTION0, id), SR_ARR_I2C(DC_I2C_TRANSACTION1, id),\
+ SR_ARR_I2C(DC_I2C_TRANSACTION2, id), SR_ARR_I2C(DC_I2C_TRANSACTION3, id),\
+ SR_ARR_I2C(DC_I2C_DATA, id), SR_ARR_I2C(MICROSECOND_TIME_BASE_DIV, id) \
)
#define I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id) \
( \
- I2C_HW_ENGINE_COMMON_REG_LIST_RI(id), SR_ARR(DIO_MEM_PWR_CTRL, id), \
- SR_ARR(DIO_MEM_PWR_STATUS, id) \
+ I2C_HW_ENGINE_COMMON_REG_LIST_RI(id), SR_ARR_I2C(DIO_MEM_PWR_CTRL, id), \
+ SR_ARR_I2C(DIO_MEM_PWR_STATUS, id) \
)
#endif /* _DCN32_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index ab918fe38f6a..d51d0c40ae5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -28,6 +28,11 @@
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
/**
* ********************************************************************************************
* dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP
@@ -46,7 +51,6 @@
uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
{
uint32_t num_ways = 0;
- uint32_t mall_region_pixels = 0;
uint32_t bytes_per_pixel = 0;
uint32_t cache_lines_used = 0;
uint32_t lines_per_way = 0;
@@ -54,28 +58,77 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
uint32_t bytes_in_mall = 0;
uint32_t num_mblks = 0;
uint32_t cache_lines_per_plane = 0;
- uint32_t i = 0;
+ uint32_t i = 0, j = 0;
+ uint16_t mblk_width = 0;
+ uint16_t mblk_height = 0;
+ uint32_t full_vp_width_blk_aligned = 0;
+ uint32_t full_vp_height_blk_aligned = 0;
+ uint32_t mall_alloc_width_blk_aligned = 0;
+ uint32_t mall_alloc_height_blk_aligned = 0;
+ uint16_t full_vp_height = 0;
+ bool subvp_in_use = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- // Find the phantom pipes
- if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
+		/* Find the phantom pipes.
+		 * - For the pipe split case we need to loop through the bottom and next ODM
+		 *   pipes, otherwise only half the viewport size is counted
+ */
+ if (pipe->stream && pipe->plane_state &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ struct pipe_ctx *main_pipe = NULL;
+
+ subvp_in_use = true;
+ /* Get full viewport height from main pipe (required for MBLK calculation) */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ main_pipe = &context->res_ctx.pipe_ctx[j];
+ if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
+ full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
+ break;
+ }
+ }
+
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable;
+ mblk_width = DCN3_2_MBLK_WIDTH;
+ mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
+
+ /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+ * FLOOR(vp_x_start, blk_width)
+ */
+ full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+				pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
+ (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+ /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+ * FLOOR(vp_y_start, blk_height)
+ */
+ full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+				full_vp_height + mblk_height - 1) / mblk_height * mblk_height) -
+ (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
+
+ /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
+ mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
- // For bytes required in MALL, calculate based on number of MBlks required
- num_mblks = (mall_region_pixels * bytes_per_pixel +
- DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES;
+ /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
+ mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
+ mblk_height * mblk_height + mblk_height;
+
+ /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
+ * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
+ * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
+ * (Should be divisible, but round up if not)
+ */
+ num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+ ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
// cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
// (MALL is 64-byte aligned)
cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
- // For DCC we must cache the meat surface, so double cache lines required
+			/* For DCC, add cache lines for the meta surface (1/256th of the data), plus one */
if (pipe->plane_state->dcc.enable)
- cache_lines_per_plane *= 2;
+ cache_lines_per_plane = cache_lines_per_plane + (cache_lines_per_plane / 256) + 1;
cache_lines_used += cache_lines_per_plane;
}
}
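The alignment arithmetic above rounds the phantom viewport out to MALL block (MBLK) boundaries; with DCN3_2_MBLK_WIDTH of 128 and heights of 128 (4 bytes per element) or 64 (8 bytes per element), one block is always 128 * 128 * 4 = 128 * 64 * 8 = 65536 bytes. A self-contained sketch of the block-aligned-span formula stated in the comments, with invented viewport numbers:

```c
#include <stdint.h>
#include <stdio.h>

#define MBLK_SIZE_BYTES 65536	/* 64 KB per MALL block */

static uint32_t floor_to(uint32_t v, uint32_t align)
{
	return v / align * align;
}

/* blk_aligned_span = FLOOR(start + span + blk - 1, blk) - FLOOR(start, blk),
 * i.e. the footprint of [start, start+span) widened to block boundaries.
 */
static uint32_t blk_aligned_span(uint32_t start, uint32_t span, uint32_t blk)
{
	return floor_to(start + span + blk - 1, blk) - floor_to(start, blk);
}

int main(void)
{
	uint32_t bytes_per_pixel = 4;
	uint32_t mblk_w = 128;
	uint32_t mblk_h = bytes_per_pixel == 4 ? 128 : 64;	/* 4bpe vs 8bpe */

	/* Hypothetical 1920x1080 viewport starting at (64, 0). */
	uint32_t w = blk_aligned_span(64, 1920, mblk_w);	/* 2048 */
	uint32_t h = blk_aligned_span(0, 1080, mblk_h);		/* 1152 */
	uint32_t num_mblks = (w / mblk_w) * (h / mblk_h);	/* 16 * 9 = 144 */

	printf("%u blocks, %u bytes in MALL\n", num_mblks,
	       num_mblks * MBLK_SIZE_BYTES);
	return 0;
}
```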
@@ -86,6 +139,9 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
if (cache_lines_used % lines_per_way > 0)
num_ways++;
+ if (subvp_in_use && dc->debug.force_subvp_num_ways > 0)
+ num_ways = dc->debug.force_subvp_num_ways;
+
return num_ways;
}
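The tail of this function is then a plain ceiling division from cache lines to ways, with the new force_subvp_num_ways debug override applied last (gated in the driver on SubVP actually being in use). A compact sketch of that final step, with the capacity parameters as hypothetical inputs:

```c
#include <stdint.h>

/* Convert cache lines consumed by phantom surfaces into MALL ways:
 * ways = ceil(cache_lines_used / lines_per_way), with a debug override.
 */
static uint32_t num_ways_for_lines(uint32_t cache_lines_used,
				   uint32_t total_cache_lines,
				   uint32_t max_ways,
				   uint32_t force_ways)
{
	uint32_t lines_per_way = total_cache_lines / max_ways;
	uint32_t num_ways = cache_lines_used / lines_per_way;

	if (cache_lines_used % lines_per_way > 0)
		num_ways++;		/* round up any partial way */

	if (force_ways > 0)
		num_ways = force_ways;	/* debug knob wins, as in the patch */

	return num_ways;
}
```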
@@ -177,36 +233,221 @@ bool dcn32_mpo_in_use(struct dc_state *context)
return false;
}
-void dcn32_determine_det_override(struct dc_state *context, display_e2e_pipe_params_st *pipes,
- bool *is_pipe_split_expected, int pipe_cnt)
+
+bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->plane_state && pipe->plane_state->rotation != ROTATION_ANGLE_0)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * *******************************************************************************************
+ * dcn32_determine_det_override: Determine DET allocation for each pipe
+ *
+ * This function determines how much DET to allocate for each pipe. The total number of
+ * DET segments will be split equally among each of the streams, and after that the DET
+ * segments per stream will be split equally among the planes for the given stream.
+ *
+ * If there is a plane that's driven by more than 1 pipe (i.e. pipe split), then the
+ * number of DET for that given plane will be split among the pipes driving that plane.
+ *
+ * High level algorithm:
+ * 1. Split total DET among number of streams
+ * 2. For each stream, split DET among the planes
+ * 3. For each plane, check if there is a pipe split. If yes, split the DET allocation
+ * among those pipes.
+ * 4. Assign the DET override to the DML pipes.
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ * @param [in]: pipes: Array of DML pipes
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_determine_det_override(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes)
{
- int i, j, count, stream_segments, pipe_segments[MAX_PIPES];
+ uint32_t i, j, k;
+ uint8_t pipe_plane_count, stream_segments, plane_segments, pipe_segments[MAX_PIPES] = {0};
+ uint8_t pipe_counted[MAX_PIPES] = {0};
+ uint8_t pipe_cnt = 0;
+ struct dc_plane_state *current_plane = NULL;
+ uint8_t stream_count = 0;
- if (context->stream_count > 0) {
- stream_segments = 18 / context->stream_count;
+ for (i = 0; i < context->stream_count; i++) {
+ /* Don't count SubVP streams for DET allocation */
+ if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+ stream_count++;
+ }
+ }
+
+ if (stream_count > 0) {
+ stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
- count = 0;
- for (j = 0; j < pipe_cnt; j++) {
- if (context->res_ctx.pipe_ctx[j].stream == context->streams[i]) {
- count++;
- if (is_pipe_split_expected[j])
- count++;
+ if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+ continue;
+ if (context->stream_status[i].plane_count > 0)
+ plane_segments = stream_segments / context->stream_status[i].plane_count;
+ else
+ plane_segments = stream_segments;
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ pipe_plane_count = 0;
+ if (context->res_ctx.pipe_ctx[j].stream == context->streams[i] &&
+ pipe_counted[j] != 1) {
+ /* Note: pipe_plane_count indicates the number of pipes to be used for a
+ * given plane. e.g. pipe_plane_count = 1 means single pipe (i.e. not split),
+ * pipe_plane_count = 2 means 2:1 split, etc.
+ */
+ pipe_plane_count++;
+ pipe_counted[j] = 1;
+ current_plane = context->res_ctx.pipe_ctx[j].plane_state;
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
+ context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
+ pipe_plane_count++;
+ pipe_counted[k] = 1;
+ }
+ }
+
+ pipe_segments[j] = plane_segments / pipe_plane_count;
+ for (k = 0; k < dc->res_pool->pipe_count; k++) {
+ if (k != j && context->res_ctx.pipe_ctx[k].stream == context->streams[i] &&
+ context->res_ctx.pipe_ctx[k].plane_state == current_plane) {
+ pipe_segments[k] = plane_segments / pipe_plane_count;
+ }
+ }
}
}
- pipe_segments[i] = stream_segments / count;
}
- for (i = 0; i < pipe_cnt; i++) {
- pipes[i].pipe.src.det_size_override = 0;
- for (j = 0; j < context->stream_count; j++) {
- if (context->res_ctx.pipe_ctx[i].stream == context->streams[j]) {
- pipes[i].pipe.src.det_size_override = pipe_segments[j] * DCN3_2_DET_SEG_SIZE;
- break;
- }
- }
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+ pipes[pipe_cnt].pipe.src.det_size_override = pipe_segments[i] * DCN3_2_DET_SEG_SIZE;
+ pipe_cnt++;
}
} else {
- for (i = 0; i < pipe_cnt; i++)
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
pipes[i].pipe.src.det_size_override = 4 * DCN3_2_DET_SEG_SIZE; //DCN3_2_DEFAULT_DET_SIZE
}
}
+
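The scheme documented above is hierarchical integer division: 18 DET segments are split across non-phantom streams, each stream's share across its planes, and each plane's share across the pipes driving it, with remainders simply left unassigned. A minimal sketch of the arithmetic with fixed example counts and no driver structs:

```c
#include <stdio.h>

#define TOTAL_DET_SEGMENTS 18
#define DCN3_2_DET_SEG_SIZE 64	/* KB per segment */

/* Hierarchical split: streams -> planes -> pipes. Integer division at
 * each level, so leftover segments are simply not assigned (the driver
 * behaves the same way).
 */
static int det_kb_per_pipe(int stream_count, int planes_on_stream,
			   int pipes_on_plane)
{
	int stream_segments = TOTAL_DET_SEGMENTS / stream_count;
	int plane_segments = planes_on_stream > 0 ?
			     stream_segments / planes_on_stream : stream_segments;
	int pipe_segments = plane_segments / pipes_on_plane;

	return pipe_segments * DCN3_2_DET_SEG_SIZE;
}

int main(void)
{
	/* 2 streams, 1 plane each, plane pipe-split 2:1 ->
	 * 18/2 = 9 segments per stream, 9/1 = 9 per plane, 9/2 = 4 per pipe.
	 */
	printf("%d KB per pipe\n", det_kb_per_pipe(2, 1, 2)); /* 256 KB */
	return 0;
}
```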
+void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipe = &res_ctx->pipe_ctx[i];
+ pipe_cnt++;
+ }
+
+ /* For DET allocation, we don't want to use DML policy (not optimal for utilizing all
+ * the DET available for each pipe). Use the DET override input to maintain our driver
+ * policy.
+ */
+ if (pipe_cnt == 1) {
+ pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
+ if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ if (!is_dual_plane(pipe->plane_state->format)) {
+ pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ if (pipe->plane_state->src_rect.width >= 5120 &&
+ pipe->plane_state->src_rect.height >= 2880)
+ pipes[0].pipe.src.det_size_override = 320; // 5K or higher
+ }
+ }
+ } else
+ dcn32_determine_det_override(dc, context, pipes);
+}
+
+/**
+ * *******************************************************************************************
+ * dcn32_save_mall_state: Save MALL (SubVP) state for fast validation cases
+ *
+ * This function saves the MALL (SubVP) state for fast validation cases. For fast validation,
+ * there are situations where a shallow copy of the dc->current_state is created for the
+ * validation. In this case we want to save and restore the mall config because we always
+ * teardown subvp at the beginning of validation (and don't attempt to add it back if it's
+ * fast validation). If we don't restore the subvp config in cases of fast validation +
+ * shallow copy of the dc->current_state, the dc->current_state will have a partially
+ * removed subvp state when we did not intend to remove it.
+ *
+ * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
+ * validation. We don't expect this to happen in fast_validation=1 cases.
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ * @param [out]: temp_config: struct used to cache the existing MALL state
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_save_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream)
+ temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
+
+ if (pipe->plane_state)
+ temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
+ }
+}
+
+/**
+ * *******************************************************************************************
+ * dcn32_restore_mall_state: Restore MALL (SubVP) state for fast validation cases
+ *
+ * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in/out]: context: New DC state to be programmed, restore MALL state into here
+ * @param [in]: temp_config: struct that has the cached MALL state
+ *
+ * @return: void
+ *
+ * *******************************************************************************************
+ */
+void dcn32_restore_mall_state(struct dc *dc,
+ struct dc_state *context,
+ struct mall_temp_config *temp_config)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream)
+ pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
+
+ if (pipe->plane_state)
+ pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index 49682a31ecbd..fa9b6603cfd3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -91,7 +91,6 @@ static const struct link_encoder_funcs dcn321_link_enc_funcs = {
.is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
.get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
.set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux,
- .set_dig_output_mode = enc32_set_dig_output_mode,
};
void dcn321_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index a93dc00ebfb5..61087f2385a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -93,34 +93,7 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#define DCN_BASE__INST0_SEG1 0x000000C0
-#define DCN_BASE__INST0_SEG2 0x000034C0
-#define DCN_BASE__INST0_SEG3 0x00009000
-#define NBIO_BASE__INST0_SEG1 0x00000014
-
-#define MAX_INSTANCE 8
-#define MAX_SEGMENT 6
-
-struct IP_BASE_INSTANCE {
- unsigned int segment[MAX_SEGMENT];
-};
-
-struct IP_BASE {
- struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
-
-static const struct IP_BASE DCN_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } },
- { { 0, 0, 0, 0, 0, 0 } } } };
-
#define DC_LOGGER_INIT(logger)
-#define fixed16_to_double(x) (((double)x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
enum dcn321_clk_src_array_id {
DCN321_CLK_SRC_PLL0,
@@ -159,6 +132,13 @@ enum dcn321_clk_src_array_id {
REG_STRUCT[id].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
+#define SR_ARR_I2C(reg_name, id) \
+ REG_STRUCT[id-1].reg_name = BASE(reg##reg_name##_BASE_IDX) + reg##reg_name
+
+#define SRI_ARR_I2C(reg_name, block, id)\
+ REG_STRUCT[id-1].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ reg ## block ## id ## _ ## reg_name
+
#define SRI_ARR_ALPHABET(reg_name, block, index, id)\
REG_STRUCT[index].reg_name = BASE(reg ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
reg ## block ## id ## _ ## reg_name
@@ -466,21 +446,15 @@ static const struct dcn20_dsc_mask dsc_mask = {
static struct dcn30_mpc_registers mpc_regs;
#define dcn_mpc_regs_init()\
- ( \
- MPC_REG_LIST_DCN3_0_RI(0),\
- MPC_REG_LIST_DCN3_0_RI(1),\
- MPC_REG_LIST_DCN3_0_RI(2),\
- MPC_REG_LIST_DCN3_0_RI(3),\
- MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
- MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
- MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
- MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
- MPC_MCM_REG_LIST_DCN32_RI(0),\
- MPC_MCM_REG_LIST_DCN32_RI(1),\
- MPC_MCM_REG_LIST_DCN32_RI(2),\
- MPC_MCM_REG_LIST_DCN32_RI(3),\
- MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)\
- )
+ MPC_REG_LIST_DCN3_2_RI(0),\
+ MPC_REG_LIST_DCN3_2_RI(1),\
+ MPC_REG_LIST_DCN3_2_RI(2),\
+ MPC_REG_LIST_DCN3_2_RI(3),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(0),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(1),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(2),\
+ MPC_OUT_MUX_REG_LIST_DCN3_0_RI(3),\
+ MPC_DWB_MUX_REG_LIST_DCN3_0_RI(0)
static const struct dcn30_mpc_shift mpc_shift = {
MPC_COMMON_MASK_SH_LIST_DCN32(__SHIFT)
@@ -742,7 +716,12 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
+
+	/* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions */
+ .enable_double_buffered_dsc_pg_support = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
+ .allow_sw_cursor_fallback = false,
+ .alloc_extra_way_for_cursor = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -796,7 +775,7 @@ static struct dce_aux *dcn321_aux_engine_create(
#define i2c_inst_regs_init(id)\
I2C_HW_ENGINE_COMMON_REG_LIST_DCN30_RI(id)
-static struct dce_i2c_registers i2c_hw_regs[6];
+static struct dce_i2c_registers i2c_hw_regs[5];
static const struct dce_i2c_shift i2c_shifts = {
I2C_COMMON_MASK_SH_LIST_DCN30(__SHIFT)
@@ -818,11 +797,11 @@ static struct dce_i2c_hw *dcn321_i2c_hw_create(
#undef REG_STRUCT
#define REG_STRUCT i2c_hw_regs
- i2c_inst_regs_init(1),
- i2c_inst_regs_init(2),
- i2c_inst_regs_init(3),
- i2c_inst_regs_init(4),
- i2c_inst_regs_init(5);
+ i2c_inst_regs_init(1),
+ i2c_inst_regs_init(2),
+ i2c_inst_regs_init(3),
+ i2c_inst_regs_init(4),
+ i2c_inst_regs_init(5);
dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
&i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
@@ -920,10 +899,10 @@ static struct hubp *dcn321_hubp_create(
#undef REG_STRUCT
#define REG_STRUCT hubp_regs
- hubp_regs_init(0),
- hubp_regs_init(1),
- hubp_regs_init(2),
- hubp_regs_init(3);
+ hubp_regs_init(0),
+ hubp_regs_init(1),
+ hubp_regs_init(2),
+ hubp_regs_init(3);
if (hubp32_construct(hubp2, ctx, inst,
&hubp_regs[inst], &hubp_shift, &hubp_mask))
@@ -1625,7 +1604,7 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.validate_bandwidth = dcn32_validate_bandwidth,
.calculate_wm_and_dlg = dcn32_calculate_wm_and_dlg,
.populate_dml_pipes = dcn32_populate_dml_pipes_from_context,
- .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
+ .acquire_idle_pipe_for_head_pipe_in_layer = dcn32_acquire_idle_pipe_for_head_pipe_in_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
@@ -1668,14 +1647,14 @@ static bool dcn321_resource_construct(
#undef REG_STRUCT
#define REG_STRUCT abm_regs
- abm_regs_init(0),
- abm_regs_init(1),
- abm_regs_init(2),
- abm_regs_init(3);
+ abm_regs_init(0),
+ abm_regs_init(1),
+ abm_regs_init(2),
+ abm_regs_init(3);
#undef REG_STRUCT
#define REG_STRUCT dccg_regs
- dccg_regs_init();
+ dccg_regs_init();
ctx->dc_bios->regs = &bios_regs;
@@ -1713,7 +1692,8 @@ static bool dcn321_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
- dc->caps.max_cursor_size = 256;
+	/* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed */
+ dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index e93187c06648..e3e5c39895a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -173,7 +173,8 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne
(result == 0x0)
void dm_helpers_init_panel_settings(
struct dc_context *ctx,
- struct dc_panel_config *config);
+ struct dc_panel_config *config,
+ struct dc_sink *sink);
void dm_helpers_override_panel_settings(
struct dc_context *ctx,
struct dc_panel_config *config);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 86a3b5bfd699..d70838edba80 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -34,7 +34,7 @@ dml_ccflags := -mhard-float -maltivec
endif
ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+ifneq ($(call gcc-min-version, 70100),y)
IS_OLD_GCC = 1
endif
endif
@@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
@@ -123,6 +125,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn314/display_mode_vba_314.o dcn314/display_rq_dlg_calc_314.o
DML += dcn32/display_mode_vba_32.o dcn32/display_rq_dlg_calc_32.o dcn32/display_mode_vba_util_32.o
DML += dcn31/dcn31_fpu.o
DML += dcn32/dcn32_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c
index 41284e263325..288d22a16cf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c
@@ -526,10 +526,10 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
}
if (v->max_swath_height_c[k] > 0.0) {
v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k];
- }
- v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
- if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
- v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c = dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
}
if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k];
@@ -552,14 +552,14 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
}
v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
- v->effective_lb_latency_hiding_source_lines_chroma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]);
- v->effective_detlb_lines_chroma =dcn_bw_floor2(v->lines_in_det_chroma +dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
if (v->byte_per_pixel_in_detc[k] == 0.0) {
v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]);
}
else {
- v->urgent_latency_support_us_per_state[i][j][k] =dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
+ v->effective_lb_latency_hiding_source_lines_chroma = dcn_bw_min2(v->max_line_buffer_lines, dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 / dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+ v->effective_detlb_lines_chroma = dcn_bw_floor2(v->lines_in_det_chroma + dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
+ v->urgent_latency_support_us_per_state[i][j][k] = dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] * dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 * dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
}
}
}
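The hunk above moves the chroma line-buffer and DET-line terms into the byte_per_pixel_in_detc != 0 branch; display_mode_vba_20v2.c receives the mirror-image change further down. Computing chroma-only intermediates for a plane that has no chroma wastes work and risks feeding undefined inputs (zero chroma taps, stale chroma swath state) into the dcn_bw_min2() latency bound. One way such inputs bite, shown as a tiny user-space illustration (not DML code; assumes masked FP exceptions, as is typical for this kernel-FPU path — values propagate as inf/NaN rather than faulting):

#include <stdio.h>

int main(void)
{
	double byte_per_pixel_c = 0.0;               /* luma-only plane */
	double lines_c = 1024.0 / byte_per_pixel_c;  /* inf, no fault */
	double margin = lines_c - lines_c;           /* inf - inf = NaN */

	printf("%f %f\n", lines_c, margin);          /* prints "inf nan" */
	return 0;
}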
@@ -1146,10 +1146,10 @@ void display_pipe_configuration(struct dcn_bw_internal_vars *v)
}
if (v->maximum_swath_height_c > 0.0) {
v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c;
- }
- v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
- if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
- v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c = dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
}
if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
v->swath_height_y[k] = v->maximum_swath_height_y;
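Both swath-rounding hunks above (in mode_support_and_system_configuration and display_pipe_configuration) make the same structural fix: the chroma swath-size rounding used to run unconditionally, consuming swath_width_granularity_c even when max_swath_height_c is zero and the granularity was never computed. Moving the rounding, and the yuv420_sub_10 adjustment, inside the guard keeps every consumer of the granularity behind the check. A minimal sketch of the resulting shape, with simplified hypothetical names (ceil2 standing in for dcn_bw_ceil2):

/*
 * Chroma bytes are only meaningful when the surface has a chroma
 * plane; max_swath_height_c == 0 means granularity_c is undefined,
 * so the granularity and its consumers stay inside one guard.
 */
static double rounded_up_swath_bytes_c(double swath_width, double bpp_c,
				       double max_swath_height_c)
{
	double granularity_c;
	double bytes_c = 0.0;

	if (max_swath_height_c > 0.0) {
		granularity_c = 256.0 / ceil2(bpp_c, 2.0) / max_swath_height_c;
		bytes_c = (ceil2(swath_width / 2.0 - 1.0, granularity_c) +
			   granularity_c) * bpp_c * max_swath_height_c;
	}
	return bytes_c;
}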
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
index 07d18e78de49..cac72413a097 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
@@ -23,6 +23,7 @@
*
*/
+#include "os_types.h"
#include "dcn_calc_math.h"
#define isNaN(number) ((number) != (number))
@@ -69,8 +70,8 @@ float dcn_bw_max2(const float arg1, const float arg2)
float dcn_bw_floor2(const float arg, const float significance)
{
- if (significance == 0)
- return 0;
+ ASSERT(significance != 0);
+
return ((int) (arg / significance)) * significance;
}
float dcn_bw_floor(const float arg)
@@ -80,17 +81,14 @@ float dcn_bw_floor(const float arg)
float dcn_bw_ceil(const float arg)
{
- float flr = dcn_bw_floor2(arg, 1);
-
- return flr + 0.00001 >= arg ? arg : flr + 1;
+ return (int) (arg + 0.99999);
}
float dcn_bw_ceil2(const float arg, const float significance)
{
- float flr = dcn_bw_floor2(arg, significance);
- if (significance == 0)
- return 0;
- return flr + 0.00001 >= arg ? arg : flr + significance;
+ ASSERT(significance != 0);
+
+ return ((int) (arg / significance + 0.99999)) * significance;
}
float dcn_bw_max3(float v1, float v2, float v3)
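The rewritten helpers change behavior in two ways: a zero significance is now treated as a programmer error caught by ASSERT() instead of being silently mapped to a 0 result, and the ceiling is computed directly as ((int)(arg / significance + 0.99999)) * significance rather than via floor plus an epsilon compare. The 0.99999 bias keeps exact multiples from being bumped to the next step. A user-space sanity check of that arithmetic (illustrative only, assuming the same float math as above):

#include <assert.h>

static float ceil2(float arg, float significance)
{
	return ((int)(arg / significance + 0.99999f)) * significance;
}

int main(void)
{
	assert(ceil2(3.0f, 1.0f) == 3.0f);    /* exact multiple stays put */
	assert(ceil2(3.01f, 1.0f) == 4.0f);   /* just above rounds up */
	assert(ceil2(192.0f, 64.0f) == 192.0f);
	return 0;
}

Note that the cast truncates toward zero, so like the floor-based version this is only meant for the non-negative quantities DML feeds it.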
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index d46adc849d2a..e73f089c84bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -1444,81 +1444,67 @@ unsigned int dcn_find_dcfclk_suits_all(
return dcf_clk;
}
-static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks)
+void dcn_bw_update_from_pplib_fclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *fclks)
{
- int i;
-
- if (clks->num_levels == 0)
- return false;
-
- for (i = 0; i < clks->num_levels; i++)
- /* Ensure that the result is sane */
- if (clks->data[i].clocks_in_khz == 0)
- return false;
+ unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
- return true;
+ ASSERT(fclks->num_levels);
+
+ vmin0p65_idx = 0;
+ vmid0p72_idx = fclks->num_levels -
+ (fclks->num_levels > 2 ? 3 : (fclks->num_levels > 1 ? 2 : 1));
+ vnom0p8_idx = fclks->num_levels - (fclks->num_levels > 1 ? 2 : 1);
+ vmax0p9_idx = fclks->num_levels - 1;
+
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
+ 32 * (fclks->data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vmid0p72_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vnom0p8_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
+ dc->dcn_soc->number_of_channels *
+ (fclks->data[vmax0p9_idx].clocks_in_khz / 1000.0)
+ * ddr4_dram_factor_single_Channel / 1000.0;
}
-void dcn_bw_update_from_pplib(struct dc *dc)
+void dcn_bw_update_from_pplib_dcfclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *dcfclks)
{
- struct dc_context *ctx = dc->ctx;
- struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
- bool res;
- unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx;
-
- /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
- res = dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
-
- if (res)
- res = verify_clock_values(&fclks);
-
- if (res) {
- ASSERT(fclks.num_levels);
-
- vmin0p65_idx = 0;
- vmid0p72_idx = fclks.num_levels -
- (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1));
- vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1);
- vmax0p9_idx = fclks.num_levels - 1;
-
- dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 =
- 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 =
- dc->dcn_soc->number_of_channels *
- (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0)
- * ddr4_dram_factor_single_Channel / 1000.0;
- } else
- BREAK_TO_DEBUGGER();
-
- res = dm_pp_get_clock_levels_by_type_with_voltage(
- ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
-
- if (res)
- res = verify_clock_values(&dcfclks);
+ if (dcfclks->num_levels >= 3) {
+ dc->dcn_soc->dcfclkv_min0p65 = dcfclks->data[0].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_mid0p72 = dcfclks->data[dcfclks->num_levels - 3].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_nom0p8 = dcfclks->data[dcfclks->num_levels - 2].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_max0p9 = dcfclks->data[dcfclks->num_levels - 1].clocks_in_khz / 1000.0;
+ }
+}
- if (res && dcfclks.num_levels >= 3) {
- dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0;
- dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0;
- } else
- BREAK_TO_DEBUGGER();
+void dcn_get_soc_clks(
+ struct dc *dc,
+ int *min_fclk_khz,
+ int *min_dcfclk_khz,
+ int *socclk_khz)
+{
+ *min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+ *min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+ *socclk_khz = dc->dcn_soc->socclk * 1000;
}
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+void dcn_bw_notify_pplib_of_wm_ranges(
+ struct dc *dc,
+ int min_fclk_khz,
+ int min_dcfclk_khz,
+ int socclk_khz)
{
struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
- int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
if (dc->res_pool->pp_smu)
@@ -1526,10 +1512,6 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
if (!pp || !pp->set_wm_ranges)
return;
- min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
- min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
- socclk_khz = dc->dcn_soc->socclk * 1000;
-
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
* Memory clock member variables for Watermarks calculations for each
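With the split, clock retrieval and validation move out of the calculation helpers: dcn_bw_update_from_pplib_fclks() and dcn_bw_update_from_pplib_dcfclks() now assume the caller already fetched sane levels, and the old per-level verify_clock_values() zero check has to happen, if anywhere, at the call site. A sketch of the caller shape this implies — the real call sites live in the DM layer and are not part of this diff, so the error handling here is assumed:

struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};

if (dm_pp_get_clock_levels_by_type_with_voltage(ctx,
		DM_PP_CLOCK_TYPE_FCLK, &fclks) && fclks.num_levels)
	dcn_bw_update_from_pplib_fclks(dc, &fclks);

if (dm_pp_get_clock_levels_by_type_with_voltage(ctx,
		DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks) && dcfclks.num_levels >= 3)
	dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks);

The new dcn_get_soc_clks() accessor is the read-side counterpart: it hands the min fclk/dcfclk/socclk values back to the caller so dcn_bw_notify_pplib_of_wm_ranges() can take them as plain parameters instead of reaching into dc->dcn_soc itself.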
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 63bbdf8b8678..edd098c7eb92 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -4478,17 +4478,17 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->EffectiveLBLatencyHidingSourceLinesLuma),
locals->SwathHeightYPerState[i][j][k]);
- locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
- locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
- locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
- locals->EffectiveLBLatencyHidingSourceLinesChroma),
- locals->SwathHeightCPerState[i][j][k]);
if (locals->BytePerPixelInDETC[k] == 0) {
locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i][0] / locals->NoOfDPP[i][j][k]);
} else {
+ locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min(
+ locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] *
+ locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i][0],
+ locals->EffectiveLBLatencyHidingSourceLinesChroma),
+ locals->SwathHeightCPerState[i][j][k]);
locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min(
locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k])
/ locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] *
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 8a7485e21d53..1d84ae50311d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -806,10 +806,12 @@ static bool CalculatePrefetchSchedule(
if (myPipe->SourceScan == dm_horz) {
*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockWidth256BytesY) + myPipe->BlockWidth256BytesY;
- *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
+ if (myPipe->BlockWidth256BytesC > 0)
+ *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
} else {
*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY;
- *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
+ if (myPipe->BlockWidth256BytesC > 0)
+ *swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
}
prefetch_bw_oto = (PrefetchSourceLinesY * *swath_width_luma_ub * dml_ceil(BytePerPixelDETY, 1) + PrefetchSourceLinesC * *swath_width_chroma_ub * dml_ceil(BytePerPixelDETC, 2)) / Tsw_oto;
@@ -2634,7 +2636,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
&mode_lib->vba.SrcActiveDrainRate,
&mode_lib->vba.TInitXFill,
&mode_lib->vba.TslvChk);
- locals->XFCRemoteSurfaceFlipLatency[k] =
+ locals->XFCRemoteSurfaceFlipLatency[k] =
dml_floor(
mode_lib->vba.XFCRemoteSurfaceFlipDelay
/ (mode_lib->vba.HTotal[k]
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index b7fa003ffe06..479e2c1a1301 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -6322,10 +6322,6 @@ static void CalculateSwathWidth(
for (k = 0; k < NumberOfActivePlanes; ++k) {
enum odm_combine_mode MainPlaneODMCombine = 0;
- surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
- surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
if (SourceScan[k] != dm_vert) {
SwathWidthSingleDPPY[k] = ViewportWidth[k];
@@ -6365,8 +6361,6 @@ static void CalculateSwathWidth(
surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
if (SourceScan[k] != dm_vert) {
MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
@@ -6374,6 +6368,7 @@ static void CalculateSwathWidth(
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (long) dml_ceil(SwathWidthY[k] - 1,
Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
+ surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
swath_width_chroma_ub[k] = dml_min(surface_width_ub_c, (long) dml_ceil(SwathWidthC[k] - 1,
Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
} else {
@@ -6385,6 +6380,7 @@ static void CalculateSwathWidth(
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (long) dml_ceil(SwathWidthY[k] - 1,
Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
+ surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
swath_width_chroma_ub[k] = dml_min(surface_height_ub_c, (long) dml_ceil(SwathWidthC[k] - 1,
Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
} else {
@@ -6497,7 +6493,7 @@ static double CalculateUrgentLatency(
return ret;
}
-static void UseMinimumDCFCLK(
+static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
struct vba_vars_st *v,
int MaxPrefetchMode,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 241d28d0b7fb..422f17aefd4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -379,6 +379,11 @@ void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ if ((int)(dcn3_01_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_01_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000.0;
+ }
dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
}
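The same debug override recurs for dcn3_1, dcn3_15 and dcn3_16 further down: a non-zero dc->debug.dram_clock_change_latency_ns (nanoseconds, typically injected for testing) replaces the baked-in microsecond default before dml_init_instance(). In isolation, with the soc pointer hypothetical:

if ((int)(soc->dram_clock_change_latency_us * 1000) !=
			dc->debug.dram_clock_change_latency_ns &&
    dc->debug.dram_clock_change_latency_ns)
	soc->dram_clock_change_latency_us =
		dc->debug.dram_clock_change_latency_ns / 1000.0;

One wrinkle worth noting: this dcn301 copy divides by 1000.0, while the dcn31/315/316 copies below divide by plain 1000 — if the debug field is an integer type, those variants truncate the override to whole microseconds.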
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 0e62eb823e34..7dd0845d1bd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -291,6 +291,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
.do_urgent_latency_adjustment = false,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
.urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
+ .dummy_pstate_latency_us = 10.0
};
struct _vcs_dpi_ip_params_st dcn3_16_ip = {
@@ -458,13 +460,30 @@ void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
}
}
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+ dc_assert_fp_enabled();
+
+ if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
+ /* For 315, p-state change is only supported if possible in vactive */
+ if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[context->bw_ctx.dml.vba.VoltageLevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_vactive)
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;
+ else
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
+ dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us =
+ dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_exit_time_us;
+ }
+}
+
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx;
+ int i, pipe_idx, active_dpp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
@@ -485,72 +504,6 @@ void dcn31_calculate_wm_and_dlg_fp(
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-#if 0 // TODO
- /* Set B:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set C:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Set D:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
-
/* Set A:
* All clocks min required
*
@@ -567,16 +520,17 @@ void dcn31_calculate_wm_and_dlg_fp(
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- /* TODO: remove: */
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- /* end remove*/
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ active_dpp_count++;
+
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -593,6 +547,9 @@ void dcn31_calculate_wm_and_dlg_fp(
}
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ /* For 31x APUs, p-state change is only supported if possible in vactive or if there are no active DPPs */
+ context->bw_ctx.bw.dcn.clk.p_state_change_support =
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_vactive || !active_dpp_count;
}
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -667,6 +624,12 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
dcn3_1_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ if ((int)(dcn3_1_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_1_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
+
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
else
@@ -682,7 +645,11 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_15_soc.num_chans = bw_params->num_channels;
+
+ if (bw_params->num_channels > 0)
+ dcn3_15_soc.num_chans = bw_params->num_channels;
+ if (bw_params->dram_channel_width_bytes > 0)
+ dcn3_15_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
ASSERT(clk_table->num_entries);
@@ -721,8 +688,14 @@ void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
*/
dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ if ((int)(dcn3_15_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_15_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
+
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
+ dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN315);
else
dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
}
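Two things happen in dcn315_update_bw_bounding_box(): the SoC table only takes board values that are actually populated, so a zeroed bw_params no longer clobbers num_chans or the channel width with 0, and DML is now initialized with DML_PROJECT_DCN315 instead of the shared DCN31 project. The new project id is what later lets the mode-support path dispatch the DCN3.15-only DET repartition added in display_mode_vba_31.c at the end of this diff:

if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
	PatchDETBufferSizeInKByte(v->NumberOfActivePlanes,
				  v->NoOfDPPThisState,
				  v->ip.config_return_buffer_size_in_kbytes,
				  &v->DETBufferSizeInKByte[0]);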
@@ -813,6 +786,11 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
}
+ if ((int)(dcn3_16_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_16_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
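dcn31_calculate_wm_and_dlg_fp() now counts planes that actually own a DPP and folds that into the reported p-state support. Restated in isolation (sketch; the vba/clk locals are assumed):

bool vactive_ok =
	vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] ==
		dm_dram_clock_change_vactive;

clk->p_state_change_support = vactive_ok || active_dpp_count == 0;

The new dcn315_update_soc_for_wm_a() applies the same policy on the validation side: when vactive switching was not proven, the dummy p-state latency (newly added to dcn3_15_soc along with num_chans) is substituted so the WM_A watermarks stay conservative.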
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 4372f17b55d4..fd58b2561ec9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -35,6 +35,7 @@ void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
int pipe_cnt);
void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+void dcn315_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
void dcn31_calculate_wm_and_dlg_fp(
struct dc *dc, struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index d63b4209b14c..b612edb14417 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -43,6 +43,8 @@
#define BPP_BLENDED_PIPE 0xffffffff
#define DCN31_MAX_DSC_IMAGE_WIDTH 5184
#define DCN31_MAX_FMT_420_BUFFER_WIDTH 4096
+#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_15_MAX_DET_SIZE 384
// For DML-C changes that haven't been propagated to VBA yet
//#define __DML_VBA_ALLOW_DELTA__
@@ -251,33 +253,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
+ double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -311,64 +293,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -1107,10 +1053,10 @@ static bool CalculatePrefetchSchedule(
bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
/*rev 99*/
prefetch_bw_pr = dml_min(1, bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane);
- max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
+ max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
- prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
+ prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
@@ -2904,33 +2850,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
- v->PixelPTEBytesPerRow[k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3017,64 +2943,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
- &v->Z8StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3598,61 +3488,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
+ double DPTEBytesPerRow)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
+ double LineTime = v->HTotal[k] / v->PixelClock[k];
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
- if (GPUVMEnable == true || DCCEnable == true) {
- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3661,54 +3533,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
- if (GPUVMEnable == true) {
- *final_flip_bw = dml_max(
- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
- } else if ((GPUVMEnable == true || DCCEnable == true)) {
- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ if (v->GPUVMEnable == true) {
+ v->final_flip_bw[k] = dml_max(
+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
- *final_flip_bw = 0;
+ v->final_flip_bw[k] = 0;
}
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / VRatioChroma,
- meta_row_height_chroma * LineTime / VRatioChroma);
+ v->dpte_row_height[k] * LineTime / v->VRatio[k],
+ v->meta_row_height[k] * LineTime / v->VRatio[k],
+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
+ v->ImmediateFlipSupportedForPipe[k] = false;
} else {
- *ImmediateFlipSupportedForPipe = true;
+ v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
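The CalculateFlipSchedule() rework is a pure interface change: the per-plane scalars and out-pointers move behind the vba pointer plus a plane index, and LineTime is derived internally from HTotal[k]/PixelClock[k] instead of being passed in. Beyond readability, the shorter argument lists shrink caller stack frames — the same pressure the noinline_for_stack annotation on UseMinimumDCFCLK() in display_mode_vba_30.c above is addressing. The pattern in miniature (illustrative, not DML code):

struct ctx {
	double in[8];
	double out[8];
};

/*
 * One context pointer plus an index replaces N scalar inputs and M
 * out-pointers; results land in indexed state, not through pointers.
 */
static void per_plane(struct ctx *c, unsigned int k)
{
	c->out[k] = c->in[k] * 2.0;
}

CalculateWatermarksAndDRAMSpeedChangeSupport() below gets the identical treatment, which is why its dummy out-arguments at both call sites collapse as well.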
@@ -3905,6 +3777,17 @@ static noinline void CalculatePrefetchSchedulePerPlane(
&v->VReadyOffsetPix[k]);
}
+static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
+{
+ int i, total_pipes = 0;
+ for (i = 0; i < NumberOfActivePlanes; i++)
+ total_pipes += NoOfDPPThisState[i];
+ *DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
+ if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
+ *DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
+}
+
+
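PatchDETBufferSizeInKByte() reserves the 128 kB minimum compressed buffer, splits the remaining return buffer across all allocated pipes in 64 kB granules, and clamps the result to 384 kB. Worked with hypothetical numbers (the real config_return_buffer_size_in_kbytes comes from the dcn3_15 IP table, not shown in this diff): for a 1792 kB return buffer and 3 pipes,

	((1792 - 128) / 64 / 3) * 64 = (26 / 3) * 64 = 8 * 64 = 512 kB,

which the DCN3_15_MAX_DET_SIZE clamp then reduces to 384 kB per pipe.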
void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
@@ -4663,6 +4546,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
+ if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
+ PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
@@ -5300,33 +5185,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5384,64 +5249,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&dummy,
&dummy,
&dummy,
- &dummy,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &dummy);
}
}
@@ -5566,64 +5395,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5643,103 +5436,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
- / (HTotal[k] / PixelClock[k]);
+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+ / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ if (v->WritebackEnable[k] == true) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5749,14 +5542,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5764,11 +5557,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+	v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5776,25 +5569,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
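+	/*
+	 * Illustrative arithmetic (not from the driver): with an assumed
+	 * SRExitTime of 19.5 us, ExtraLatency of 2 us and DCFCLKDeepSleep
+	 * of 8 MHz, the 10 / DCFCLKDeepSleep term contributes 1.25 us, so
+	 * StutterExitWatermark = 19.5 + 2 + 1.25 = 22.75 us.
+	 */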
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
@@ -6933,8 +6726,6 @@ static void CalculateSwathWidth(
{
int surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
int surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
@@ -6945,6 +6736,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_width_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
@@ -6956,6 +6749,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_height_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 34a5d0f87b5f..cf420ad2b8dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -194,6 +194,9 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
+ if (bw_params->dram_channel_width_bytes > 0)
+ dcn3_14_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
+
if (bw_params->num_channels > 0)
dcn3_14_soc.num_chans = bw_params->num_channels;
@@ -261,8 +264,13 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
}
+ if ((int)(dcn3_14_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_14_soc.dram_clock_change_latency_us = dc->debug.dram_clock_change_latency_ns / 1000;
+ }
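+	/*
+	 * Hypothetical example of the override above: with
+	 * dc->debug.dram_clock_change_latency_ns set to 11000 via debugfs,
+	 * the ns value differs from the SoC default, so
+	 * dram_clock_change_latency_us becomes 11000 / 1000 = 11 us.
+	 */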
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
else
dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
}
@@ -315,6 +323,8 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
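+		/*
+		 * Illustrative arithmetic (not from the driver), assuming a
+		 * 1080p60 timing with h_total = 2200 and pix_clk_100hz =
+		 * 1485000, and an assumed VBlankNomDefaultUS of 668:
+		 *   line time  = 2200 / 148.5 MHz ~= 14.8 us
+		 *   vblank_nom = 668 / 14.8      ~= 45 lines
+		 */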
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index fc4d7474c111..0d12fd079cd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -61,7 +61,7 @@
// fudge factor for min dcfclk calculation
#define __DML_MIN_DCFCLK_FACTOR__ 1.15
-struct {
+typedef struct {
double DPPCLK;
double DISPCLK;
double PixelClock;
@@ -265,33 +265,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
+ double DPTEBytesPerRow);
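+/*
+ * A minimal sketch (not the driver's code) of the refactoring pattern used
+ * here: per-plane scalars that previously travelled as parameters are now
+ * looked up through the shared vba_vars_st using the new plane index k.
+ * The helper name "example_per_plane" is hypothetical.
+ *
+ *	static void example_per_plane(struct display_mode_lib *mode_lib,
+ *				      unsigned int k)
+ *	{
+ *		struct vba_vars_st *v = &mode_lib->vba;
+ *		double line_time = v->HTotal[k] / v->PixelClock[k];
+ *
+ *		// Per-plane state such as v->VRatio[k] or v->DCCEnable[k]
+ *		// is read directly instead of being passed in.
+ *	}
+ */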
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -325,64 +305,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -1362,7 +1306,7 @@ static bool CalculatePrefetchSchedule(
// - ((NumberOfCursors > 0 || GPUVMEnable || DCCEnable) ?
- ((GPUVMEnable || myPipe->DCCEnable) ? (*DestinationLinesToRequestVMInVBlank + 2 * *DestinationLinesToRequestRowInVBlank) : 0.0); // TODO: Did someone else add this??
#else
- LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
+ LinesToRequestPrefetchPixelData = *DestinationLinesForPrefetch - *DestinationLinesToRequestVMInVBlank - 2 * *DestinationLinesToRequestRowInVBlank;
#endif
#ifdef __DML_VBA_DEBUG__
@@ -1599,7 +1543,7 @@ static void CalculateDCCConfiguration(
int segment_order_vert_contiguous_luma;
int segment_order_vert_contiguous_chroma;
- enum {
+ typedef enum {
REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
} RequestType;
RequestType RequestLuma;
@@ -2928,33 +2872,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
- v->PixelPTEBytesPerRow[k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3041,64 +2965,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
- &v->Z8StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3710,61 +3598,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
+ double DPTEBytesPerRow)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
+ double LineTime = v->HTotal[k] / v->PixelClock[k];
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
- if (GPUVMEnable == true || DCCEnable == true) {
- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3773,54 +3643,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
- if (GPUVMEnable == true) {
- *final_flip_bw = dml_max(
- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
- } else if ((GPUVMEnable == true || DCCEnable == true)) {
- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ if (v->GPUVMEnable == true) {
+ v->final_flip_bw[k] = dml_max(
+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
- *final_flip_bw = 0;
+ v->final_flip_bw[k] = 0;
}
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / VRatioChroma,
- meta_row_height_chroma * LineTime / VRatioChroma);
+ v->dpte_row_height[k] * LineTime / v->VRatio[k],
+ v->meta_row_height[k] * LineTime / v->VRatio[k],
+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
+ v->ImmediateFlipSupportedForPipe[k] = false;
} else {
- *ImmediateFlipSupportedForPipe = true;
+ v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
@@ -4071,9 +3941,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->SourceFormatPixelAndScanSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
- if ((v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true))
- || ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t
- || v->SurfaceTiling[k] == dm_sw_64kb_d_x) && !(v->SourcePixelFormat[k] == dm_444_64))) {
+ if (v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true)) {
v->SourceFormatPixelAndScanSupport = false;
}
}
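+/*
+ * Equivalent sketch (not in the driver): !(SourceScan != dm_vert) is just
+ * an equality test, so the simplified check above rejects linear-tiled
+ * surfaces that are vertically scanned or DCC-enabled:
+ *
+ *	if (v->SurfaceTiling[k] == dm_sw_linear &&
+ *	    (v->SourceScan[k] == dm_vert || v->DCCEnable[k]))
+ *		v->SourceFormatPixelAndScanSupport = false;
+ */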
@@ -5414,33 +5282,13 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5498,64 +5346,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &dummy,
&dummy,
&dummy,
&dummy,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &dummy);
}
}
@@ -5681,64 +5493,28 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5758,103 +5534,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
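+	/*
+	 * Illustrative arithmetic (not from the driver): with an assumed
+	 * WritebackChunkSize of 8 KB and SOCCLK of 1200 MHz, the chunk
+	 * drain term adds 8 * 1024 / 32 / 1200 ~= 0.21 us on top of
+	 * WritebackLatency when more than one writeback is active.
+	 */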
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
- / (HTotal[k] / PixelClock[k]);
+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+ / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ if (v->WritebackEnable[k] == true) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5864,14 +5640,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5879,11 +5655,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+	v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5891,25 +5667,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
@@ -7049,8 +6825,6 @@ static void CalculateSwathWidth(
{
int surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
int surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
@@ -7061,6 +6835,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockHeightC[k];
swath_width_luma_ub[k] = dml_min(surface_width_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockWidthY[k]) + Read256BytesBlockWidthY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_width_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockWidthC[k]) + Read256BytesBlockWidthC[k]);
@@ -7072,6 +6848,8 @@ static void CalculateSwathWidth(
MaximumSwathHeightC[k] = Read256BytesBlockWidthC[k];
swath_width_luma_ub[k] = dml_min(surface_height_ub_l, (int) dml_ceil(SwathWidthY[k] - 1, Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
if (BytePerPixC[k] > 0) {
+ int surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
+
swath_width_chroma_ub[k] = dml_min(
surface_height_ub_c,
(int) dml_ceil(SwathWidthC[k] - 1, Read256BytesBlockHeightC[k]) + Read256BytesBlockHeightC[k]);
@@ -7157,12 +6935,13 @@ static double CalculateExtraLatencyBytes(
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
else
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
- else
+ } else {
HostVMDynamicLevels = 0;
+ }
ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
- if (GPUVMEnable == true)
+ if (GPUVMEnable == true) {
for (k = 0; k < NumberOfActivePlanes; ++k)
ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
}
@@ -7406,7 +7185,7 @@ static unsigned int CalculateMaxVStartup(
double line_time_us = HTotal / PixelClock;
unsigned int vblank_actual = VTotal - VActive;
unsigned int vblank_nom_default_in_line = dml_floor(VBlankNomDefaultUS / line_time_us, 1.0);
- unsigned int vblank_nom_input = dml_min(VBlankNom, vblank_nom_default_in_line);
+ unsigned int vblank_nom_input = VBlankNom; //dml_min(VBlankNom, vblank_nom_default_in_line);
unsigned int vblank_avail = vblank_nom_input == 0 ? vblank_nom_default_in_line : vblank_nom_input;
vblank_size = (unsigned int) dml_min(vblank_actual, vblank_avail);
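+	/*
+	 * Sketch of the selection above (illustration, not additional driver
+	 * code): VBlankNom is now trusted as-is rather than clamped to the
+	 * computed default, which only serves as a fallback when the input
+	 * is zero:
+	 *
+	 *	vblank_avail = VBlankNom ? VBlankNom
+	 *				 : vblank_nom_default_in_line;
+	 */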
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 7f6c977c4981..819de0f11012 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -121,8 +121,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
},
},
.num_states = 1,
- .sr_exit_time_us = 20.16,
- .sr_enter_plus_exit_time_us = 27.13,
+ .sr_exit_time_us = 42.97,
+ .sr_enter_plus_exit_time_us = 49.94,
.sr_exit_z8_time_us = 285.0,
.sr_enter_plus_exit_z8_time_us = 320,
.writeback_latency_us = 12.0,
@@ -243,18 +243,61 @@ void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr)
clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
}
+/*
+ * Finds dummy_latency_index when MCLK switching using firmware-based
+ * vblank stretch is enabled. This function iterates through the table
+ * of dummy pstate latencies until it finds the lowest value that
+ * allows dm_allow_self_refresh_and_mclk_switch to happen.
+ */
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ const int max_latency_table_entries = 4;
+ const struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ int dummy_latency_index = 0;
+
+ dc_assert_fp_enabled();
+
+ while (dummy_latency_index < max_latency_table_entries) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+ dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+
+ if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported)
+ break;
+
+ dummy_latency_index++;
+ }
+
+ if (dummy_latency_index == max_latency_table_entries) {
+ ASSERT(dummy_latency_index != max_latency_table_entries);
+		/* If execution reaches this point, dummy p-states are
+		 * not possible. This should never happen and would mean
+		 * something is severely wrong.
+		 * Here we reset dummy_latency_index to the last table entry
+		 * (index 3), because it is better to have underflows than
+		 * system crashes.
+		 */
+ dummy_latency_index = max_latency_table_entries - 1;
+ }
+
+ return dummy_latency_index;
+}
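+/*
+ * Hedged usage sketch (hypothetical call site, not the actual caller): the
+ * returned index is typically applied before the final validation pass:
+ *
+ *	int idx = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(
+ *			dc, context, pipes, pipe_cnt, vlevel);
+ *	context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ *		dc->clk_mgr->bw_params->dummy_pstate_table[idx].dummy_pstate_latency_us;
+ */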
+
/**
* dcn32_helper_populate_phantom_dlg_params - Get DLG params for phantom pipes
* and populate pipe_ctx with those params.
- *
- * This function must be called AFTER the phantom pipes are added to context
- * and run through DML (so that the DLG params for the phantom pipes can be
- * populated), and BEFORE we program the timing for the phantom pipes.
- *
* @dc: [in] current dc state
* @context: [in] new dc state
* @pipes: [in] DML pipe params array
* @pipe_cnt: [in] DML pipe count
+ *
+ * This function must be called AFTER the phantom pipes are added to context
+ * and run through DML (so that the DLG params for the phantom pipes can be
+ * populated), and BEFORE we program the timing for the phantom pipes.
*/
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
struct dc_state *context,
@@ -286,41 +329,88 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
}
}
-bool dcn32_predict_pipe_split(struct dc_state *context, display_pipe_params_st pipe, int index)
+/**
+ * dcn32_predict_pipe_split - Predict if pipe split will occur for a given DML pipe
+ * @context: [in] New DC state to be programmed
+ * @pipe_e2e: [in] DML pipe end to end context
+ *
+ * This function takes in a DML pipe (pipe_e2e) and predicts if pipe split is required (both
+ * ODM and MPC). For pipe split, ODM combine is determined by the ODM mode, and MPC combine is
+ * determined by DPPClk requirements.
+ *
+ * This function follows the same policy as DML:
+ * - Check for ODM combine requirements / policy first
+ *   - MPC combine is only chosen if there are no ODM combine requirements / policy in place, and
+ * MPC is required
+ *
+ * Return: Number of splits expected (1 for 2:1 split, 3 for 4:1 split, 0 for no splits).
+ */
+uint8_t dcn32_predict_pipe_split(struct dc_state *context,
+ display_e2e_pipe_params_st *pipe_e2e)
{
double pscl_throughput;
double pscl_throughput_chroma;
double dpp_clk_single_dpp, clock;
double clk_frequency = 0.0;
double vco_speed = context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz;
+ bool total_available_pipes_support = false;
+ uint32_t number_of_dpp = 0;
+ enum odm_combine_mode odm_mode = dm_odm_combine_mode_disabled;
+ double req_dispclk_per_surface = 0;
+ uint8_t num_splits = 0;
dc_assert_fp_enabled();
- dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(pipe.scale_ratio_depth.hscl_ratio,
- pipe.scale_ratio_depth.hscl_ratio_c,
- pipe.scale_ratio_depth.vscl_ratio,
- pipe.scale_ratio_depth.vscl_ratio_c,
- context->bw_ctx.dml.ip.max_dchub_pscl_bw_pix_per_clk,
- context->bw_ctx.dml.ip.max_pscl_lb_bw_pix_per_clk,
- pipe.dest.pixel_rate_mhz,
- pipe.src.source_format,
- pipe.scale_taps.htaps,
- pipe.scale_taps.htaps_c,
- pipe.scale_taps.vtaps,
- pipe.scale_taps.vtaps_c,
- /* Output */
- &pscl_throughput, &pscl_throughput_chroma,
- &dpp_clk_single_dpp);
+ dml32_CalculateODMMode(context->bw_ctx.dml.ip.maximum_pixels_per_line_per_dsc_unit,
+ pipe_e2e->pipe.dest.hactive,
+ pipe_e2e->dout.output_format,
+ pipe_e2e->dout.output_type,
+ pipe_e2e->pipe.dest.odm_combine_policy,
+ context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
+ context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dispclk_mhz,
+ pipe_e2e->dout.dsc_enable != 0,
+ 0, /* TotalNumberOfActiveDPP can be 0 since we're predicting pipe split requirement */
+ context->bw_ctx.dml.ip.max_num_dpp,
+ pipe_e2e->pipe.dest.pixel_rate_mhz,
+ context->bw_ctx.dml.soc.dcn_downspread_percent,
+ context->bw_ctx.dml.ip.dispclk_ramp_margin_percent,
+ context->bw_ctx.dml.soc.dispclk_dppclk_vco_speed_mhz,
+ pipe_e2e->dout.dsc_slices,
+ /* Output */
+ &total_available_pipes_support,
+ &number_of_dpp,
+ &odm_mode,
+ &req_dispclk_per_surface);
+
+ dml32_CalculateSinglePipeDPPCLKAndSCLThroughput(pipe_e2e->pipe.scale_ratio_depth.hscl_ratio,
+ pipe_e2e->pipe.scale_ratio_depth.hscl_ratio_c,
+ pipe_e2e->pipe.scale_ratio_depth.vscl_ratio,
+ pipe_e2e->pipe.scale_ratio_depth.vscl_ratio_c,
+ context->bw_ctx.dml.ip.max_dchub_pscl_bw_pix_per_clk,
+ context->bw_ctx.dml.ip.max_pscl_lb_bw_pix_per_clk,
+ pipe_e2e->pipe.dest.pixel_rate_mhz,
+ pipe_e2e->pipe.src.source_format,
+ pipe_e2e->pipe.scale_taps.htaps,
+ pipe_e2e->pipe.scale_taps.htaps_c,
+ pipe_e2e->pipe.scale_taps.vtaps,
+ pipe_e2e->pipe.scale_taps.vtaps_c,
+ /* Output */
+ &pscl_throughput, &pscl_throughput_chroma,
+ &dpp_clk_single_dpp);
clock = dpp_clk_single_dpp * (1 + context->bw_ctx.dml.soc.dcn_downspread_percent / 100);
if (clock > 0)
- clk_frequency = vco_speed * 4.0 / ((int)(vco_speed * 4.0));
+ clk_frequency = vco_speed * 4.0 / ((int)(vco_speed * 4.0) / clock);
- if (clk_frequency > context->bw_ctx.dml.soc.clock_limits[index].dppclk_mhz)
- return true;
- else
- return false;
+ if (odm_mode == dm_odm_combine_mode_2to1)
+ num_splits = 1;
+ else if (odm_mode == dm_odm_combine_mode_4to1)
+ num_splits = 3;
+ else if (clk_frequency > context->bw_ctx.dml.soc.clock_limits[context->bw_ctx.dml.soc.num_states - 1].dppclk_mhz)
+ num_splits = 1;
+
+ return num_splits;
}
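+/*
+ * Hypothetical caller (illustration only, not from the driver): the return
+ * value counts extra pipes, so the total pipes consumed is num_splits + 1.
+ *
+ *	uint8_t splits = dcn32_predict_pipe_split(context, &pipes[i]);
+ *	// 0 -> single pipe, 1 -> 2:1 combine, 3 -> 4:1 combine
+ */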
static float calculate_net_bw_in_kbytes_sec(struct _vcs_dpi_voltage_scaling_st *entry)
@@ -409,7 +499,14 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
}
/**
- * dcn32_set_phantom_stream_timing: Set timing params for the phantom stream
+ * dcn32_set_phantom_stream_timing - Set timing params for the phantom stream
+ * @dc: current dc state
+ * @context: new dc state
+ * @ref_pipe: Main pipe for the phantom stream
+ * @phantom_stream: target phantom stream state
+ * @pipes: DML pipe params
+ * @pipe_cnt: number of DML pipes
+ * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*
* Set timing params of the phantom stream based on calculated output from DML.
* This function first gets the DML pipe index using the DC pipe index, then
@@ -422,13 +519,6 @@ void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
* that separately.
*
* - Set phantom backporch = vstartup of main pipe
- *
- * @dc: current dc state
- * @context: new dc state
- * @ref_pipe: Main pipe for the phantom stream
- * @pipes: DML pipe params
- * @pipe_cnt: number of DML pipes
- * @dc_pipe_idx: DC pipe index for the main pipe (i.e. ref_pipe)
*/
void dcn32_set_phantom_stream_timing(struct dc *dc,
struct dc_state *context,
@@ -497,16 +587,14 @@ void dcn32_set_phantom_stream_timing(struct dc *dc,
}
/**
- * dcn32_get_num_free_pipes: Calculate number of free pipes
+ * dcn32_get_num_free_pipes - Calculate number of free pipes
+ * @dc: current dc state
+ * @context: new dc state
*
* This function assumes that a "used" pipe is a pipe that has
* both a stream and a plane assigned to it.
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * Number of free pipes available in the context
+ * Return: Number of free pipes available in the context
*/
static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *context)
{
@@ -530,7 +618,10 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
}
/**
- * dcn32_assign_subvp_pipe: Function to decide which pipe will use Sub-VP.
+ * dcn32_assign_subvp_pipe - Function to decide which pipe will use Sub-VP.
+ * @dc: current dc state
+ * @context: new dc state
+ * @index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
*
* We enter this function if we are Sub-VP capable (i.e. enough pipes available)
* and regular P-State switching (i.e. VACTIVE/VBLANK) is not supported, or if
@@ -544,12 +635,7 @@ static unsigned int dcn32_get_num_free_pipes(struct dc *dc, struct dc_state *con
* for determining which should be the SubVP pipe (need a way to determine if a pipe / plane doesn't
* support MCLK switching naturally [i.e. ACTIVE or VBLANK]).
*
- * @param dc: current dc state
- * @param context: new dc state
- * @param index: [out] dc pipe index for the pipe chosen to have phantom pipes assigned
- *
- * Return:
- * True if a valid pipe assignment was found for Sub-VP. Otherwise false.
+ * Return: True if a valid pipe assignment was found for Sub-VP. Otherwise false.
*/
static bool dcn32_assign_subvp_pipe(struct dc *dc,
struct dc_state *context,
@@ -579,9 +665,10 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* - Not able to switch in vactive naturally (switching in active means the
* DET provides enough buffer to hide the P-State switch latency -- trying
* to combine this with SubVP can cause issues with the scheduling).
+ * - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 &&
+ pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
while (pipe) {
num_pipes++;
@@ -615,7 +702,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
}
/**
- * dcn32_enough_pipes_for_subvp: Function to check if there are "enough" pipes for SubVP.
+ * dcn32_enough_pipes_for_subvp - Function to check if there are "enough" pipes for SubVP.
+ * @dc: current dc state
+ * @context: new dc state
*
* This function returns true if there are enough free pipes
* to create the required phantom pipes for any given stream
@@ -627,9 +716,6 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* pipe which can be used as the phantom pipe for the non pipe
* split pipe.
*
- * @dc: current dc state
- * @context: new dc state
- *
* Return:
* True if there are enough free pipes to assign phantom pipes to at least one
* stream that does not already have phantom pipes assigned. Otherwise false.
@@ -668,7 +754,9 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
}
/**
- * subvp_subvp_schedulable: Determine if SubVP + SubVP config is schedulable
+ * subvp_subvp_schedulable - Determine if SubVP + SubVP config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
*
* High level algorithm:
* 1. Find longest microschedule length (in us) between the two SubVP pipes
@@ -676,11 +764,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
* pipes still allows for the maximum microschedule to fit in the active
* region for both pipes.
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + SubVP config is schedulable, false otherwise
+ * Return: True if the SubVP + SubVP config is schedulable, false otherwise
*/
static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
{
@@ -740,7 +824,10 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
}
/**
- * subvp_drr_schedulable: Determine if SubVP + DRR config is schedulable
+ * subvp_drr_schedulable - Determine if SubVP + DRR config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
+ * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
@@ -749,12 +836,7 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
* 3.If (SubVP Active - Prefetch > Stretched DRR frame + max(MALL region, Stretched DRR frame))
* then report the configuration as supported
*
- * @dc: current dc state
- * @context: new dc state
- * @drr_pipe: DRR pipe_ctx for the SubVP + DRR config
- *
- * Return:
- * bool - True if the SubVP + DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + DRR config is schedulable, false otherwise
*/
static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struct pipe_ctx *drr_pipe)
{
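/* Editor's illustrative sketch, not part of this patch: step 3 of the
 * algorithm above as a single expression. All parameters are hypothetical
 * pre-computed times in microseconds.
 */
static bool subvp_drr_fits_sketch(double subvp_active_us, double prefetch_us,
				  double stretched_drr_us, double mall_region_us)
{
	double worst_case_us = mall_region_us > stretched_drr_us ?
			       mall_region_us : stretched_drr_us;

	return subvp_active_us - prefetch_us > stretched_drr_us + worst_case_us;
}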
@@ -818,7 +900,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
/**
- * subvp_vblank_schedulable: Determine if SubVP + VBLANK config is schedulable
+ * subvp_vblank_schedulable - Determine if SubVP + VBLANK config is schedulable
+ * @dc: current dc state
+ * @context: new dc state
*
* High level algorithm:
* 1. Get timing for SubVP pipe, phantom pipe, and VBLANK pipe
@@ -826,11 +910,7 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
* then report the configuration as supported
* 3. If the VBLANK display is DRR, then take the DRR static schedulability path
*
- * @dc: current dc state
- * @context: new dc state
- *
- * Return:
- * bool - True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
+ * Return: True if the SubVP + VBLANK/DRR config is schedulable, false otherwise
*/
static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
{
@@ -907,20 +987,18 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
}
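/* Editor's illustrative sketch, not part of this patch: the control flow
 * described in the kernel-doc above. The vblank-fit test in step 2 is an
 * assumed form, since the exact condition is elided by this hunk.
 */
static bool subvp_vblank_fits_sketch(bool vblank_display_is_drr,
				     bool drr_schedulable,
				     double microschedule_us,
				     double vblank_us)
{
	if (vblank_display_is_drr)
		return drr_schedulable;  /* step 3: take the DRR path */

	/* step 2 (assumed form): the SubVP microschedule must fit in the
	 * vblank region of the VBLANK display
	 */
	return microschedule_us <= vblank_us;
}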
/**
- * subvp_validate_static_schedulability: Check which SubVP case is calculated and handle
- * static analysis based on the case.
+ * subvp_validate_static_schedulability - Check which SubVP case is calculated
+ * and handle static analysis based on the case.
+ * @dc: current dc state
+ * @context: new dc state
+ * @vlevel: Voltage level calculated by DML
*
* Three cases:
* 1. SubVP + SubVP
* 2. SubVP + VBLANK (DRR checked internally)
* 3. SubVP + VACTIVE (currently unsupported)
*
- * @dc: current dc state
- * @context: new dc state
- * @vlevel: Voltage level calculated by DML
- *
- * Return:
- * bool - True if statically schedulable, false otherwise
+ * Return: True if statically schedulable, false otherwise
*/
static bool subvp_validate_static_schedulability(struct dc *dc,
struct dc_state *context,
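/* Editor's illustrative sketch, not part of this patch: the three-way
 * dispatch described in the kernel-doc above. How subvp_count and
 * has_vblank_pipe are derived from the context is assumed here.
 */
static bool subvp_dispatch_sketch(struct dc *dc, struct dc_state *context,
				  int subvp_count, bool has_vblank_pipe)
{
	if (subvp_count == 2)            /* case 1: SubVP + SubVP */
		return subvp_subvp_schedulable(dc, context);
	if (has_vblank_pipe)             /* case 2: SubVP + VBLANK (DRR inside) */
		return subvp_vblank_schedulable(dc, context);
	return false;                    /* case 3: SubVP + VACTIVE unsupported */
}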
@@ -1019,12 +1097,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
*/
if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
- !dcn32_mpo_in_use(context) && (*vlevel == context->bw_ctx.dml.soc.num_states ||
+ !dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) &&
+ (*vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
- // to re-initialize viewport after the pipe merge
+ memset(merge, 0, MAX_PIPES * sizeof(bool));
+
+ /* to re-initialize viewport after the pipe merge */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -1109,17 +1190,23 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
vba->VoltageLevel = *vlevel;
}
} else {
- // only call dcn20_validate_apply_pipe_split_flags if we found a supported config
- memset(split, 0, MAX_PIPES * sizeof(int));
- memset(merge, 0, MAX_PIPES * sizeof(bool));
- *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
- vba->VoltageLevel = *vlevel;
-
// Must populate phantom DLG params before programming hardware / timing for phantom pipe
DC_FP_START();
dcn32_helper_populate_phantom_dlg_params(dc, context, pipes, *pipe_cnt);
DC_FP_END();
+ /* Call validate_apply_pipe_split flags after calling DML getters for
+ * phantom dlg params; otherwise some of the VBA params indicating pipe
+ * split can be overwritten by the getters.
+ *
+ * When setting up SubVP config, all pipes are merged before attempting to
+ * add phantom pipes. If pipe split (ODM / MPC) is required, both the main
+ * and phantom pipes will be split in the regular pipe splitting sequence.
+ */
+ memset(split, 0, MAX_PIPES * sizeof(int));
+ memset(merge, 0, MAX_PIPES * sizeof(bool));
+ *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ vba->VoltageLevel = *vlevel;
// Note: We can't apply the phantom pipes to hardware at this time. We have to wait
// until the driver has acquired the DMCUB lock to do it safely.
}
@@ -1487,6 +1574,33 @@ bool dcn32_internal_validate_bw(struct dc *dc,
if (pipe->next_odm_pipe)
pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
+ /* 2:1 ODM + MPC Split MPO to Single Pipe + MPC Split MPO */
+ if (pipe->bottom_pipe) {
+ if (pipe->bottom_pipe->prev_odm_pipe || pipe->bottom_pipe->next_odm_pipe) {
+ /*MPC split rules will handle this case*/
+ pipe->bottom_pipe->top_pipe = NULL;
+ } else {
+ /* when merging ODM pipes, the bottom MPC pipe must now point to
+ * the previous ODM pipe and its associated stream assets
+ */
+ if (pipe->prev_odm_pipe->bottom_pipe) {
+ /* 3 plane MPO */
+ pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe->bottom_pipe;
+ pipe->prev_odm_pipe->bottom_pipe->bottom_pipe = pipe->bottom_pipe;
+ } else {
+ /* 2 plane MPO */
+ pipe->bottom_pipe->top_pipe = pipe->prev_odm_pipe;
+ pipe->prev_odm_pipe->bottom_pipe = pipe->bottom_pipe;
+ }
+
+ memcpy(&pipe->bottom_pipe->stream_res, &pipe->bottom_pipe->top_pipe->stream_res, sizeof(struct stream_resource));
+ }
+ }
+
+ if (pipe->top_pipe) {
+ pipe->top_pipe->bottom_pipe = NULL;
+ }
+
pipe->bottom_pipe = NULL;
pipe->next_odm_pipe = NULL;
pipe->plane_state = NULL;
@@ -1619,8 +1733,20 @@ bool dcn32_internal_validate_bw(struct dc *dc,
goto validate_fail;
}
- if (repopulate_pipes)
+ if (repopulate_pipes) {
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+
+ /* repopulate_pipes = 1 means the pipes were either split or merged. In this case
+ * we have to re-calculate the DET allocation and run through DML once more to
+ * ensure all the params are calculated correctly. We do not need to run the
+ * pipe split check again after this call (pipes are already split / merged).
+ */
+ if (!fast_validate) {
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+ }
+ }
*vlevel_out = vlevel;
*pipe_cnt_out = pipe_cnt;
@@ -1643,6 +1769,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
int i, pipe_idx, vlevel_temp = 0;
double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
dm_dram_clock_change_unsupported;
unsigned int dummy_latency_index = 0;
@@ -1666,7 +1793,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(dc, context);
if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- dummy_latency_index = dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
+ dummy_latency_index = dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(dc,
context, pipes, pipe_cnt, vlevel);
/* After calling dcn32_find_dummy_latency_index_for_fw_based_mclk_switch
@@ -1678,7 +1805,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
- dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] !=
dm_dram_clock_change_unsupported;
}
@@ -1764,6 +1891,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_fw_based_mclk_switching;
+ }
+
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
min_dram_speed_mts_margin = 160;
@@ -1906,6 +2037,45 @@ static void remove_entry_from_table_at_index(struct _vcs_dpi_voltage_scaling_st
memset(&table[--(*num_entries)], 0, sizeof(struct _vcs_dpi_voltage_scaling_st));
}
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params)
+{
+ int i;
+ unsigned int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0,
+ max_phyclk_mhz = 0, max_dtbclk_mhz = 0, max_fclk_mhz = 0, max_uclk_mhz = 0;
+
+ for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+ if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+ max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+ if (bw_params->clk_table.entries[i].fclk_mhz > max_fclk_mhz)
+ max_fclk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ if (bw_params->clk_table.entries[i].memclk_mhz > max_uclk_mhz)
+ max_uclk_mhz = bw_params->clk_table.entries[i].memclk_mhz;
+ if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+ if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+ if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+ max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+ if (bw_params->clk_table.entries[i].dtbclk_mhz > max_dtbclk_mhz)
+ max_dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz;
+ }
+
+ /* Scan through the clock values we currently have and, if any are 0,
+ * populate them with the dcn3_2_soc.clock_limits[] value.
+ *
+ * Do it for DCFCLK, DISPCLK, DTBCLK and UCLK, as any of those being
+ * 0 will cause the clock table build to be skipped.
+ */
+ if (max_dcfclk_mhz == 0)
+ bw_params->clk_table.entries[0].dcfclk_mhz = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
+ if (max_dispclk_mhz == 0)
+ bw_params->clk_table.entries[0].dispclk_mhz = dcn3_2_soc.clock_limits[0].dispclk_mhz;
+ if (max_dtbclk_mhz == 0)
+ bw_params->clk_table.entries[0].dtbclk_mhz = dcn3_2_soc.clock_limits[0].dtbclk_mhz;
+ if (max_uclk_mhz == 0)
+ bw_params->clk_table.entries[0].memclk_mhz = dcn3_2_soc.clock_limits[0].dram_speed_mts / 16;
+}
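/* Editor's illustrative usage sketch, not part of this patch: if every
 * clock level read back from SMU is zero, the patched table falls back to
 * the dcn3_2_soc.clock_limits[0] defaults so state building is not skipped.
 */
static void patch_dpm_table_example(void)
{
	struct clk_bw_params params = { 0 };	/* hypothetical all-zero SMU result */

	dcn32_patch_dpm_table(&params);
	/* params.clk_table.entries[0] now carries non-zero dcfclk/dispclk/
	 * dtbclk/memclk values taken from dcn3_2_soc.clock_limits[0]
	 */
}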
+
static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
struct _vcs_dpi_voltage_scaling_st *table, unsigned int *num_entries)
{
@@ -2098,7 +2268,7 @@ static int build_synthetic_soc_states(struct clk_bw_params *bw_params,
return 0;
}
-/**
+/*
* dcn32_update_bw_bounding_box
*
* This would override some dcn3_2 ip_or_soc initial parameters hardcoded from
@@ -2140,6 +2310,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
if ((int)(dcn3_2_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_2_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ dcn3_2_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if ((int)(dcn3_2_soc.dram_clock_change_latency_us * 1000)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index 3ed06ab855be..3a3dc2ce4c73 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -29,11 +29,6 @@
#include "clk_mgr_internal.h"
-#define DCN3_2_DEFAULT_DET_SIZE 256
-#define DCN3_2_MAX_DET_SIZE 1152
-#define DCN3_2_MIN_DET_SIZE 128
-#define DCN3_2_MIN_COMPBUF_SIZE_KB 128
-
void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr);
void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
@@ -41,9 +36,8 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
-bool dcn32_predict_pipe_split(struct dc_state *context,
- display_pipe_params_st pipe,
- int index);
+uint8_t dcn32_predict_pipe_split(struct dc_state *context,
+ display_e2e_pipe_params_st *pipe_e2e);
void insert_entry_into_table_sorted(struct _vcs_dpi_voltage_scaling_st *table,
unsigned int *num_entries,
@@ -71,4 +65,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params);
+int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
+void dcn32_patch_dpm_table(struct clk_bw_params *bw_params);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index d8014bfbc3fe..5b91660a6496 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -733,6 +733,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
mode_lib->vba.FCLKChangeLatency, v->UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
+ memset(&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, 0, sizeof(DmlPipe));
+
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dppclk = mode_lib->vba.DPPCLK[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.Dispclk = mode_lib->vba.DISPCLK;
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.PixelClock = mode_lib->vba.PixelClock[k];
@@ -755,30 +757,18 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
- v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
- &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
- mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- (unsigned int) (v->SwathWidthY[k] / mode_lib->vba.HRatio[k]),
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
+ v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
+ v,
+ k,
+ v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
+ &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe,
+ v->DSCDelay[k],
+ (unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable,
- mode_lib->vba.HostVMEnable,
- mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
- mode_lib->vba.HostVMMinPageSize,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataVMEnabled,
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
- mode_lib->vba.TCalc,
+ v->TCalc,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
@@ -792,8 +782,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
- mode_lib->vba.SwathHeightY[k],
- mode_lib->vba.SwathHeightC[k],
+ v->SwathHeightY[k],
+ v->SwathHeightC[k],
TWait,
/* Output */
&v->DSTXAfterScaler[k],
@@ -1163,58 +1153,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- mode_lib->vba.USRRetrainingRequiredFinal,
- mode_lib->vba.UsesMALLForPStateChange,
- mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
- mode_lib->vba.NumberOfActiveSurfaces,
- mode_lib->vba.MaxLineBufferLines,
- mode_lib->vba.LineBufferSizeFinal,
- mode_lib->vba.WritebackInterfaceBufferSize,
- mode_lib->vba.DCFCLK,
- mode_lib->vba.ReturnBW,
- mode_lib->vba.SynchronizeTimingsFinal,
- mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- mode_lib->vba.DRRDisplay,
- v->dpte_group_bytes,
- v->meta_row_height,
- v->meta_row_height_chroma,
+ v,
+ v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb],
+ v->DCFCLK,
+ v->ReturnBW,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters,
- mode_lib->vba.WritebackChunkSize,
- mode_lib->vba.SOCCLK,
+ v->SOCCLK,
v->DCFCLKDeepSleep,
- mode_lib->vba.DETBufferSizeY,
- mode_lib->vba.DETBufferSizeC,
- mode_lib->vba.SwathHeightY,
- mode_lib->vba.SwathHeightC,
- mode_lib->vba.LBBitPerPixel,
+ v->DETBufferSizeY,
+ v->DETBufferSizeC,
+ v->SwathHeightY,
+ v->SwathHeightC,
v->SwathWidthY,
v->SwathWidthC,
- mode_lib->vba.HRatio,
- mode_lib->vba.HRatioChroma,
- mode_lib->vba.vtaps,
- mode_lib->vba.VTAPsChroma,
- mode_lib->vba.VRatio,
- mode_lib->vba.VRatioChroma,
- mode_lib->vba.HTotal,
- mode_lib->vba.VTotal,
- mode_lib->vba.VActive,
- mode_lib->vba.PixelClock,
- mode_lib->vba.BlendingAndTiming,
- mode_lib->vba.DPPPerPlane,
+ v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
- mode_lib->vba.WritebackEnable,
- mode_lib->vba.WritebackPixelFormat,
- mode_lib->vba.WritebackDestinationWidth,
- mode_lib->vba.WritebackDestinationHeight,
- mode_lib->vba.WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
/* Output */
- &v->Watermark,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_dramchange_support,
v->MaxActiveDRAMClockChangeLatencySupported,
v->SubViewportLinesNeededInMALL,
@@ -1806,10 +1766,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.Read256BlockHeightC[k],
&mode_lib->vba.Read256BlockWidthY[k],
&mode_lib->vba.Read256BlockWidthC[k],
- &mode_lib->vba.MicroTileHeightY[k],
- &mode_lib->vba.MicroTileHeightC[k],
- &mode_lib->vba.MicroTileWidthY[k],
- &mode_lib->vba.MicroTileWidthC[k]);
+ &mode_lib->vba.MacroTileHeightY[k],
+ &mode_lib->vba.MacroTileHeightC[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &mode_lib->vba.MacroTileWidthC[k]);
}
/*Bandwidth Support Check*/
@@ -2034,6 +1994,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
+ mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
@@ -2056,6 +2017,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
dml32_CalculateODMMode(
mode_lib->vba.MaximumPixelsPerLinePerDSCUnit,
mode_lib->vba.HActive[k],
+ mode_lib->vba.OutputFormat[k],
mode_lib->vba.Output[k],
mode_lib->vba.ODMUse[k],
mode_lib->vba.MaxDispclk[i],
@@ -2292,9 +2254,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
|| mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
- || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0
- || mode_lib->vba.DSCInputBitPerComponent[k] >
- mode_lib->vba.MaximumDSCBitsPerComponent)) {
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)
+ || mode_lib->vba.DSCInputBitPerComponent[k] > mode_lib->vba.MaximumDSCBitsPerComponent) {
mode_lib->vba.NonsupportedDSCInputBPC = true;
}
}
@@ -2370,16 +2331,15 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
- if (mode_lib->vba.OutputMultistreamId[k] == j && mode_lib->vba.OutputMultistreamEn[k]
+ if (mode_lib->vba.OutputMultistreamId[k] == j
&& mode_lib->vba.ForcedOutputLinkBPP[k] == 0)
mode_lib->vba.BPPForMultistreamNotIndicated = true;
}
}
if ((mode_lib->vba.Output[k] == dm_edp || mode_lib->vba.Output[k] == dm_hdmi)) {
- if (mode_lib->vba.OutputMultistreamId[k] == k && mode_lib->vba.OutputMultistreamEn[k])
+ if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
-
for (j = 0; j < mode_lib->vba.NumberOfActiveSurfaces; ++j) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == j)
mode_lib->vba.MultistreamWithHDMIOreDP = true;
@@ -2518,8 +2478,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k]);
}
- m = 0;
-
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
for (m = 0; m <= mode_lib->vba.NumberOfActiveSurfaces - 1; m++) {
for (j = 0; j <= mode_lib->vba.NumberOfActiveSurfaces - 1; j++) {
@@ -2661,10 +2619,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.Read256BlockWidthC,
mode_lib->vba.Read256BlockHeightY,
mode_lib->vba.Read256BlockHeightC,
- mode_lib->vba.MicroTileWidthY,
- mode_lib->vba.MicroTileWidthC,
- mode_lib->vba.MicroTileHeightY,
- mode_lib->vba.MicroTileHeightC,
+ mode_lib->vba.MacroTileWidthY,
+ mode_lib->vba.MacroTileWidthC,
+ mode_lib->vba.MacroTileHeightY,
+ mode_lib->vba.MacroTileHeightC,
/* Output */
mode_lib->vba.SurfaceSizeInMALL,
@@ -2711,10 +2669,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesY = mode_lib->vba.Read256BlockHeightY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidth256BytesC = mode_lib->vba.Read256BlockWidthC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesC = mode_lib->vba.Read256BlockHeightC[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MicroTileWidthY[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MicroTileHeightY[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MicroTileWidthC[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MicroTileHeightC[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MacroTileWidthY[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MacroTileHeightY[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MacroTileWidthC[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MacroTileHeightC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].InterlaceEnable = mode_lib->vba.Interlace[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].HTotal = mode_lib->vba.HTotal[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCEnable = mode_lib->vba.DCCEnable[k];
@@ -2896,8 +2854,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- m = 0;
-
//Calculate Return BW
for (i = 0; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
@@ -3260,63 +3216,47 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NoTimeForPrefetch[i][j][k] =
dml32_CalculatePrefetchSchedule(
+ v,
+ k,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
- mode_lib->vba.DSCDelayPerState[i][k],
- mode_lib->vba.DPPCLKDelaySubtotal +
- mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- mode_lib->vba.SwathWidthYThisState[k] /
- mode_lib->vba.HRatio[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
- dml_min(mode_lib->vba.MaxVStartup,
- mode_lib->vba.MaximumVStartup[i][j][k]),
- mode_lib->vba.MaximumVStartup[i][j][k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable, mode_lib->vba.HostVMEnable,
- mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
- mode_lib->vba.HostVMMinPageSize,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataVMEnabled,
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
- mode_lib->vba.UrgLatency[i],
- mode_lib->vba.ExtraLatency,
- mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k],
- mode_lib->vba.MetaRowBytes[i][j][k],
- mode_lib->vba.DPTEBytesPerRow[i][j][k],
- mode_lib->vba.PrefetchLinesY[i][j][k],
- mode_lib->vba.SwathWidthYThisState[k],
- mode_lib->vba.PrefillY[k],
- mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[i][j][k],
- mode_lib->vba.SwathWidthCThisState[k],
- mode_lib->vba.PrefillC[k],
- mode_lib->vba.MaxNumSwC[k],
- mode_lib->vba.swath_width_luma_ub_this_state[k],
- mode_lib->vba.swath_width_chroma_ub_this_state[k],
- mode_lib->vba.SwathHeightYThisState[k],
- mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.TWait,
+ v->DSCDelayPerState[i][k],
+ v->SwathWidthYThisState[k] / v->HRatio[k],
+ dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
+ v->MaximumVStartup[i][j][k],
+ v->UrgLatency[i],
+ v->ExtraLatency,
+ v->TimeCalc,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+ v->DPTEBytesPerRow[i][j][k],
+ v->PrefetchLinesY[i][j][k],
+ v->SwathWidthYThisState[k],
+ v->PrefillY[k],
+ v->MaxNumSwY[k],
+ v->PrefetchLinesC[i][j][k],
+ v->SwathWidthCThisState[k],
+ v->PrefillC[k],
+ v->MaxNumSwC[k],
+ v->swath_width_luma_ub_this_state[k],
+ v->swath_width_chroma_ub_this_state[k],
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k], v->TWait,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler[k],
- &mode_lib->vba.LineTimesForPrefetch[k],
- &mode_lib->vba.PrefetchBW[k],
- &mode_lib->vba.LinesForMetaPTE[k],
- &mode_lib->vba.LinesForMetaAndDPTERow[k],
- &mode_lib->vba.VRatioPreY[i][j][k],
- &mode_lib->vba.VRatioPreC[i][j][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0][k],
- &mode_lib->vba.NoTimeForDynamicMetadata[i][j][k],
- &mode_lib->vba.Tno_bw[k],
- &mode_lib->vba.prefetch_vmrow_bw[k],
+ &v->LineTimesForPrefetch[k],
+ &v->PrefetchBW[k],
+ &v->LinesForMetaPTE[k],
+ &v->LinesForMetaAndDPTERow[k],
+ &v->VRatioPreY[i][j][k],
+ &v->VRatioPreC[i][j][k],
+ &v->RequiredPrefetchPixelDataBWLuma[0][0][k],
+ &v->RequiredPrefetchPixelDataBWChroma[0][0][k],
+ &v->NoTimeForDynamicMetadata[i][j][k],
+ &v->Tno_bw[k],
+ &v->prefetch_vmrow_bw[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // double *Tdmdl_vm
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1], // double *Tdmdl
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[2], // double *TSetup
@@ -3559,62 +3499,32 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- mode_lib->vba.USRRetrainingRequiredFinal,
- mode_lib->vba.UsesMALLForPStateChange,
- mode_lib->vba.PrefetchModePerState[i][j],
- mode_lib->vba.NumberOfActiveSurfaces,
- mode_lib->vba.MaxLineBufferLines,
- mode_lib->vba.LineBufferSizeFinal,
- mode_lib->vba.WritebackInterfaceBufferSize,
- mode_lib->vba.DCFCLKState[i][j],
- mode_lib->vba.ReturnBWPerState[i][j],
- mode_lib->vba.SynchronizeTimingsFinal,
- mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- mode_lib->vba.DRRDisplay,
- mode_lib->vba.dpte_group_bytes,
- mode_lib->vba.meta_row_height,
- mode_lib->vba.meta_row_height_chroma,
+ v,
+ v->PrefetchModePerState[i][j],
+ v->DCFCLKState[i][j],
+ v->ReturnBWPerState[i][j],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters,
- mode_lib->vba.WritebackChunkSize,
- mode_lib->vba.SOCCLKPerState[i],
- mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j],
- mode_lib->vba.DETBufferSizeYThisState,
- mode_lib->vba.DETBufferSizeCThisState,
- mode_lib->vba.SwathHeightYThisState,
- mode_lib->vba.SwathHeightCThisState,
- mode_lib->vba.LBBitPerPixel,
- mode_lib->vba.SwathWidthYThisState, // 24
- mode_lib->vba.SwathWidthCThisState,
- mode_lib->vba.HRatio,
- mode_lib->vba.HRatioChroma,
- mode_lib->vba.vtaps,
- mode_lib->vba.VTAPsChroma,
- mode_lib->vba.VRatio,
- mode_lib->vba.VRatioChroma,
- mode_lib->vba.HTotal,
- mode_lib->vba.VTotal,
- mode_lib->vba.VActive,
- mode_lib->vba.PixelClock,
- mode_lib->vba.BlendingAndTiming,
- mode_lib->vba.NoOfDPPThisState,
- mode_lib->vba.BytePerPixelInDETY,
- mode_lib->vba.BytePerPixelInDETC,
+ v->SOCCLKPerState[i],
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->DETBufferSizeYThisState,
+ v->DETBufferSizeCThisState,
+ v->SwathHeightYThisState,
+ v->SwathHeightCThisState,
+ v->SwathWidthYThisState, // 24
+ v->SwathWidthCThisState,
+ v->NoOfDPPThisState,
+ v->BytePerPixelInDETY,
+ v->BytePerPixelInDETC,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler,
- mode_lib->vba.WritebackEnable,
- mode_lib->vba.WritebackPixelFormat,
- mode_lib->vba.WritebackDestinationWidth,
- mode_lib->vba.WritebackDestinationHeight,
- mode_lib->vba.WritebackSourceHeight,
- mode_lib->vba.UnboundedRequestEnabledThisState,
- mode_lib->vba.CompressedBufferSizeInkByteThisState,
+ v->UnboundedRequestEnabledThisState,
+ v->CompressedBufferSizeInkByteThisState,
/* Output */
- &mode_lib->vba.Watermark, // Store the values in vba
- &mode_lib->vba.DRAMClockChangeSupport[i][j],
+ &v->DRAMClockChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[0], // double *MaxActiveDRAMClockChangeLatencySupported
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], // Long SubViewportLinesNeededInMALL[]
- &mode_lib->vba.FCLKChangeSupport[i][j],
+ &v->FCLKChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[1], // double *MinActiveFCLKChangeLatencySupported
&mode_lib->vba.USRRetrainingSupport[i][j],
mode_lib->vba.ActiveDRAMClockChangeLatencyMarginPerState[i][j]);
@@ -3704,11 +3614,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.ModeIsSupported = mode_lib->vba.ModeSupport[i][0] == true
|| mode_lib->vba.ModeSupport[i][1] == true;
- if (mode_lib->vba.ModeSupport[i][0] == true) {
+ if (mode_lib->vba.ModeSupport[i][0] == true)
MaximumMPCCombine = 0;
- } else {
+ else
MaximumMPCCombine = 1;
- }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index dc501ee7d01a..ad66e241f9ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -27,6 +27,8 @@
#include "display_mode_vba_32.h"
#include "../display_mode_lib.h"
+#define DCN32_MAX_FMT_420_BUFFER_WIDTH 4096
+
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
double BPP,
@@ -719,8 +721,8 @@ void dml32_CalculateSwathWidth(
unsigned int surface_width_ub_l;
unsigned int surface_height_ub_l;
- unsigned int surface_width_ub_c;
- unsigned int surface_height_ub_c;
+ unsigned int surface_width_ub_c = 0;
+ unsigned int surface_height_ub_c = 0;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: ForceSingleDPP = %d\n", __func__, ForceSingleDPP);
@@ -784,21 +786,6 @@ void dml32_CalculateSwathWidth(
surface_width_ub_l = dml_ceil(SurfaceWidthY[k], Read256BytesBlockWidthY[k]);
surface_height_ub_l = dml_ceil(SurfaceHeightY[k], Read256BytesBlockHeightY[k]);
- surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
- surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
-
-#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
- dml_print("DML::%s: k=%d surface_height_ub_l=%0d\n", __func__, k, surface_height_ub_l);
- dml_print("DML::%s: k=%d surface_width_ub_c=%0d\n", __func__, k, surface_width_ub_c);
- dml_print("DML::%s: k=%d surface_height_ub_c=%0d\n", __func__, k, surface_height_ub_c);
- dml_print("DML::%s: k=%d Read256BytesBlockWidthY=%0d\n", __func__, k, Read256BytesBlockWidthY[k]);
- dml_print("DML::%s: k=%d Read256BytesBlockHeightY=%0d\n", __func__, k, Read256BytesBlockHeightY[k]);
- dml_print("DML::%s: k=%d Read256BytesBlockWidthC=%0d\n", __func__, k, Read256BytesBlockWidthC[k]);
- dml_print("DML::%s: k=%d Read256BytesBlockHeightC=%0d\n", __func__, k, Read256BytesBlockHeightC[k]);
- dml_print("DML::%s: k=%d ViewportStationary=%0d\n", __func__, k, ViewportStationary[k]);
- dml_print("DML::%s: k=%d DPPPerSurface=%0d\n", __func__, k, DPPPerSurface[k]);
-#endif
if (!IsVertical(SourceRotation[k])) {
MaximumSwathHeightY[k] = Read256BytesBlockHeightY[k];
@@ -818,6 +805,7 @@ void dml32_CalculateSwathWidth(
Read256BytesBlockWidthY[k]);
}
if (BytePerPixC[k] > 0) {
+ surface_width_ub_c = dml_ceil(SurfaceWidthC[k], Read256BytesBlockWidthC[k]);
if (ViewportStationary[k] && DPPPerSurface[k] == 1) {
swath_width_chroma_ub[k] = dml_min(surface_width_ub_c,
dml_floor(ViewportXStartC[k] + SwathWidthC[k] +
@@ -848,6 +836,7 @@ void dml32_CalculateSwathWidth(
Read256BytesBlockHeightY[k]) + Read256BytesBlockHeightY[k]);
}
if (BytePerPixC[k] > 0) {
+ surface_height_ub_c = dml_ceil(SurfaceHeightC[k], Read256BytesBlockHeightC[k]);
if (ViewportStationary[k] && DPPPerSurface[k] == 1) {
swath_width_chroma_ub[k] = dml_min(surface_height_ub_c,
dml_floor(ViewportYStartC[k] + SwathWidthC[k] +
@@ -866,6 +855,16 @@ void dml32_CalculateSwathWidth(
}
#ifdef __DML_VBA_DEBUG__
+ dml_print("DML::%s: k=%d surface_width_ub_l=%0d\n", __func__, k, surface_width_ub_l);
+ dml_print("DML::%s: k=%d surface_height_ub_l=%0d\n", __func__, k, surface_height_ub_l);
+ dml_print("DML::%s: k=%d surface_width_ub_c=%0d\n", __func__, k, surface_width_ub_c);
+ dml_print("DML::%s: k=%d surface_height_ub_c=%0d\n", __func__, k, surface_height_ub_c);
+ dml_print("DML::%s: k=%d Read256BytesBlockWidthY=%0d\n", __func__, k, Read256BytesBlockWidthY[k]);
+ dml_print("DML::%s: k=%d Read256BytesBlockHeightY=%0d\n", __func__, k, Read256BytesBlockHeightY[k]);
+ dml_print("DML::%s: k=%d Read256BytesBlockWidthC=%0d\n", __func__, k, Read256BytesBlockWidthC[k]);
+ dml_print("DML::%s: k=%d Read256BytesBlockHeightC=%0d\n", __func__, k, Read256BytesBlockHeightC[k]);
+ dml_print("DML::%s: k=%d ViewportStationary=%0d\n", __func__, k, ViewportStationary[k]);
+ dml_print("DML::%s: k=%d DPPPerSurface=%0d\n", __func__, k, DPPPerSurface[k]);
dml_print("DML::%s: k=%d swath_width_luma_ub=%0d\n", __func__, k, swath_width_luma_ub[k]);
dml_print("DML::%s: k=%d swath_width_chroma_ub=%0d\n", __func__, k, swath_width_chroma_ub[k]);
dml_print("DML::%s: k=%d MaximumSwathHeightY=%0d\n", __func__, k, MaximumSwathHeightY[k]);
@@ -1182,6 +1181,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
+ enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
@@ -1256,6 +1256,29 @@ void dml32_CalculateODMMode(
else
*TotalAvailablePipesSupport = false;
}
+ if (OutFormat == dm_420 && HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH &&
+ ODMUse != dm_odm_combine_policy_4to1) {
+ if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 4) {
+ *ODMMode = dm_odm_combine_mode_disabled;
+ *NumberOfDPP = 0;
+ *TotalAvailablePipesSupport = false;
+ } else if (HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH * 2 ||
+ *ODMMode == dm_odm_combine_mode_4to1) {
+ *ODMMode = dm_odm_combine_mode_4to1;
+ *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineFourToOne;
+ *NumberOfDPP = 4;
+ } else {
+ *ODMMode = dm_odm_combine_mode_2to1;
+ *RequiredDISPCLKPerSurface = SurfaceRequiredDISPCLKWithODMCombineTwoToOne;
+ *NumberOfDPP = 2;
+ }
+ }
+ if (Output == dm_hdmi && OutFormat == dm_420 &&
+ HActive > DCN32_MAX_FMT_420_BUFFER_WIDTH) {
+ *ODMMode = dm_odm_combine_mode_disabled;
+ *NumberOfDPP = 0;
+ *TotalAvailablePipesSupport = false;
+ }
}
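/* Editor's worked example, not part of this patch, assuming
 * DCN32_MAX_FMT_420_BUFFER_WIDTH == 4096:
 *   HActive = 5120,  dm_420, DP   -> 2to1 ODM    (5120  <= 2 * 4096)
 *   HActive = 9600,  dm_420, DP   -> 4to1 ODM    (9600  <= 4 * 4096)
 *   HActive = 17000, dm_420, DP   -> unsupported (exceeds 4 * 4096)
 *   HActive = 5120,  dm_420, HDMI -> unsupported (HDMI hits the second check)
 */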
double dml32_CalculateRequiredDispclk(
@@ -1873,7 +1896,7 @@ void dml32_CalculateSurfaceSizeInMall(
if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable)
TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
}
- *ExceededMALLSize = (TotalSurfaceSizeInMALL <= MALLAllocatedForDCN * 1024 * 1024 ? false : true);
+ *ExceededMALLSize = (TotalSurfaceSizeInMALL > MALLAllocatedForDCN * 1024 * 1024);
} // CalculateSurfaceSizeInMall
void dml32_CalculateVMRowAndSwath(
@@ -3366,28 +3389,14 @@ double dml32_CalculateExtraLatency(
} // CalculateExtraLatency
bool dml32_CalculatePrefetchSchedule(
+ struct vba_vars_st *v,
+ unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -3428,6 +3437,7 @@ bool dml32_CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler;
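/* Editor's aside, not part of this patch: the refactor pattern used in
 * this function, shown on a hypothetical miniature. A long scalar
 * parameter list is replaced by the vba_vars_st pointer v plus the
 * surface index k, and inputs are read straight from the struct.
 */
static double line_time_sketch(const struct vba_vars_st *v, unsigned int k)
{
	/* per-surface timing pulled from the shared state instead of being
	 * threaded through the call as individual arguments
	 */
	return v->HTotal[k] / v->PixelClock[k];
}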
@@ -3464,27 +3474,27 @@ bool dml32_CalculatePrefetchSchedule(
double Tsw_est1 = 0;
double Tsw_est3 = 0;
- if (GPUVMEnable == true && HostVMEnable == true)
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true)
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
else
HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
- dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
+ dml_print("DML::%s: v->GPUVMEnable = %d\n", __func__, v->GPUVMEnable);
+ dml_print("DML::%s: v->GPUVMMaxPageTableLevels = %d\n", __func__, v->GPUVMMaxPageTableLevels);
dml_print("DML::%s: DCCEnable = %d\n", __func__, myPipe->DCCEnable);
- dml_print("DML::%s: HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
- __func__, HostVMEnable, HostVMInefficiencyFactor);
+ dml_print("DML::%s: v->HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
+ __func__, v->HostVMEnable, HostVMInefficiencyFactor);
#endif
dml32_CalculateVUpdateAndDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
+ v->MaxInterDCNTileRepeaters,
myPipe->Dppclk,
myPipe->Dispclk,
myPipe->DCFClkDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
- DynamicMetadataTransmittedBytes,
- DynamicMetadataLinesBeforeActiveRequired,
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
myPipe->InterlaceEnable,
myPipe->ProgressiveToInterlaceUnitInOPP,
TSetup,
@@ -3499,19 +3509,19 @@ bool dml32_CalculatePrefetchSchedule(
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
- Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
- if (DynamicMetadataVMEnabled == true)
+ if (v->DynamicMetadataVMEnabled == true)
*Tdmdl = TWait + Tvm_trips + trip_to_mem;
else
*Tdmdl = TWait + UrgentExtraLatency;
#ifdef __DML_VBA_ALLOW_DELTA__
- if (DynamicMetadataEnable == false)
+ if (v->DynamicMetadataEnable[k] == false)
*Tdmdl = 0.0;
#endif
- if (DynamicMetadataEnable == true) {
+ if (v->DynamicMetadataEnable[k] == true) {
if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
#ifdef __DML_VBA_DEBUG__
@@ -3531,17 +3541,17 @@ bool dml32_CalculatePrefetchSchedule(
*NotEnoughTimeForDynamicMetadata = false;
}
- *Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
- GPUVMEnable == true ? TWait + Tvm_trips : 0);
+ *Tdmdl_vm = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true &&
+ v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
else
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
- DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
- DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = v->DISPCLKDelaySubtotal;
if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
return true;
@@ -3567,7 +3577,7 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__, *DSTXAfterScaler);
#endif
- if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
+ if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
@@ -3584,13 +3594,13 @@ bool dml32_CalculatePrefetchSchedule(
Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
- if (GPUVMPageTableLevels >= 3) {
+ if (v->GPUVMMaxPageTableLevels >= 3) {
*Tno_bw = UrgentExtraLatency + trip_to_mem *
- (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
- } else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
+ (double) ((v->GPUVMMaxPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
+ } else if (v->GPUVMMaxPageTableLevels == 1 && myPipe->DCCEnable != true) {
Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
4.0 * LineTime; // VBA_ERROR
*Tno_bw = UrgentExtraLatency;
@@ -3625,7 +3635,7 @@ bool dml32_CalculatePrefetchSchedule(
min_Lsw = dml_max(min_Lsw, 1.0);
Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
Tvm_oto = dml_max3(
Tvm_trips,
*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
@@ -3633,7 +3643,7 @@ bool dml32_CalculatePrefetchSchedule(
} else
Tvm_oto = LineTime / 4.0;
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max4(
Tr0_trips,
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
@@ -3836,7 +3846,7 @@ bool dml32_CalculatePrefetchSchedule(
#endif
if (prefetch_bw_equ > 0) {
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor / prefetch_bw_equ,
Tvm_trips, LineTime / 4);
@@ -3844,7 +3854,7 @@ bool dml32_CalculatePrefetchSchedule(
Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
(LineTime - Tvm_equ) / 2, LineTime / 4);
@@ -4209,58 +4219,28 @@ void dml32_CalculateFlipSchedule(
} // CalculateFlipSchedule
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- bool USRRetrainingRequiredFinal,
- enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+ struct vba_vars_st *v,
unsigned int PrefetchMode,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizeTimingsFinal,
- bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- bool DRRDisplay[],
- unsigned int dpte_group_bytes[],
- unsigned int meta_row_height[],
- unsigned int meta_row_height_chroma[],
SOCParametersList mmSOCParameters,
- unsigned int WritebackChunkSize,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int VTaps[],
- unsigned int VTapsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- unsigned int VTotal[],
- unsigned int VActive[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
- Watermarks *Watermark,
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
@@ -4302,136 +4282,136 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
- Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
- Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ v->Watermark.UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
+ v->Watermark.USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ mmSOCParameters.USRRetrainingLatency + mmSOCParameters.SMNLatency;
- Watermark->DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + Watermark->UrgentWatermark;
- Watermark->FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + Watermark->UrgentWatermark;
- Watermark->StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ v->Watermark.DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + v->Watermark.UrgentWatermark;
+ v->Watermark.FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + v->Watermark.UrgentWatermark;
+ v->Watermark.StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ v->Watermark.StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ v->Watermark.Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ v->Watermark.Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, mmSOCParameters.UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, mmSOCParameters.ExtraLatency);
dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, mmSOCParameters.DRAMClockChangeLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, Watermark->UrgentWatermark);
- dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, Watermark->USRRetrainingWatermark);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, Watermark->DRAMClockChangeWatermark);
- dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, Watermark->FCLKChangeWatermark);
- dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, Watermark->StutterExitWatermark);
- dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, Watermark->StutterEnterPlusExitWatermark);
- dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, Watermark->Z8StutterExitWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->Watermark.UrgentWatermark);
+ dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, v->Watermark.USRRetrainingWatermark);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->Watermark.DRAMClockChangeWatermark);
+ dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, v->Watermark.FCLKChangeWatermark);
+ dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, v->Watermark.StutterExitWatermark);
+ dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, v->Watermark.StutterEnterPlusExitWatermark);
+ dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, v->Watermark.Z8StutterExitWatermark);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n",
- __func__, Watermark->Z8StutterEnterPlusExitWatermark);
+ __func__, v->Watermark.Z8StutterEnterPlusExitWatermark);
#endif
TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (WritebackEnable[k] == true)
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->WritebackEnable[k] == true)
TotalActiveWriteback = TotalActiveWriteback + 1;
}
if (TotalActiveWriteback <= 1) {
- Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
+ v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
} else {
- Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
- + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
+ + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark
+ mmSOCParameters.USRRetrainingLatency;
if (TotalActiveWriteback <= 1) {
- Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency;
- Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ mmSOCParameters.WritebackLatency;
} else {
- Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
- + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
- Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
- + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024 / 32 / SOCCLK;
+ v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024 / 32 / SOCCLK;
}
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackDRAMClockChangeWatermark = Watermark->WritebackDRAMClockChangeWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackFCLKChangeWatermark = Watermark->WritebackFCLKChangeWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackFCLKChangeWatermark = v->Watermark.WritebackFCLKChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: WritebackDRAMClockChangeWatermark = %f\n",
- __func__, Watermark->WritebackDRAMClockChangeWatermark);
- dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, Watermark->WritebackFCLKChangeWatermark);
- dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, Watermark->WritebackUrgentWatermark);
- dml_print("DML::%s: USRRetrainingRequiredFinal = %d\n", __func__, USRRetrainingRequiredFinal);
+ __func__, v->Watermark.WritebackDRAMClockChangeWatermark);
+ dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, v->Watermark.WritebackFCLKChangeWatermark);
+ dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, v->Watermark.WritebackUrgentWatermark);
+ dml_print("DML::%s: v->USRRetrainingRequiredFinal = %d\n", __func__, v->USRRetrainingRequiredFinal);
dml_print("DML::%s: USRRetrainingLatency = %f\n", __func__, mmSOCParameters.USRRetrainingLatency);
#endif
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
- SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] +
+ SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
- LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
- LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+ LBLatencyHidingSourceLinesY[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
+ LBLatencyHidingSourceLinesC[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: k=%d, MaxLineBufferLines = %d\n", __func__, k, MaxLineBufferLines);
- dml_print("DML::%s: k=%d, LineBufferSize = %d\n", __func__, k, LineBufferSize);
- dml_print("DML::%s: k=%d, LBBitPerPixel = %d\n", __func__, k, LBBitPerPixel[k]);
- dml_print("DML::%s: k=%d, HRatio = %f\n", __func__, k, HRatio[k]);
- dml_print("DML::%s: k=%d, VTaps = %d\n", __func__, k, VTaps[k]);
+ dml_print("DML::%s: k=%d, v->MaxLineBufferLines = %d\n", __func__, k, v->MaxLineBufferLines);
+ dml_print("DML::%s: k=%d, v->LineBufferSizeFinal = %d\n", __func__, k, v->LineBufferSizeFinal);
+ dml_print("DML::%s: k=%d, v->LBBitPerPixel = %d\n", __func__, k, v->LBBitPerPixel[k]);
+ dml_print("DML::%s: k=%d, v->HRatio = %f\n", __func__, k, v->HRatio[k]);
+ dml_print("DML::%s: k=%d, v->vtaps = %d\n", __func__, k, v->vtaps[k]);
#endif
- EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
- EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
+ EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveDETBufferSizeY = DETBufferSizeY[k];
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024
- * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
- / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k])
+ / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
+ - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k];
- if (NumberOfActiveSurfaces > 1) {
+ if (v->NumberOfActiveSurfaces > 1) {
ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
- - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
- / PixelClock[k] / VRatio[k];
+ - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
+ / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
- / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k])
+ / v->VRatioChroma[k];
ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
- / PixelClock[k];
- if (NumberOfActiveSurfaces > 1) {
+ - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k]
+ / v->PixelClock[k];
+ if (v->NumberOfActiveSurfaces > 1) {
ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
- - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
- / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightC[k] * v->HTotal[k]
+ / v->PixelClock[k] / v->VRatioChroma[k];
}
ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
ActiveClockChangeLatencyHidingC);
@@ -4439,24 +4419,24 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
}
- ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- - Watermark->DRAMClockChangeWatermark;
- ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- - Watermark->FCLKChangeWatermark;
- USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
-
- if (WritebackEnable[k]) {
- WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
- / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64)
+ ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+ - v->Watermark.DRAMClockChangeWatermark;
+ ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+ - v->Watermark.FCLKChangeWatermark;
+ USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.USRRetrainingWatermark;
+
+ if (v->WritebackEnable[k]) {
+ WritebackLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
+ / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64)
WritebackLatencyHiding = WritebackLatencyHiding / 2;
WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
- - Watermark->WritebackDRAMClockChangeWatermark;
+ - v->Watermark.WritebackDRAMClockChangeWatermark;
WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
- - Watermark->WritebackFCLKChangeWatermark;
+ - v->Watermark.WritebackFCLKChangeWatermark;
ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
WritebackFCLKChangeLatencyMargin);
@@ -4464,22 +4444,22 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
WritebackDRAMClockChangeLatencyMargin);
}
MaxActiveDRAMClockChangeLatencySupported[k] =
- (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
+ (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
0 :
(ActiveDRAMClockChangeLatencyMargin[k]
+ mmSOCParameters.DRAMClockChangeLatency);
}
- for (i = 0; i < NumberOfActiveSurfaces; ++i) {
- for (j = 0; j < NumberOfActiveSurfaces; ++j) {
+ for (i = 0; i < v->NumberOfActiveSurfaces; ++i) {
+ for (j = 0; j < v->NumberOfActiveSurfaces; ++j) {
if (i == j ||
- (BlendingAndTiming[i] == i && BlendingAndTiming[j] == i) ||
- (BlendingAndTiming[j] == j && BlendingAndTiming[i] == j) ||
- (BlendingAndTiming[i] == BlendingAndTiming[j] && BlendingAndTiming[i] != i) ||
- (SynchronizeTimingsFinal && PixelClock[i] == PixelClock[j] &&
- HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
- VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
- (DRRDisplay[i] || DRRDisplay[j]))) {
+ (v->BlendingAndTiming[i] == i && v->BlendingAndTiming[j] == i) ||
+ (v->BlendingAndTiming[j] == j && v->BlendingAndTiming[i] == j) ||
+ (v->BlendingAndTiming[i] == v->BlendingAndTiming[j] && v->BlendingAndTiming[i] != i) ||
+ (v->SynchronizeTimingsFinal && v->PixelClock[i] == v->PixelClock[j] &&
+ v->HTotal[i] == v->HTotal[j] && v->VTotal[i] == v->VTotal[j] &&
+ v->VActive[i] == v->VActive[j]) || (v->SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
+ (v->DRRDisplay[i] || v->DRRDisplay[j]))) {
SynchronizedSurfaces[i][j] = true;
} else {
SynchronizedSurfaces[i][j] = false;
@@ -4487,8 +4467,8 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
@@ -4500,9 +4480,9 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
*MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
SameTimingForFCLKChange = true;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(SameTimingForFCLKChange ||
ActiveFCLKChangeLatencyMargin[k] <
SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
@@ -4522,17 +4502,17 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
*USRRetrainingSupport = true;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(USRRetrainingLatencyMargin[k] < 0)) {
*USRRetrainingSupport = false;
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
- UseMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
- UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
+ v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
+ v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
ActiveDRAMClockChangeLatencyMargin[k] < 0) {
if (PrefetchMode > 0) {
DRAMClockChangeSupportNumber = 2;
@@ -4546,10 +4526,10 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
DRAMClockChangeMethod = 1;
- else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
+ else if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
DRAMClockChangeMethod = 2;
}
@@ -4576,16 +4556,16 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
unsigned int dst_y_pstate;
unsigned int src_y_pstate_l;
unsigned int src_y_pstate_c;
unsigned int src_y_ahead_l, src_y_ahead_c, sub_vp_lines_l, sub_vp_lines_c;
- dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
- src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
+ dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (v->HTotal[k] / v->PixelClock[k]), 1);
+ src_y_pstate_l = dml_ceil(dst_y_pstate * v->VRatio[k], SwathHeightY[k]);
src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
- sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
+ sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + v->meta_row_height[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
@@ -4596,21 +4576,21 @@ dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBL
dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate);
dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l);
dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l);
-dml_print("DML::%s: k=%d, meta_row_height = %d\n", __func__, k, meta_row_height[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height = %d\n", __func__, k, v->meta_row_height[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l);
#endif
SubViewportLinesNeededInMALL[k] = sub_vp_lines_l;
if (BytePerPixelDETC[k] > 0) {
- src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
+ src_y_pstate_c = dml_ceil(dst_y_pstate * v->VRatioChroma[k], SwathHeightC[k]);
src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
- sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
+ sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + v->meta_row_height_chroma[k];
SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, src_y_pstate_c = %d\n", __func__, k, src_y_pstate_c);
dml_print("DML::%s: k=%d, src_y_ahead_c = %d\n", __func__, k, src_y_ahead_c);
-dml_print("DML::%s: k=%d, meta_row_height_chroma = %d\n", __func__, k, meta_row_height_chroma[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height_chroma = %d\n", __func__, k, v->meta_row_height_chroma[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_c = %d\n", __func__, k, sub_vp_lines_c);
#endif
}
@@ -4663,10 +4643,6 @@ void dml32_CalculateMinAndMaxPrefetchMode(
} else if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_uclk_fclk_and_stutter) {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 0;
- } else if (AllowForPStateChangeOrStutterInVBlankFinal ==
- dm_prefetch_support_uclk_fclk_and_stutter_if_possible) {
- *MinPrefetchMode = 0;
- *MaxPrefetchMode = 3;
} else {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 3;
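
Editor's note: the removed dm_prefetch_support_uclk_fclk_and_stutter_if_possible arm set exactly the same values as the trailing else, so the branch folds away. A minimal sketch of the resulting control flow, using only names visible in the hunk:

/* Both the removed "..._if_possible" arm and the else produced
 * MinPrefetchMode = 0, MaxPrefetchMode = 3; only the plain
 * "...uclk_fclk_and_stutter" case still needs a dedicated arm. */
if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_uclk_fclk_and_stutter) {
	*MinPrefetchMode = 0;
	*MaxPrefetchMode = 0;
} else {
	*MinPrefetchMode = 0;
	*MaxPrefetchMode = 3;	/* covers ..._if_possible and every remaining policy */
}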
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 626f6605e2d5..55cead0d4237 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -30,6 +30,7 @@
#include "os_types.h"
#include "../dc_features.h"
#include "../display_mode_structs.h"
+#include "dml/display_mode_vba.h"
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
@@ -215,6 +216,7 @@ void dml32_CalculateDETBufferSize(
void dml32_CalculateODMMode(
unsigned int MaximumPixelsPerLinePerDSCUnit,
unsigned int HActive,
+ enum output_format_class OutFormat,
enum output_encoder_class Output,
enum odm_combine_policy ODMUse,
double StateDispclk,
@@ -713,28 +715,14 @@ double dml32_CalculateExtraLatency(
unsigned int HostVMMaxNonCachedPageTableLevels);
bool dml32_CalculatePrefetchSchedule(
+ struct vba_vars_st *v,
+ unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -808,58 +796,28 @@ void dml32_CalculateFlipSchedule(
bool *ImmediateFlipSupportedForPipe);
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- bool USRRetrainingRequiredFinal,
- enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+ struct vba_vars_st *v,
unsigned int PrefetchMode,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizeTimingsFinal,
- bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- bool DRRDisplay[],
- unsigned int dpte_group_bytes[],
- unsigned int meta_row_height[],
- unsigned int meta_row_height_chroma[],
SOCParametersList mmSOCParameters,
- unsigned int WritebackChunkSize,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int VTaps[],
- unsigned int VTapsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- unsigned int VTotal[],
- unsigned int VActive[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
- Watermarks *Watermark,
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
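
Editor's note: the signature change threads one struct vba_vars_st *v through the watermark function in place of roughly thirty scalar and array parameters (USRRetrainingRequiredFinal, VRatio[], HTotal[], the Watermark output, and so on), matching the v-> accesses in the .c hunks above. Presumably this also shrinks the caller's stack frame in the FPU-guarded DML code; that motivation is an assumption. A hedged sketch of the access pattern, with field names taken from the hunks:

/* Sketch only: values that used to arrive as by-value parameters are
 * now read through the shared VBA state. */
static double total_active_vratio(struct vba_vars_st *v)
{
	double sum = 0.0;

	for (unsigned int k = 0; k < v->NumberOfActiveSurfaces; ++k)
		sum += v->VRatio[k];	/* VRatio[] was a double[] parameter before */
	return sum;
}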
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 7ebf25e87933..dd90f241e906 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -489,6 +489,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
if ((int)(dcn3_21_soc.urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
dcn3_21_soc.urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
+ dcn3_21_soc.urgent_latency_pixel_data_only_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
}
if ((int)(dcn3_21_soc.dram_clock_change_latency_us * 1000)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 5d27ff0ebb5f..4125d3d111d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -35,6 +35,8 @@
#include "dcn30/display_rq_dlg_calc_30.h"
#include "dcn31/display_mode_vba_31.h"
#include "dcn31/display_rq_dlg_calc_31.h"
+#include "dcn314/display_mode_vba_314.h"
+#include "dcn314/display_rq_dlg_calc_314.h"
#include "dcn32/display_mode_vba_32.h"
#include "dcn32/display_rq_dlg_calc_32.h"
#include "dml_logger.h"
@@ -74,6 +76,13 @@ const struct dml_funcs dml31_funcs = {
.rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg
};
+const struct dml_funcs dml314_funcs = {
+ .validate = dml314_ModeSupportAndSystemConfigurationFull,
+ .recalculate = dml314_recalculate,
+ .rq_dlg_get_dlg_reg = dml314_rq_dlg_get_dlg_reg,
+ .rq_dlg_get_rq_reg = dml314_rq_dlg_get_rq_reg
+};
+
const struct dml_funcs dml32_funcs = {
.validate = dml32_ModeSupportAndSystemConfigurationFull,
.recalculate = dml32_recalculate,
@@ -105,8 +114,12 @@ void dml_init_instance(struct display_mode_lib *lib,
break;
case DML_PROJECT_DCN31:
case DML_PROJECT_DCN31_FPGA:
+ case DML_PROJECT_DCN315:
lib->funcs = dml31_funcs;
break;
+ case DML_PROJECT_DCN314:
+ lib->funcs = dml314_funcs;
+ break;
case DML_PROJECT_DCN32:
lib->funcs = dml32_funcs;
break;
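
Editor's note: DCN3.14 gets its own dml314 function table while DCN3.15 is routed to the existing DCN3.1 implementations; callers always go through lib->funcs, so supporting a new project is one table plus one switch case. A hedged caller sketch (the init argument list beyond the project enum is assumed):

/* Sketch: dml_init_instance() selects the table once, after which the
 * caller stays project-agnostic. */
struct display_mode_lib lib;

dml_init_instance(&lib, &soc_bb, &ip_params, DML_PROJECT_DCN314); /* soc_bb/ip_params assumed */
lib.funcs.recalculate(&lib);	/* dispatches to dml314_recalculate */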
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 2bdd6ed22611..3d643d50c3eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -40,7 +40,9 @@ enum dml_project {
DML_PROJECT_DCN21,
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
+ DML_PROJECT_DCN315,
DML_PROJECT_DCN31_FPGA,
+ DML_PROJECT_DCN314,
DML_PROJECT_DCN32,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index c596187a1e09..f33a8879b05a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -510,6 +510,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int htotal;
unsigned int vtotal;
unsigned int vfront_porch;
+ unsigned int vblank_nom;
unsigned int vactive;
unsigned int hactive;
unsigned int vstartup_start;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 503e7d984ff0..03924aed8d5c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -597,6 +597,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
mode_lib->vba.VFrontPorch[mode_lib->vba.NumberOfActivePlanes] = dst->vfront_porch;
+ mode_lib->vba.VBlankNom[mode_lib->vba.NumberOfActivePlanes] = dst->vblank_nom;
mode_lib->vba.DCCFractionOfZeroSizeRequestsLuma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_luma;
mode_lib->vba.DCCFractionOfZeroSizeRequestsChroma[mode_lib->vba.NumberOfActivePlanes] = src->dcc_fraction_of_zs_req_chroma;
mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index da8acf59ccac..630f3395e90a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -652,10 +652,10 @@ struct vba_vars_st {
unsigned int OutputTypeAndRatePerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
double RequiredDISPCLKPerSurface[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
- unsigned int MicroTileHeightY[DC__NUM_DPP__MAX];
- unsigned int MicroTileHeightC[DC__NUM_DPP__MAX];
- unsigned int MicroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MicroTileWidthC[DC__NUM_DPP__MAX];
+ unsigned int MacroTileHeightY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileHeightC[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
bool ImmediateFlipRequiredFinal;
bool DCCProgrammingAssumesScanDirectionUnknownFinal;
bool EnoughWritebackUnits;
@@ -801,8 +801,6 @@ struct vba_vars_st {
double PSCL_FACTOR[DC__NUM_DPP__MAX];
double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
double MaximumVStartup[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
double AlignedYPitch[DC__NUM_DPP__MAX];
double AlignedCPitch[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index 479d7d83220c..072bd0539605 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -76,14 +76,9 @@ static inline double dml_floor(double a, double granularity)
static inline double dml_round(double a)
{
- double round_pt = 0.5;
- double ceil = dml_ceil(a, 1);
- double floor = dml_floor(a, 1);
+ const double round_pt = 0.5;
- if (a - floor >= round_pt)
- return ceil;
- else
- return floor;
+ return dml_floor(a + round_pt, 1);
}
/* float
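
Editor's note: the old ceil/floor comparison and the new dml_floor(a + 0.5, 1) are exactly equivalent. For a = n + f with 0 <= f < 1, both return n when f < 0.5 and n + 1 otherwise, i.e. round half up with ties toward +infinity; this holds for negative inputs too. A self-contained check in plain C (libm stand-ins for dml_ceil/dml_floor):

#include <assert.h>
#include <math.h>

static double old_round(double a)
{
	double c = ceil(a), f = floor(a);

	return (a - f >= 0.5) ? c : f;	/* original dml_round() logic */
}

static double new_round(double a)
{
	return floor(a + 0.5);		/* dml_floor(a + 0.5, 1) with granularity 1 */
}

int main(void)
{
	const double samples[] = { -1.5, -1.4, -0.5, 0.0, 0.49, 0.5, 2.5, 3.7 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(old_round(samples[i]) == new_round(samples[i]));
	return 0;
}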
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 8919a2092ac5..9498105c98ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -39,6 +39,8 @@
#include "panel_cntl.h"
#define MAX_CLOCK_SOURCES 7
+#define MAX_SVP_PHANTOM_STREAMS 2
+#define MAX_SVP_PHANTOM_PLANES 2
void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
uint32_t controller_id);
@@ -232,6 +234,7 @@ struct resource_funcs {
unsigned int index);
bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context);
+ void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
};
struct audio_support{
@@ -438,7 +441,6 @@ struct pipe_ctx {
union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
- bool vtp_locked;
};
/* Data used for dynamic link encoder assignment.
@@ -492,6 +494,8 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
int compbuf_size_kb;
+ unsigned int legacy_svp_drr_stream_index;
+ bool legacy_svp_drr_stream_index_valid;
};
union bw_output {
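
Editor's note: the new dcn_bw_output fields record which stream index uses the legacy SubVP+DRR path, guarded by a validity flag, and the MAX_SVP_PHANTOM_* defines bound the phantom stream/plane bookkeeping. A hedged consumer sketch (the bw_ctx.bw.dcn path is assumed from surrounding dc_state conventions):

/* Sketch: always gate on the *_valid flag before trusting the index. */
if (context->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid) {
	unsigned int idx = context->bw_ctx.bw.dcn.legacy_svp_drr_stream_index;

	/* ... apply DRR-specific SubVP handling to stream idx ... */
}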
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 6682d9e181c6..b304d450b038 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -194,6 +194,11 @@ enum dc_status dpcd_configure_lttpr_mode(
enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
bool dp_retrieve_lttpr_cap(struct dc_link *link);
+bool dp_is_lttpr_present(struct dc_link *link);
+enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting);
+void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override);
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link);
bool dpcd_write_128b_132b_sst_payload_allocation_table(
const struct dc_stream_state *stream,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 806f3041db14..9e4ddc985240 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -628,8 +628,23 @@ unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
struct dc_clocks *clocks);
-void dcn_bw_update_from_pplib(struct dc *dc);
-void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
+void dcn_get_soc_clks(
+ struct dc *dc,
+ int *min_fclk_khz,
+ int *min_dcfclk_khz,
+ int *socclk_khz);
+
+void dcn_bw_update_from_pplib_fclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *fclks);
+void dcn_bw_update_from_pplib_dcfclks(
+ struct dc *dc,
+ struct dm_pp_clock_levels_with_voltage *dcfclks);
+void dcn_bw_notify_pplib_of_wm_ranges(
+ struct dc *dc,
+ int min_fclk_khz,
+ int min_dcfclk_khz,
+ int socclk_khz);
void dcn_bw_sync_calcs_and_dml(struct dc *dc);
enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode);
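
Editor's note: the combined update/notify pair is split into explicit steps, so the caller now owns the ordering: update the internal tables from pplib's fclk and dcfclk levels separately, read back the derived SoC clocks, then pass them to the watermark-range notification. A hedged flow sketch using only the prototypes declared above (the dm_pp_* level queries are assumed to be filled in by the caller):

struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
int min_fclk_khz, min_dcfclk_khz, socclk_khz;

dcn_bw_update_from_pplib_fclks(dc, &fclks);
dcn_bw_update_from_pplib_dcfclks(dc, &dcfclks);
dcn_get_soc_clks(dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
dcn_bw_notify_pplib_of_wm_ranges(dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);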
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 5d2b028e5dad..591ab1389e3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -95,10 +95,23 @@ struct clk_limit_table_entry {
unsigned int wck_ratio;
};
+struct clk_limit_num_entries {
+ unsigned int num_dcfclk_levels;
+ unsigned int num_fclk_levels;
+ unsigned int num_memclk_levels;
+ unsigned int num_socclk_levels;
+ unsigned int num_dtbclk_levels;
+ unsigned int num_dispclk_levels;
+ unsigned int num_dppclk_levels;
+ unsigned int num_phyclk_levels;
+ unsigned int num_phyclk_d18_levels;
+};
+
/* This table is contiguous */
struct clk_limit_table {
struct clk_limit_table_entry entries[MAX_NUM_DPM_LVL];
- unsigned int num_entries;
+ struct clk_limit_num_entries num_entries_per_clk;
+ unsigned int num_entries; /* highest populated dpm level, kept for backward compatibility */
};
struct wm_range_table_entry {
@@ -214,6 +227,7 @@ struct dummy_pstate_entry {
struct clk_bw_params {
unsigned int vram_type;
unsigned int num_channels;
+ unsigned int dram_channel_width_bytes;
unsigned int dispclk_vco_khz;
unsigned int dc_mode_softmax_memclk;
struct clk_limit_table clk_table;
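
Editor's note: clk_limit_table previously tracked a single num_entries even though the clock domains can have different DPM depths; each domain now reports its own level count, with num_entries retained as the highest populated level for existing consumers. A hedged iteration sketch (the clk_mgr/bw_params path and the helper are assumptions):

/* Sketch: walk one clock domain by its own level count rather than the
 * shared num_entries. */
const struct clk_limit_table *tbl = &clk_mgr->base.bw_params->clk_table;

for (unsigned int i = 0; i < tbl->num_entries_per_clk.num_memclk_levels; i++)
	program_memclk_level(i, tbl->entries[i].wck_ratio); /* hypothetical helper */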
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 68c2ed434d2c..cff5fd55a0ad 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -340,6 +340,8 @@ struct clk_mgr_internal {
bool smu_present;
void *wm_range_table;
long long wm_range_table_addr;
+
+ bool dpm_present;
};
struct clk_mgr_internal_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
new file mode 100644
index 000000000000..45645f9fd86c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2022 Advanced Micro Devices, Inc. All rights reserved. */
+
+#ifndef __DAL_CURSOR_CACHE_H__
+#define __DAL_CURSOR_CACHE_H__
+
+union reg_cursor_control_cfg {
+ struct {
+ uint32_t cur_enable: 1;
+ uint32_t reser0: 3;
+ uint32_t cur_2x_magnify: 1;
+ uint32_t reser1: 3;
+ uint32_t mode: 3;
+ uint32_t reser2: 5;
+ uint32_t pitch: 2;
+ uint32_t reser3: 6;
+ uint32_t line_per_chunk: 5;
+ uint32_t reser4: 3;
+ } bits;
+ uint32_t raw;
+};
+struct cursor_position_cache_hubp {
+ union reg_cursor_control_cfg cur_ctl;
+ union reg_position_cfg {
+ struct {
+ uint32_t x_pos: 16;
+ uint32_t y_pos: 16;
+ } bits;
+ uint32_t raw;
+ } position;
+ union reg_hot_spot_cfg {
+ struct {
+ uint32_t x_hot: 16;
+ uint32_t y_hot: 16;
+ } bits;
+ uint32_t raw;
+ } hot_spot;
+ union reg_dst_offset_cfg {
+ struct {
+ uint32_t dst_x_offset: 13;
+ uint32_t reserved: 19;
+ } bits;
+ uint32_t raw;
+ } dst_offset;
+};
+
+struct cursor_attribute_cache_hubp {
+ uint32_t SURFACE_ADDR_HIGH;
+ uint32_t SURFACE_ADDR;
+ union reg_cursor_control_cfg cur_ctl;
+ union reg_cursor_size_cfg {
+ struct {
+ uint32_t width: 16;
+ uint32_t height: 16;
+ } bits;
+ uint32_t raw;
+ } size;
+ union reg_cursor_settings_cfg {
+ struct {
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
+ } bits;
+ uint32_t raw;
+ } settings;
+};
+
+struct cursor_rect {
+ uint32_t x;
+ uint32_t y;
+ uint32_t w;
+ uint32_t h;
+};
+
+union reg_cur0_control_cfg {
+ struct {
+ uint32_t cur0_enable: 1;
+ uint32_t expansion_mode: 1;
+ uint32_t reser0: 1;
+ uint32_t cur0_rom_en: 1;
+ uint32_t mode: 3;
+ uint32_t reserved: 25;
+ } bits;
+ uint32_t raw;
+};
+struct cursor_position_cache_dpp {
+ union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attribute_cache_dpp {
+ union reg_cur0_control_cfg cur0_ctl;
+};
+
+struct cursor_attributes_cfg {
+ struct cursor_attribute_cache_hubp aHubp;
+ struct cursor_attribute_cache_dpp aDpp;
+};
+
+#endif
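
Editor's note: these bitfield unions mirror the hardware cursor register layouts, so each field group can also be read or compared as a single packed .raw word; presumably the caches let the HUBP/DPP code detect and skip redundant cursor reprogramming. A hedged sketch of that shadow-register pattern (hubp->pos is added by a later hunk in this patch):

/* Sketch: compare packed words before touching the hardware. */
union reg_cursor_control_cfg next = { .bits = { .cur_enable = 1, .mode = 2 } };

if (hubp->pos.cur_ctl.raw != next.raw) {
	hubp->pos.cur_ctl = next;
	/* ... then write the CURSOR_CONTROL register ... */
}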
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 3ef7faa92052..dcb80c4747b0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -28,6 +28,7 @@
#define __DAL_DPP_H__
#include "transform.h"
+#include "cursor_reg_cache.h"
union defer_reg_writes {
struct {
@@ -58,6 +59,9 @@ struct dpp {
struct pwl_params shaper_params;
bool cm_bypass_mode;
+
+ struct cursor_position_cache_dpp pos;
+ struct cursor_attribute_cache_dpp att;
};
struct dpp_input_csc_matrix {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 44c4578193a3..d5ea7545583e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -27,6 +27,7 @@
#define __DAL_HUBP_H__
#include "mem_input.h"
+#include "cursor_reg_cache.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -65,6 +66,10 @@ struct hubp {
struct dc_cursor_attributes curs_attr;
struct dc_cursor_position curs_pos;
bool power_gated;
+
+ struct cursor_position_cache_hubp pos;
+ struct cursor_attribute_cache_hubp att;
+ struct cursor_rect cur_rect;
};
struct surface_flip_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 437b64e87377..cd2be729846b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -268,10 +268,18 @@ enum dc_lut_mode {
LUT_RAM_B
};
-enum phy_state {
- TX_OFF_SYMCLK_OFF,
- TX_ON_SYMCLK_ON,
- TX_OFF_SYMCLK_ON
+enum symclk_state {
+ SYMCLK_OFF_TX_OFF,
+ SYMCLK_ON_TX_ON,
+ SYMCLK_ON_TX_OFF,
+};
+
+struct phy_state {
+ struct {
+ uint8_t otg : 1;
+ uint8_t reserved : 7;
+ } symclk_ref_cnts;
+ enum symclk_state symclk_state;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 72eef7a5ed83..25a1df45b264 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -209,7 +209,6 @@ struct timing_generator_funcs {
void (*set_blank)(struct timing_generator *tg,
bool enable_blanking);
bool (*is_blanked)(struct timing_generator *tg);
- bool (*is_locked)(struct timing_generator *tg);
void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
void (*set_colors)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 52b4350c9cd8..d04b68dad413 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,11 +32,6 @@
#include "inc/hw/link_encoder.h"
#include "core_status.h"
-enum vline_select {
- VLINE0,
- VLINE1
-};
-
struct pipe_ctx;
struct dc_state;
struct dc_stream_status;
@@ -48,6 +43,7 @@ struct dc_phy_addr_space_config;
struct dc_virtual_addr_space_config;
struct dpp;
struct dce_hwseq;
+struct link_resource;
struct hw_sequencer_funcs {
void (*hardware_release)(struct dc *dc);
@@ -88,6 +84,7 @@ struct hw_sequencer_funcs {
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
void (*power_down)(struct dc *dc);
+ void (*update_dsc_pg)(struct dc *dc, struct dc_state *context, bool safe_to_disable);
/* Pipe Lock Related */
void (*pipe_control_lock)(struct dc *dc,
@@ -116,8 +113,7 @@ struct hw_sequencer_funcs {
int group_index, int group_size,
struct pipe_ctx *grouped_pipes[]);
void (*setup_periodic_interrupt)(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum vline_select vline);
+ struct pipe_ctx *pipe_ctx);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
struct dc_crtc_timing_adjust adjust);
void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
@@ -218,6 +214,25 @@ struct hw_sequencer_funcs {
void (*set_pipe)(struct pipe_ctx *pipe_ctx);
+ void (*enable_dp_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
+ void (*enable_tmds_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ uint32_t pixel_clock);
+ void (*enable_lvds_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum clock_source_id clock_source,
+ uint32_t pixel_clock);
+ void (*disable_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+
void (*get_dcc_en_bits)(struct dc *dc, int *dcc_en_bits);
/* Idle Optimization Related */
@@ -245,7 +260,9 @@ struct hw_sequencer_funcs {
struct tg_color *color,
int mpcc_id);
- void (*update_phy_state)(struct dc_state *state, struct pipe_ctx *pipe_ctx, enum phy_state target_state);
+ void (*update_phantom_vp_position)(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *phantom_pipe);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 1cdea0efe5c1..a4d61bb724b6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -124,6 +124,8 @@ struct hwseq_private_funcs {
void (*dsc_pg_control)(struct dce_hwseq *hws,
unsigned int dsc_inst,
bool power_on);
+ bool (*dsc_pg_status)(struct dce_hwseq *hws,
+ unsigned int dsc_inst);
void (*update_odm)(struct dc *dc, struct dc_state *context,
struct pipe_ctx *pipe_ctx);
void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index 3482a877b6af..89964c980b87 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -55,9 +55,6 @@ struct link_hwss_ext {
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
- void (*disable_dp_link_output)(struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal);
void (*set_dp_link_test_pattern)(struct dc_link *link,
const struct link_resource *link_res,
struct encoder_set_dp_phy_pattern_param *tp_params);
@@ -79,6 +76,9 @@ struct link_hwss {
void (*setup_stream_encoder)(struct pipe_ctx *pipe_ctx);
void (*reset_stream_encoder)(struct pipe_ctx *pipe_ctx);
void (*setup_stream_attribute)(struct pipe_ctx *pipe_ctx);
+ void (*disable_link_output)(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
};
#endif /* __DC_LINK_HWSS_H__ */
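
Editor's note: disable_link_output moves out of the DP-only ext group into the core link_hwss ops, so every backend (DIO, DPIA, HPO DP, virtual) carries a disable hook regardless of signal type; the virtual backend later in this patch supplies a no-op stub, so callers never need a NULL check. A hedged caller sketch using the declarations above:

/* Sketch: the op is now unconditional on every backend. */
const struct link_hwss *hwss = get_link_hwss(link, link_res);

hwss->disable_link_output(link, link_res, signal); /* DP, TMDS, LVDS, or virtual */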
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 58158764adc0..5040836f404d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -219,9 +219,21 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
struct dc_state *context,
uint8_t disabled_master_pipe_idx);
+void reset_sync_context_for_pipe(const struct dc *dc,
+ struct dc_state *context,
+ uint8_t pipe_idx);
+
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
const struct link_hwss *get_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
+bool is_h_timing_divisible_by_2(struct dc_stream_state *stream);
+
+bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
+ const struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
index 5e92019539c8..4227adbc646a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
@@ -130,7 +130,7 @@ void enable_dio_dp_link_output(struct dc_link *link,
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
-void disable_dio_dp_link_output(struct dc_link *link,
+void disable_dio_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal)
{
@@ -174,10 +174,10 @@ static const struct link_hwss dio_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
.setup_stream_attribute = setup_dio_stream_attribute,
+ .disable_link_output = disable_dio_link_output,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
- .disable_dp_link_output = disable_dio_dp_link_output,
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
.set_dp_lane_settings = set_dio_dp_lane_settings,
.update_stream_allocation_table = update_dio_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
index 08f22b32df48..126d37f847a1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
@@ -40,7 +40,7 @@ void enable_dio_dp_link_output(struct dc_link *link,
enum signal_type signal,
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings);
-void disable_dio_dp_link_output(struct dc_link *link,
+void disable_dio_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal);
void set_dio_dp_link_test_pattern(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
index 89d4e8159138..64f7ea6a9aa3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
@@ -56,10 +56,10 @@ static const struct link_hwss dpia_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
.setup_stream_attribute = setup_dio_stream_attribute,
+ .disable_link_output = disable_dio_link_output,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
- .disable_dp_link_output = disable_dio_dp_link_output,
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
.set_dp_lane_settings = set_dio_dp_lane_settings,
.update_stream_allocation_table = update_dpia_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index 226af06278ce..153a88381f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -111,7 +111,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
@@ -266,11 +266,11 @@ static const struct link_hwss hpo_dp_link_hwss = {
.setup_stream_encoder = setup_hpo_dp_stream_encoder,
.reset_stream_encoder = reset_hpo_dp_stream_encoder,
.setup_stream_attribute = setup_hpo_dp_stream_attribute,
+ .disable_link_output = disable_hpo_dp_link_output,
.ext = {
.set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,
.set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width,
.enable_dp_link_output = enable_hpo_dp_link_output,
- .disable_dp_link_output = disable_hpo_dp_link_output,
.set_dp_link_test_pattern = set_hpo_dp_link_test_pattern,
.set_dp_lane_settings = set_hpo_dp_lane_settings,
.update_stream_allocation_table = update_hpo_dp_stream_allocation_table,
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
index 501173ce270e..4f7f99156897 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
@@ -36,10 +36,18 @@ void virtual_setup_stream_attribute(struct pipe_ctx *pipe_ctx)
void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
+
+static void virtual_disable_link_output(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+}
+
static const struct link_hwss virtual_link_hwss = {
.setup_stream_encoder = virtual_setup_stream_encoder,
.reset_stream_encoder = virtual_reset_stream_encoder,
.setup_stream_attribute = virtual_setup_stream_attribute,
+ .disable_link_output = virtual_disable_link_output,
};
const struct link_hwss *get_virtual_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index f34c45b19fcb..eb5b7eb292ef 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -248,6 +248,7 @@ struct dmub_srv_hw_params {
bool disable_dpia;
bool usb4_cm_version;
bool fw_in_system_memory;
+ bool dpia_hpd_int_enable_supported;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 7cddbc431b57..7a8f61517424 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -400,8 +400,9 @@ union dmub_fw_boot_options {
uint32_t diag_env: 1; /* 1 if diagnostic environment */
uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/
uint32_t usb4_cm_version: 1; /**< 1 CM support */
+ uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
- uint32_t reserved : 17; /**< reserved */
+ uint32_t reserved : 16; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -728,6 +729,12 @@ enum dmub_cmd_type {
/**
* Command type used for all VBIOS interface commands.
*/
+
+ /**
+ * Command type used to set DPIA HPD interrupt state
+ */
+ DMUB_CMD__DPIA_HPD_INT_ENABLE = 86,
+
DMUB_CMD__VBIOS = 128,
};
@@ -998,7 +1005,8 @@ struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 {
uint8_t scale_factor_numerator;
uint8_t scale_factor_denominator;
uint8_t is_drr;
- uint8_t pad[2];
+ uint8_t main_split_pipe_index;
+ uint8_t phantom_split_pipe_index;
} subvp_data;
struct {
@@ -1255,6 +1263,14 @@ struct dmub_rb_cmd_set_mst_alloc_slots {
};
/**
+ * DMUB command structure for DPIA HPD int enable control.
+ */
+struct dmub_rb_cmd_dpia_hpd_int_enable {
+ struct dmub_cmd_header header; /* header */
+ uint32_t enable; /* dpia hpd interrupt enable */
+};
+
+/**
* struct dmub_rb_cmd_dpphy_init - DPPHY init.
*/
struct dmub_rb_cmd_dpphy_init {
@@ -2083,7 +2099,99 @@ struct dmub_rb_cmd_update_dirty_rect {
/**
* Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
*/
-struct dmub_cmd_update_cursor_info_data {
+union dmub_reg_cursor_control_cfg {
+ struct {
+ uint32_t cur_enable: 1;
+ uint32_t reser0: 3;
+ uint32_t cur_2x_magnify: 1;
+ uint32_t reser1: 3;
+ uint32_t mode: 3;
+ uint32_t reser2: 5;
+ uint32_t pitch: 2;
+ uint32_t reser3: 6;
+ uint32_t line_per_chunk: 5;
+ uint32_t reser4: 3;
+ } bits;
+ uint32_t raw;
+};
+struct dmub_cursor_position_cache_hubp {
+ union dmub_reg_cursor_control_cfg cur_ctl;
+ union dmub_reg_position_cfg {
+ struct {
+ uint32_t cur_x_pos: 16;
+ uint32_t cur_y_pos: 16;
+ } bits;
+ uint32_t raw;
+ } position;
+ union dmub_reg_hot_spot_cfg {
+ struct {
+ uint32_t hot_x: 16;
+ uint32_t hot_y: 16;
+ } bits;
+ uint32_t raw;
+ } hot_spot;
+ union dmub_reg_dst_offset_cfg {
+ struct {
+ uint32_t dst_x_offset: 13;
+ uint32_t reserved: 19;
+ } bits;
+ uint32_t raw;
+ } dst_offset;
+};
+
+union dmub_reg_cur0_control_cfg {
+ struct {
+ uint32_t cur0_enable: 1;
+ uint32_t expansion_mode: 1;
+ uint32_t reser0: 1;
+ uint32_t cur0_rom_en: 1;
+ uint32_t mode: 3;
+ uint32_t reserved: 25;
+ } bits;
+ uint32_t raw;
+};
+struct dmub_cursor_position_cache_dpp {
+ union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_position_cfg {
+ struct dmub_cursor_position_cache_hubp pHubp;
+ struct dmub_cursor_position_cache_dpp pDpp;
+ uint8_t pipe_idx;
+ /*
+ * Padding required to keep the struct 4-byte aligned.
+ */
+ uint8_t padding[3];
+};
+
+struct dmub_cursor_attribute_cache_hubp {
+ uint32_t SURFACE_ADDR_HIGH;
+ uint32_t SURFACE_ADDR;
+ union dmub_reg_cursor_control_cfg cur_ctl;
+ union dmub_reg_cursor_size_cfg {
+ struct {
+ uint32_t width: 16;
+ uint32_t height: 16;
+ } bits;
+ uint32_t raw;
+ } size;
+ union dmub_reg_cursor_settings_cfg {
+ struct {
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
+ } bits;
+ uint32_t raw;
+ } settings;
+};
+struct dmub_cursor_attribute_cache_dpp {
+ union dmub_reg_cur0_control_cfg cur0_ctl;
+};
+struct dmub_cursor_attributes_cfg {
+ struct dmub_cursor_attribute_cache_hubp aHubp;
+ struct dmub_cursor_attribute_cache_dpp aDpp;
+};
+
+struct dmub_cmd_update_cursor_payload0 {
/**
* Cursor dirty rects.
*/
@@ -2110,6 +2218,20 @@ struct dmub_cmd_update_cursor_info_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+ * Cursor Position Register.
+ * Registers contains Hubp & Dpp modules
+ */
+ struct dmub_cursor_position_cfg position_cfg;
+};
+
+struct dmub_cmd_update_cursor_payload1 {
+ struct dmub_cursor_attributes_cfg attribute_cfg;
+};
+
+union dmub_cmd_update_cursor_info_data {
+ struct dmub_cmd_update_cursor_payload0 payload0;
+ struct dmub_cmd_update_cursor_payload1 payload1;
};
/**
* Definition of a DMUB_CMD__UPDATE_CURSOR_INFO command.
@@ -2122,7 +2244,7 @@ struct dmub_rb_cmd_update_cursor_info {
/**
* Data passed from driver to FW in a DMUB_CMD__UPDATE_CURSOR_INFO command.
*/
- struct dmub_cmd_update_cursor_info_data update_cursor_info_data;
+ union dmub_cmd_update_cursor_info_data update_cursor_info_data;
};
/**
@@ -2819,11 +2941,7 @@ struct dmub_rb_cmd_get_visual_confirm_color {
struct dmub_optc_state {
uint32_t v_total_max;
uint32_t v_total_min;
- uint32_t v_total_mid;
- uint32_t v_total_mid_frame_num;
uint32_t tg_inst;
- uint32_t enable_manual_trigger;
- uint32_t clear_force_vsync;
};
struct dmub_rb_cmd_drr_update {
@@ -3229,6 +3347,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__QUERY_HPD_STATE command.
*/
struct dmub_rb_cmd_query_hpd_state query_hpd;
+ /**
+ * Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.
+ */
+ struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable;
};
/**
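
Editor's note: the cursor-info payload becomes a union of two payload structs, so position/dirty-rect data and attribute data travel in separate command frames of the same overall size instead of widening the command; the same space discipline shows up in the boot options, where the new dpia_hpd_int_enable_supported flag takes one bit from reserved to keep the union at 32 bits. A hedged fill sketch for the first frame (only fields visible in the hunks are used; the dmub_rb_cmd member name is assumed):

union dmub_rb_cmd cmd = {0};

cmd.update_cursor_info.update_cursor_info_data.payload0.panel_inst = 0;
cmd.update_cursor_info.update_cursor_info_data.payload0.position_cfg.pipe_idx = 3;
/* attributes travel separately via ...payload1.attribute_cfg */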
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index c7bd7e216710..c90b9ee42e12 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -350,6 +350,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.dpia_supported = params->dpia_supported;
boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
+ boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 9f3558c0ef11..c3089c673975 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -215,6 +215,7 @@ enum {
#define DEVICE_ID_NV_143F 0x143F
#define FAMILY_VGH 144
#define DEVICE_ID_VGH_163F 0x163F
+#define DEVICE_ID_VGH_1435 0x1435
#define VANGOGH_A0 0x01
#define VANGOGH_UNKNOWN 0xFF
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 05096c644a60..a7ba5bd8dc16 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -128,8 +128,8 @@ struct av_sync_data {
uint8_t aud_del_ins3;/* DPCD 0002Dh */
};
-static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
-static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};
+static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3};
+static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};
static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u";
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index d76ab72baf0c..d1e91d31d151 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -83,6 +83,7 @@ enum link_training_result {
};
enum lttpr_mode {
+ LTTPR_MODE_UNKNOWN,
LTTPR_MODE_NON_LTTPR,
LTTPR_MODE_TRANSPARENT,
LTTPR_MODE_NON_TRANSPARENT,
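
Editor's note: LTTPR_MODE_UNKNOWN slots in ahead of the existing values as an explicit "not yet decided" state, pairing with the dp_decide_*_lttpr_mode helpers declared in dc_link_dp.h earlier in this patch. A hedged sketch of the intended use:

struct dc_link_settings link_settings = {0};	/* negotiated settings assumed */
enum lttpr_mode mode = LTTPR_MODE_UNKNOWN;	/* sentinel until probed */

if (dp_is_lttpr_present(link))
	mode = dp_decide_lttpr_mode(link, &link_settings);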
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 859ffd8725c5..447a0ec9cbe2 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1600,6 +1600,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
struct fixed31_32 lut2;
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
i = 0;
/* fixed_pt library has problems handling too small values */
@@ -1628,6 +1629,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
} else
hw_x = coordinates_x[i].x;
+ if (dc_fixpt_le(one, hw_x))
+ hw_x = one;
+
norm_x = dc_fixpt_mul(norm_factor, hw_x);
index = dc_fixpt_floor(norm_x);
if (index < 0 || index > 255)
@@ -1688,7 +1692,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinates_x;
- build_coefficients(&coeff, true);
+ build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
i = 0;
while (i != hw_points_num + 1) {
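
Editor's note: two independent fixes land here. Clamping hw_x to 1.0 in fixed point keeps norm_x, and therefore the derived LUT index, inside the 0..255 window that the existing bounds check enforces; and build_coefficients() now selects degamma coefficients by transfer-function enum rather than a bare bool. A one-line sketch of the latter (enum value from the hunk, prior signature inferred from the removed line):

build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);	/* was: build_coefficients(&coeff, true) */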
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
index 1115dfc6ae1f..3973110f149c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
@@ -9800,18 +9800,118 @@
// addressBlock: gc_pwrdec
// base address: 0x3c000
+#define mmCGTS_RD_CTRL_REG 0x5004
+#define mmCGTS_RD_CTRL_REG_BASE_IDX 1
+#define mmCGTS_RD_REG 0x5005
+#define mmCGTS_RD_REG_BASE_IDX 1
#define mmCGTS_TCC_DISABLE 0x5006
#define mmCGTS_TCC_DISABLE_BASE_IDX 1
#define mmCGTS_USER_TCC_DISABLE 0x5007
#define mmCGTS_USER_TCC_DISABLE_BASE_IDX 1
+#define mmCGTS_STATUS_REG 0x5008
+#define mmCGTS_STATUS_REG_BASE_IDX 1
+#define mmCGTT_SPI_CGTSSM_CLK_CTRL 0x5009
+#define mmCGTT_SPI_CGTSSM_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPI_PS_CLK_CTRL 0x507d
+#define mmCGTT_SPI_PS_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPIS_CLK_CTRL 0x507e
+#define mmCGTT_SPIS_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SPI_CLK_CTRL 0x5080
+#define mmCGTT_SPI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PC_CLK_CTRL 0x5081
+#define mmCGTT_PC_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_BCI_CLK_CTRL 0x5082
+#define mmCGTT_BCI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_VGT_CLK_CTRL 0x5084
+#define mmCGTT_VGT_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_IA_CLK_CTRL 0x5085
+#define mmCGTT_IA_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_WD_CLK_CTRL 0x5086
+#define mmCGTT_WD_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_GS_NGG_CLK_CTRL 0x5087
+#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PA_CLK_CTRL 0x5088
+#define mmCGTT_PA_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL0 0x5089
+#define mmCGTT_SC_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL1 0x508a
+#define mmCGTT_SC_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_SC_CLK_CTRL2 0x508b
+#define mmCGTT_SC_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_SQ_CLK_CTRL 0x508c
+#define mmCGTT_SQ_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SQG_CLK_CTRL 0x508d
+#define mmCGTT_SQG_CLK_CTRL_BASE_IDX 1
#define mmSQ_ALU_CLK_CTRL 0x508e
#define mmSQ_ALU_CLK_CTRL_BASE_IDX 1
#define mmSQ_TEX_CLK_CTRL 0x508f
#define mmSQ_TEX_CLK_CTRL_BASE_IDX 1
#define mmSQ_LDS_CLK_CTRL 0x5090
#define mmSQ_LDS_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL0 0x5094
+#define mmCGTT_SX_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL1 0x5095
+#define mmCGTT_SX_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL2 0x5096
+#define mmCGTT_SX_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL3 0x5097
+#define mmCGTT_SX_CLK_CTRL3_BASE_IDX 1
+#define mmCGTT_SX_CLK_CTRL4 0x5098
+#define mmCGTT_SX_CLK_CTRL4_BASE_IDX 1
+#define mmTD_CGTT_CTRL 0x509c
+#define mmTD_CGTT_CTRL_BASE_IDX 1
+#define mmTA_CGTT_CTRL 0x509d
+#define mmTA_CGTT_CTRL_BASE_IDX 1
+#define mmCGTT_TCPI_CLK_CTRL 0x5109
+#define mmCGTT_TCPI_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_GDS_CLK_CTRL 0x50a0
+#define mmCGTT_GDS_CLK_CTRL_BASE_IDX 1
+#define mmDB_CGTT_CLK_CTRL_0 0x50a4
+#define mmDB_CGTT_CLK_CTRL_0_BASE_IDX 1
+#define mmCB_CGTT_SCLK_CTRL 0x50a8
+#define mmCB_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmGL2C_CGTT_SCLK_CTRL 0x50fc
+#define mmGL2C_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmGL2A_CGTT_SCLK_CTRL 0x50ac
+#define mmGL2A_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmGL2A_CGTT_SCLK_CTRL_1 0x50ad
+#define mmGL2A_CGTT_SCLK_CTRL_1_BASE_IDX 1
+#define mmCGTT_CP_CLK_CTRL 0x50b0
+#define mmCGTT_CP_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_CPF_CLK_CTRL 0x50b1
+#define mmCGTT_CPF_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_CPC_CLK_CTRL 0x50b2
+#define mmCGTT_CPC_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_RLC_CLK_CTRL 0x50b5
+#define mmCGTT_RLC_CLK_CTRL_BASE_IDX 1
#define mmRLC_GFX_RM_CNTL 0x50b6
#define mmRLC_GFX_RM_CNTL_BASE_IDX 1
+#define mmRMI_CGTT_SCLK_CTRL 0x50c0
+#define mmRMI_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmCGTT_TCPF_CLK_CTRL 0x5111
+#define mmCGTT_TCPF_CLK_CTRL_BASE_IDX 1
+#define mmGCR_CGTT_SCLK_CTRL 0x50c2
+#define mmGCR_CGTT_SCLK_CTRL_BASE_IDX 1
+#define mmUTCL1_CGTT_CLK_CTRL 0x50c3
+#define mmUTCL1_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGCEA_CGTT_CLK_CTRL 0x50c4
+#define mmGCEA_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmSE_CAC_CGTT_CLK_CTRL 0x50d0
+#define mmSE_CAC_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGC_CAC_CGTT_CLK_CTRL 0x50d8
+#define mmGC_CAC_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmGRBM_CGTT_CLK_CNTL 0x50e0
+#define mmGRBM_CGTT_CLK_CNTL_BASE_IDX 1
+#define mmGUS_CGTT_CLK_CTRL 0x50f4
+#define mmGUS_CGTT_CLK_CTRL_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL0 0x50f8
+#define mmCGTT_PH_CLK_CTRL0_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL1 0x50f9
+#define mmCGTT_PH_CLK_CTRL1_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL2 0x50fa
+#define mmCGTT_PH_CLK_CTRL2_BASE_IDX 1
+#define mmCGTT_PH_CLK_CTRL3 0x50fb
+#define mmCGTT_PH_CLK_CTRL3_BASE_IDX 1
// addressBlock: gc_hypdec
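The offset half of this header pairs every mm* register with a _BASE_IDX selector rather than a flat address: the mm* value is a dword offset inside its address block, and _BASE_IDX picks which GC segment base (recorded per ASIC at init time) it gets added to. A minimal standalone sketch of that two-level lookup, modeled on the SOC15_REG_OFFSET convention from amdgpu's soc15_common.h — the segment bases below are invented purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified model of the SOC15 two-level register lookup.
 * In the driver, reg_offset[ip][inst][base_idx] is filled in from IP
 * discovery; only the indexing scheme here matches the kernel, the base
 * values themselves are made up. */

#define mmCGTS_TCC_DISABLE          0x5006
#define mmCGTS_TCC_DISABLE_BASE_IDX 1

static const uint32_t gc_bases[2] = { 0x00001260, 0x0000a000 }; /* illustrative */

static uint32_t soc15_reg_offset(uint32_t reg, uint32_t base_idx)
{
        return gc_bases[base_idx] + reg; /* a dword index, not a byte address */
}

int main(void)
{
        printf("CGTS_TCC_DISABLE -> 0x%08x\n",
               soc15_reg_offset(mmCGTS_TCC_DISABLE,
                                mmCGTS_TCC_DISABLE_BASE_IDX));
        return 0;
}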
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index 83faa276523f..d4e8ff22ecb8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -34547,6 +34547,14 @@
// addressBlock: gc_pwrdec
+//CGTS_RD_CTRL_REG
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL__SHIFT 0x0
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL__SHIFT 0x4
+#define CGTS_RD_CTRL_REG__ROW_MUX_SEL_MASK 0x0000000FL
+#define CGTS_RD_CTRL_REG__REG_MUX_SEL_MASK 0x000000F0L
+//CGTS_RD_REG
+#define CGTS_RD_REG__READ_DATA__SHIFT 0x0
+#define CGTS_RD_REG__READ_DATA_MASK 0xFFFFFFFFL
//CGTS_TCC_DISABLE
#define CGTS_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
#define CGTS_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
@@ -34557,6 +34565,485 @@
#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT 0x10
#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE_MASK 0x0000FF00L
#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK 0xFFFF0000L
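The companion sh_mask header gives each register field a __SHIFT/_MASK pair, and the driver decodes readbacks through token-pasting helpers rather than open-coded shifts. A self-contained sketch using the CGTS_USER_TCC_DISABLE fields above; the REG_GET_FIELD macro mirrors the one in amdgpu.h, and the sample register value is invented:

#include <stdint.h>
#include <stdio.h>

#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE__SHIFT 0x8
#define CGTS_USER_TCC_DISABLE__TCC_DISABLE__SHIFT    0x10
#define CGTS_USER_TCC_DISABLE__HI_TCC_DISABLE_MASK   0x0000FF00L
#define CGTS_USER_TCC_DISABLE__TCC_DISABLE_MASK      0xFFFF0000L

/* Token-pasting field extractor in the style of amdgpu's REG_GET_FIELD:
 * reg and field expand to the _MASK/__SHIFT names defined above. */
#define REG_GET_FIELD(value, reg, field) \
        (((value) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

int main(void)
{
        uint32_t val = 0x00030100; /* invented sample readback */

        printf("tcc_disable    = 0x%04x\n",
               (unsigned)REG_GET_FIELD(val, CGTS_USER_TCC_DISABLE, TCC_DISABLE));
        printf("hi_tcc_disable = 0x%02x\n",
               (unsigned)REG_GET_FIELD(val, CGTS_USER_TCC_DISABLE, HI_TCC_DISABLE));
        return 0;
}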
+//CGTS_STATUS_REG
+#define CGTS_STATUS_REG__SA0_QUAD0_MGCG_ENABLED__SHIFT 0x0
+#define CGTS_STATUS_REG__SA0_QUAD0_CG_STATUS__SHIFT 0x1
+#define CGTS_STATUS_REG__SA1_QUAD0_MGCG_ENABLED__SHIFT 0x8
+#define CGTS_STATUS_REG__SA1_QUAD0_CG_STATUS__SHIFT 0x9
+#define CGTS_STATUS_REG__SA0_QUAD0_MGCG_ENABLED_MASK 0x00000001L
+#define CGTS_STATUS_REG__SA0_QUAD0_CG_STATUS_MASK 0x00000006L
+#define CGTS_STATUS_REG__SA1_QUAD0_MGCG_ENABLED_MASK 0x00000100L
+#define CGTS_STATUS_REG__SA1_QUAD0_CG_STATUS_MASK 0x00000600L
+//CGTT_SPI_CGTSSM_CLK_CTRL
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_CGTSSM_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+//CGTT_SPI_PS_CLK_CTRL
+#define CGTT_SPI_PS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPI_PS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPI_PS_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPI_PS_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPI_PS_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPI_PS_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_PS_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_PS_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_PS_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_PS_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPI_PS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPI_PS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPI_PS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_PS_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_PS_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SPIS_CLK_CTRL
+#define CGTT_SPIS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPIS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPIS_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPIS_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPIS_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPIS_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPIS_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPIS_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPIS_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPIS_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPIS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPIS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPIS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPIS_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPIS_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPIS_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPIS_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPIS_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPIS_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPIS_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPIS_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SPI_CLK_CTRL
+#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SPI_CLK_CTRL__GRP6_OVERRIDE__SHIFT 0x18
+#define CGTT_SPI_CLK_CTRL__GRP5_OVERRIDE__SHIFT 0x19
+#define CGTT_SPI_CLK_CTRL__GRP4_OVERRIDE__SHIFT 0x1a
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE__SHIFT 0x1b
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE__SHIFT 0x1c
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE__SHIFT 0x1d
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE__SHIFT 0x1e
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SPI_CLK_CTRL__GRP6_OVERRIDE_MASK 0x01000000L
+#define CGTT_SPI_CLK_CTRL__GRP5_OVERRIDE_MASK 0x02000000L
+#define CGTT_SPI_CLK_CTRL__GRP4_OVERRIDE_MASK 0x04000000L
+#define CGTT_SPI_CLK_CTRL__GRP3_OVERRIDE_MASK 0x08000000L
+#define CGTT_SPI_CLK_CTRL__GRP2_OVERRIDE_MASK 0x10000000L
+#define CGTT_SPI_CLK_CTRL__GRP1_OVERRIDE_MASK 0x20000000L
+#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK 0x40000000L
+#define CGTT_SPI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
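Nearly all of the CGTT_*_CLK_CTRL registers added here share one layout: a 4-bit ON_DELAY, an 8-bit OFF_HYSTERESIS, and override bits in the high byte that, when set, keep the corresponding clocks running instead of letting them gate. Clock-gating code typically updates them with a read-modify-write over the masks; a hedged standalone sketch, with the register I/O stubbed out and the field values chosen arbitrarily:

#include <stdint.h>
#include <stdio.h>

#define CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT        0x0
#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT  0x4
#define CGTT_SPI_CLK_CTRL__ON_DELAY_MASK          0x0000000FL
#define CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK    0x00000FF0L
#define CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK     0x40000000L

static uint32_t fake_reg = 0x40000000; /* pretend GRP0 override is set */

static uint32_t rreg32(void)       { return fake_reg; }
static void     wreg32(uint32_t v) { fake_reg = v; }

int main(void)
{
        uint32_t data = rreg32();

        /* Program delay/hysteresis and clear the override so the clock
         * tree is allowed to gate again (values are illustrative). */
        data &= ~(CGTT_SPI_CLK_CTRL__ON_DELAY_MASK |
                  CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS_MASK |
                  CGTT_SPI_CLK_CTRL__GRP0_OVERRIDE_MASK);
        data |= (4u   << CGTT_SPI_CLK_CTRL__ON_DELAY__SHIFT) |
                (0xFu << CGTT_SPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT);
        wreg32(data);

        printf("CGTT_SPI_CLK_CTRL <- 0x%08x\n", fake_reg);
        return 0;
}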
+//CGTT_PC_CLK_CTRL
+#define CGTT_PC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PC_CLK_CTRL__PC_RAM_FGCG_OVERRIDE__SHIFT 0x11
+#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE__SHIFT 0xd
+#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE__SHIFT 0xe
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
+#define CGTT_PC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PC_CLK_CTRL__PC_RAM_FGCG_OVERRIDE_MASK 0x00020000L
+#define CGTT_PC_CLK_CTRL__PC_WRITE_CLK_EN_OVERRIDE_MASK 0x00002000L
+#define CGTT_PC_CLK_CTRL__PC_READ_CLK_EN_OVERRIDE_MASK 0x00004000L
+#define CGTT_PC_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_PC_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_PC_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_PC_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+//CGTT_BCI_CLK_CTRL
+#define CGTT_BCI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_BCI_CLK_CTRL__RESERVED__SHIFT 0xc
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE__SHIFT 0x18
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE__SHIFT 0x19
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE__SHIFT 0x1a
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE__SHIFT 0x1b
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE__SHIFT 0x1c
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE__SHIFT 0x1d
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE__SHIFT 0x1e
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_BCI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_BCI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_BCI_CLK_CTRL__RESERVED_MASK 0x0000F000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_BCI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_BCI_CLK_CTRL__CORE6_OVERRIDE_MASK 0x01000000L
+#define CGTT_BCI_CLK_CTRL__CORE5_OVERRIDE_MASK 0x02000000L
+#define CGTT_BCI_CLK_CTRL__CORE4_OVERRIDE_MASK 0x04000000L
+#define CGTT_BCI_CLK_CTRL__CORE3_OVERRIDE_MASK 0x08000000L
+#define CGTT_BCI_CLK_CTRL__CORE2_OVERRIDE_MASK 0x10000000L
+#define CGTT_BCI_CLK_CTRL__CORE1_OVERRIDE_MASK 0x20000000L
+#define CGTT_BCI_CLK_CTRL__CORE0_OVERRIDE_MASK 0x40000000L
+#define CGTT_BCI_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_VGT_CLK_CTRL
+#define CGTT_VGT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE__SHIFT 0x1c
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_VGT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_VGT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_VGT_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_VGT_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_VGT_CLK_CTRL__TESS_OVERRIDE_MASK 0x10000000L
+#define CGTT_VGT_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_VGT_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_IA_CLK_CTRL
+#define CGTT_IA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE__SHIFT 0x19
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_IA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_IA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_IA_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_IA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_IA_CLK_CTRL__PERF_ENABLE_MASK 0x02000000L
+#define CGTT_IA_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_IA_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_WD_CLK_CTRL
+#define CGTT_WD_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1d
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_WD_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_WD_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_WD_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_WD_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_WD_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L
+#define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L
+#define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_GS_NGG_CLK_CTRL
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE__SHIFT 0xf
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE__SHIFT 0x10
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1c
+#define CGTT_GS_NGG_CLK_CTRL__GS1_OVERRIDE__SHIFT 0x1d
+#define CGTT_GS_NGG_CLK_CTRL__GS0_OVERRIDE__SHIFT 0x1e
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L
+#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x10000000L
+#define CGTT_GS_NGG_CLK_CTRL__GS1_OVERRIDE_MASK 0x20000000L
+#define CGTT_GS_NGG_CLK_CTRL__GS0_OVERRIDE_MASK 0x40000000L
+#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_PA_CLK_CTRL
+#define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PA_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_PA_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PA_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PA_CLK_CTRL__SU_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_PA_CLK_CTRL__CL_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PA_CLK_CTRL__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL0
+#define CGTT_SC_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_STALL_OVERRIDE_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL0__PFF_ZFF_MEM_CLK_OVERRIDE_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL1
+#define CGTT_SC_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0__SHIFT 0x10
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE__SHIFT 0x11
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE__SHIFT 0x12
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE__SHIFT 0x13
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE__SHIFT 0x14
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE__SHIFT 0x15
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE__SHIFT 0x16
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE__SHIFT 0x17
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0__SHIFT 0x18
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE__SHIFT 0x19
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_SC_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE0_MASK 0x00010000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_STALL_OVERRIDE_MASK 0x00020000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_STALL_OVERRIDE_MASK 0x00040000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_STALL_OVERRIDE_MASK 0x00080000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_STALL_OVERRIDE_MASK 0x00100000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_STALL_OVERRIDE_MASK 0x00200000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_STALL_OVERRIDE_MASK 0x00400000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_STALL_OVERRIDE_MASK 0x00800000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE0_MASK 0x01000000L
+#define CGTT_SC_CLK_CTRL1__PBB_BINNING_CLK_OVERRIDE_MASK 0x02000000L
+#define CGTT_SC_CLK_CTRL1__PBB_SCISSOR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL1__OTHER_SPECIAL_SC_REG_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL1__SCREEN_EXT_REG_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL1__VPORT_REG_MEM_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL1__PBB_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_SC_CLK_CTRL1__PBB_WARP_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_SC_CLK_CTRL2
+#define CGTT_SC_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SC_CLK_CTRL2__DBR_CLK_OVERRIDE__SHIFT 0x1a
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE__SHIFT 0x1b
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE__SHIFT 0x1c
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE__SHIFT 0x1d
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_SC_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SC_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SC_CLK_CTRL2__DBR_CLK_OVERRIDE_MASK 0x04000000L
+#define CGTT_SC_CLK_CTRL2__SCF_SCB_INTF_CLK_OVERRIDE_MASK 0x08000000L
+#define CGTT_SC_CLK_CTRL2__SC_PKR_INTF_CLK_OVERRIDE_MASK 0x10000000L
+#define CGTT_SC_CLK_CTRL2__SC_DB_INTF_CLK_OVERRIDE_MASK 0x20000000L
+#define CGTT_SC_CLK_CTRL2__PA_SC_INTF_CLK_OVERRIDE_MASK 0x40000000L
+//CGTT_SQ_CLK_CTRL
+#define CGTT_SQ_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQ_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQ_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQ_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SQ_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQ_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
+//CGTT_SQG_CLK_CTRL
+#define CGTT_SQG_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE__SHIFT 0x1c
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE__SHIFT 0x1d
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE__SHIFT 0x1e
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f
+#define CGTT_SQG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SQG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SQG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SQG_CLK_CTRL__TTRACE_OVERRIDE_MASK 0x10000000L
+#define CGTT_SQG_CLK_CTRL__PERFMON_OVERRIDE_MASK 0x20000000L
+#define CGTT_SQG_CLK_CTRL__CORE_OVERRIDE_MASK 0x40000000L
+#define CGTT_SQG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L
//SQ_ALU_CLK_CTRL
#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA0__SHIFT 0x0
#define SQ_ALU_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
@@ -34572,12 +35059,982 @@
#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1__SHIFT 0x10
#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA0_MASK 0x0000FFFFL
#define SQ_LDS_CLK_CTRL__FORCE_WGP_ON_SA1_MASK 0xFFFF0000L
+//CGTT_SX_CLK_CTRL0
+#define CGTT_SX_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL0__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL0__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL1
+#define CGTT_SX_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL1__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL1__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL1__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL1__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL2
+#define CGTT_SX_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL2__RESERVED__SHIFT 0xd
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL2__RESERVED_MASK 0x0000E000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL2__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL2__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL3
+#define CGTT_SX_CLK_CTRL3__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL3__RESERVED__SHIFT 0xd
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL3__RESERVED_MASK 0x0000E000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL3__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL3__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_SX_CLK_CTRL4
+#define CGTT_SX_CLK_CTRL4__ON_DELAY__SHIFT 0x0
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_SX_CLK_CTRL4__RESERVED__SHIFT 0xc
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_SX_CLK_CTRL4__ON_DELAY_MASK 0x0000000FL
+#define CGTT_SX_CLK_CTRL4__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_SX_CLK_CTRL4__RESERVED_MASK 0x0000F000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_SX_CLK_CTRL4__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_SX_CLK_CTRL4__SOFT_OVERRIDE0_MASK 0x80000000L
+//TD_CGTT_CTRL
+#define TD_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TD_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TD_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TD_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TD_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TD_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//TA_CGTT_CTRL
+#define TA_CGTT_CTRL__ON_DELAY__SHIFT 0x0
+#define TA_CGTT_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define TA_CGTT_CTRL__ON_DELAY_MASK 0x0000000FL
+#define TA_CGTT_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define TA_CGTT_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define TA_CGTT_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCPI_CLK_CTRL
+#define CGTT_TCPI_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCPI_CLK_CTRL__SPARE__SHIFT 0xc
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0xf
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x17
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x18
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_TCPI_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCPI_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCPI_CLK_CTRL__SPARE_MASK 0x00007000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00008000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x00800000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x01000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_TCPI_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x40000000L
+//CGTT_GDS_CLK_CTRL
+#define CGTT_GDS_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_GDS_CLK_CTRL__UNUSED__SHIFT 0xc
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CGTT_GDS_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_GDS_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_GDS_CLK_CTRL__UNUSED_MASK 0x0000F000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_GDS_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CGTT_GDS_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//DB_CGTT_CLK_CTRL_0
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY__SHIFT 0x0
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS__SHIFT 0x4
+#define DB_CGTT_CLK_CTRL_0__RESERVED__SHIFT 0xc
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7__SHIFT 0x18
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6__SHIFT 0x19
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1__SHIFT 0x1e
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0__SHIFT 0x1f
+#define DB_CGTT_CLK_CTRL_0__ON_DELAY_MASK 0x0000000FL
+#define DB_CGTT_CLK_CTRL_0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define DB_CGTT_CLK_CTRL_0__RESERVED_MASK 0x0000F000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE1_MASK 0x40000000L
+#define DB_CGTT_CLK_CTRL_0__SOFT_OVERRIDE0_MASK 0x80000000L
+//CB_CGTT_SCLK_CTRL
+#define CB_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define CB_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CB_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CB_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define CB_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GL2C_CGTT_SCLK_CTRL
+#define GL2C_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GL2C_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GL2C_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GL2C_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GL2C_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GL2A_CGTT_SCLK_CTRL
+#define GL2A_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GL2A_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GL2A_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GL2A_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GL2A_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GL2A_CGTT_SCLK_CTRL_1
+#define GL2A_CGTT_SCLK_CTRL_1__ON_DELAY__SHIFT 0x0
+#define GL2A_CGTT_SCLK_CTRL_1__OFF_HYSTERESIS__SHIFT 0x4
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE7__SHIFT 0x18
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE6__SHIFT 0x19
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GL2A_CGTT_SCLK_CTRL_1__ON_DELAY_MASK 0x0000000FL
+#define GL2A_CGTT_SCLK_CTRL_1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GL2A_CGTT_SCLK_CTRL_1__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_CP_CLK_CTRL
+#define CGTT_CP_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CP_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CP_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CP_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CP_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CP_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPF_CLK_CTRL
+#define CGTT_CPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1a
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT__SHIFT 0x1b
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP__SHIFT 0x1c
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX__SHIFT 0x1d
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPF_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x04000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_PRT_MASK 0x08000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_CMP_MASK 0x10000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_GFX_MASK 0x20000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPF_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_CPC_CLK_CTRL
+#define CGTT_CPC_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON__SHIFT 0x1d
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_CPC_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_CPC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_CPC_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_CPC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_PERFMON_MASK 0x20000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_CPC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//CGTT_RLC_CLK_CTRL
+#define CGTT_RLC_CLK_CTRL__RESERVED__SHIFT 0x0
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define CGTT_RLC_CLK_CTRL__RESERVED_MASK 0x0000000FL
+#define CGTT_RLC_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_RLC_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define CGTT_RLC_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
//RLC_GFX_RM_CNTL
#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID__SHIFT 0x0
#define RLC_GFX_RM_CNTL__RESERVED__SHIFT 0x1
#define RLC_GFX_RM_CNTL__RLC_GFX_RM_VALID_MASK 0x00000001L
#define RLC_GFX_RM_CNTL__RESERVED_MASK 0xFFFFFFFEL
-
+//RMI_CGTT_SCLK_CTRL
+#define RMI_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define RMI_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define RMI_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define RMI_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//CGTT_TCPF_CLK_CTRL
+#define CGTT_TCPF_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_TCPF_CLK_CTRL__SPARE__SHIFT 0xc
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0xf
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x10
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x11
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x12
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x13
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x14
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x15
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x16
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x17
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x18
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x19
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1a
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1b
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1c
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1d
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1e
+#define CGTT_TCPF_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define CGTT_TCPF_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_TCPF_CLK_CTRL__SPARE_MASK 0x00007000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00008000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00010000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00020000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00040000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00080000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00100000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00200000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00400000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x00800000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x01000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x02000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x04000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x08000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x10000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x20000000L
+#define CGTT_TCPF_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x40000000L
+//GCR_CGTT_SCLK_CTRL
+#define GCR_CGTT_SCLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GCR_CGTT_SCLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define GCR_CGTT_SCLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GCR_CGTT_SCLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define GCR_CGTT_SCLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//UTCL1_CGTT_CLK_CTRL
+#define UTCL1_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define UTCL1_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f
+#define UTCL1_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define UTCL1_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L
+#define UTCL1_CGTT_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L
+//GCEA_CGTT_CLK_CTRL
+#define GCEA_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GCEA_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define GCEA_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define GCEA_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GCEA_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GCEA_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define GCEA_CGTT_CLK_CTRL__SPARE1_MASK 0x0F800000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define GCEA_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//SE_CAC_CGTT_CLK_CTRL
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define SE_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define SE_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define SE_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//GC_CAC_CGTT_CLK_CTRL
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG__SHIFT 0x1f
+#define GC_CAC_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GC_CAC_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+#define GC_CAC_CGTT_CLK_CTRL__SOFT_OVERRIDE_REG_MASK 0x80000000L
+//GRBM_CGTT_CLK_CNTL
+#define GRBM_CGTT_CLK_CNTL__ON_DELAY__SHIFT 0x0
+#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS__SHIFT 0x4
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0__SHIFT 0x17
+#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN__SHIFT 0x1e
+#define GRBM_CGTT_CLK_CNTL__ON_DELAY_MASK 0x0000000FL
+#define GRBM_CGTT_CLK_CNTL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L
+#define GRBM_CGTT_CLK_CNTL__SOFT_OVERRIDE_DYN_MASK 0x40000000L
+//GUS_CGTT_CLK_CTRL
+#define GUS_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0
+#define GUS_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4
+#define GUS_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_DRAM__SHIFT 0x13
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16
+#define GUS_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_DRAM__SHIFT 0x1b
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f
+#define GUS_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL
+#define GUS_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define GUS_CGTT_CLK_CTRL__SPARE0_MASK 0x0007F000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_DRAM_MASK 0x00080000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L
+#define GUS_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L
+#define GUS_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_DRAM_MASK 0x08000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L
+#define GUS_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L
+//CGTT_PH_CLK_CTRL0
+#define CGTT_PH_CLK_CTRL0__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE7__SHIFT 0x10
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE6__SHIFT 0x11
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE5__SHIFT 0x12
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE4__SHIFT 0x13
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE3__SHIFT 0x14
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE2__SHIFT 0x15
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE1__SHIFT 0x16
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE__SHIFT 0x1f
+#define CGTT_PH_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE7_MASK 0x00010000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE6_MASK 0x00020000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE5_MASK 0x00040000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE4_MASK 0x00080000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE3_MASK 0x00100000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE2_MASK 0x00200000L
+#define CGTT_PH_CLK_CTRL0__SOFT_STALL_OVERRIDE1_MASK 0x00400000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL0__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL0__PERFMON_CLK_OVERRIDE_MASK 0x40000000L
+#define CGTT_PH_CLK_CTRL0__REG_CLK_OVERRIDE_MASK 0x80000000L
+//CGTT_PH_CLK_CTRL1
+#define CGTT_PH_CLK_CTRL1__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL1__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL1__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL1__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL2
+#define CGTT_PH_CLK_CTRL2__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL2__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL2__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL2__SOFT_OVERRIDE1_MASK 0x40000000L
+//CGTT_PH_CLK_CTRL3
+#define CGTT_PH_CLK_CTRL3__ON_DELAY__SHIFT 0x0
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS__SHIFT 0x4
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7__SHIFT 0x18
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6__SHIFT 0x19
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5__SHIFT 0x1a
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4__SHIFT 0x1b
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3__SHIFT 0x1c
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2__SHIFT 0x1d
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1__SHIFT 0x1e
+#define CGTT_PH_CLK_CTRL3__ON_DELAY_MASK 0x0000000FL
+#define CGTT_PH_CLK_CTRL3__OFF_HYSTERESIS_MASK 0x00000FF0L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE7_MASK 0x01000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE6_MASK 0x02000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE5_MASK 0x04000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE4_MASK 0x08000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE3_MASK 0x10000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE2_MASK 0x20000000L
+#define CGTT_PH_CLK_CTRL3__SOFT_OVERRIDE1_MASK 0x40000000L
// addressBlock: gc_hypdec
//CP_HYP_PFP_UCODE_ADDR
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
index d8632ccf3494..c488d4a50cf4 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
@@ -4409,6 +4409,10 @@
#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_SIZE_BASE_IDX 1
#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL 0x0af9
#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL_BASE_IDX 1
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
// addressBlock: mmhub_utcl2_vmsharedvcdec
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
index 111a71b434e2..2969fbf282b7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -26728,6 +26728,14 @@
//VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL
#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE__SHIFT 0x0
#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE_MASK 0x00000001L
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x3
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000038L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
// addressBlock: mmhub_utcl2_vmsharedvcdec
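The PF_LFB_REGION and PF_MAX_REGION fields added above follow this header's usual __SHIFT/_MASK pairing, so a consumer decodes them with the standard REG_GET_FIELD helper. A minimal sketch of such a reader, assuming amdgpu's SOC15 accessor macros; the actual call site (the XGMI setup path) is not part of this diff:

/* Hedged sketch: decode the new XGMI local-framebuffer fields.
 * REG_GET_FIELD(v, REG, FIELD) expands to
 * ((v) & REG##__##FIELD##_MASK) >> REG##__##FIELD##__SHIFT,
 * using exactly the definitions introduced above. */
static void example_read_xgmi_lfb(struct amdgpu_device *adev)
{
	u32 cntl = RREG32_SOC15(MMHUB, 0, mmMC_VM_XGMI_LFB_CNTL);
	u32 region = REG_GET_FIELD(cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
	u32 max = REG_GET_FIELD(cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);

	dev_info(adev->dev, "XGMI LFB region %u, max region %u\n", region, max);
}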
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
index b798cf5a2c39..38adde3cae5a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h
@@ -29,5 +29,7 @@
#define regMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 2
#define regMCA_UMC_UMC0_MCUMC_ADDRT0 0x03c4
#define regMCA_UMC_UMC0_MCUMC_ADDRT0_BASE_IDX 2
+#define regUMCCH0_0_GeccCtrl 0x0053
+#define regUMCCH0_0_GeccCtrl_BASE_IDX 2
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
index bd99b431247f..4dbec524f943 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h
@@ -90,5 +90,8 @@
#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38
#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+//UMCCH0_0_GeccCtrl
+#define UMCCH0_0_GeccCtrl__UCFatalEn__SHIFT 0xd
+#define UMCCH0_0_GeccCtrl__UCFatalEn_MASK 0x00002000L
#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 7e3231c2191c..a40ead44778a 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -824,4 +824,62 @@ struct gpu_metrics_v2_2 {
uint64_t indep_throttle_status;
};
+struct gpu_metrics_v2_3 {
+ struct metrics_table_header common_header;
+
+ /* Temperature */
+ uint16_t temperature_gfx; // gfx temperature on APUs
+ uint16_t temperature_soc; // soc temperature on APUs
+ uint16_t temperature_core[8]; // CPU core temperature on APUs
+ uint16_t temperature_l3[2];
+
+ /* Utilization */
+ uint16_t average_gfx_activity;
+ uint16_t average_mm_activity; // UVD or VCN
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Power/Energy */
+ uint16_t average_socket_power; // dGPU + APU power on A + A platform
+ uint16_t average_cpu_power;
+ uint16_t average_soc_power;
+ uint16_t average_gfx_power;
+ uint16_t average_core_power[8]; // CPU core power on APUs
+
+ /* Average clocks */
+ uint16_t average_gfxclk_frequency;
+ uint16_t average_socclk_frequency;
+ uint16_t average_uclk_frequency;
+ uint16_t average_fclk_frequency;
+ uint16_t average_vclk_frequency;
+ uint16_t average_dclk_frequency;
+
+ /* Current clocks */
+ uint16_t current_gfxclk;
+ uint16_t current_socclk;
+ uint16_t current_uclk;
+ uint16_t current_fclk;
+ uint16_t current_vclk;
+ uint16_t current_dclk;
+ uint16_t current_coreclk[8]; // CPU core clocks
+ uint16_t current_l3clk[2];
+
+ /* Throttle status (ASIC dependent) */
+ uint32_t throttle_status;
+
+ /* Fans */
+ uint16_t fan_pwm;
+
+ uint16_t padding[3];
+
+ /* Throttle status (ASIC independent) */
+ uint64_t indep_throttle_status;
+
+ /* Average Temperature */
+ uint16_t average_temperature_gfx; // average gfx temperature on APUs
+ uint16_t average_temperature_soc; // average soc temperature on APUs
+ uint16_t average_temperature_core[8]; // average CPU core temperature on APUs
+ uint16_t average_temperature_l3[2];
+};
#endif
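gpu_metrics_v2_3 extends gpu_metrics_v2_2 purely by appending the average-temperature fields at the end, so existing v2.2 consumers keep working and newer readers key off the common header. A sketch of the reader side, assuming metrics_table_header carries format_revision and content_revision as elsewhere in this file:

/* Sketch: size the read by the revision advertised in the header, so a
 * v2.2-only consumer never reads past the struct it knows about. */
static size_t gpu_metrics_v2_size(const struct metrics_table_header *hdr)
{
	if (hdr->format_revision != 2)
		return 0;
	switch (hdr->content_revision) {
	case 3:
		return sizeof(struct gpu_metrics_v2_3);
	case 2:
		return sizeof(struct gpu_metrics_v2_2);
	default:
		return 0; /* unknown revision: refuse rather than guess */
	}
}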
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 50bfa513cb35..7e85cdc5bd34 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -269,7 +269,8 @@ union MESAPI__ADD_QUEUE {
uint32_t map_kiq_utility_queue : 1;
uint32_t is_kfd_process : 1;
uint32_t trap_en : 1;
- uint32_t reserved : 21;
+ uint32_t is_aql_queue : 1;
+ uint32_t reserved : 20;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;
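The ADD_QUEUE change is the standard bitfield-ABI move: the new 1-bit is_aql_queue flag is carved out of reserved, whose width drops from 21 to 20 so the bits still total 32 and the union's dword layout is unchanged (reserved was 21, so the named flags above it total 11 bits). A compile-time guard of this shape, hypothetical and not part of the patch, keeps that invariant honest:

/* Illustrative only: the widths must sum to one 32-bit dword. The
 * "earlier_flags" width stands in for the 11 named bits before
 * is_aql_queue. */
#include <stdint.h>

union add_queue_flags_model {
	struct {
		uint32_t earlier_flags : 11;
		uint32_t is_aql_queue  : 1;
		uint32_t reserved      : 20;
	};
	uint32_t dword;
};
_Static_assert(sizeof(union add_queue_flags_model) == sizeof(uint32_t),
	       "ADD_QUEUE flag bits must stay within one dword");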
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5e318b3f6c0f..948cc75376f8 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3405,9 +3405,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
- if (adev->pm.dpm_enabled == 0)
- return;
-
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 8fd0782a2b20..f5e08b60f66e 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -1384,13 +1384,16 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
static void kv_dpm_disable(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
+ int err;
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- amdgpu_kv_smc_bapm_enable(adev, false);
+ err = amdgpu_kv_smc_bapm_enable(adev, false);
+ if (err)
+ DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
if (adev->asic_type == CHIP_MULLINS)
kv_enable_nb_dpm(adev, false);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 1eb4e613b27a..ec055858eb95 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1485,6 +1485,7 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
struct pp_hwmgr *hwmgr = handle;
struct amdgpu_device *adev = hwmgr->adev;
+ int err;
if (!addr || !size)
return -EINVAL;
@@ -1492,7 +1493,9 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
if (adev->pm.smu_prv_buffer) {
- amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
+ err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
+ if (err)
+ return err;
*size = adev->pm.smu_prv_buffer_size;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index e4fcbf8a7eb5..7ef7e81525a3 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -3603,7 +3603,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
return -EINVAL);
PP_ASSERT_WITH_CODE(
- (smu7_power_state->performance_level_count <=
+ (smu7_power_state->performance_level_count <
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
return -EINVAL);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 99bfe5efe171..c8c9fb827bda 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -3155,7 +3155,7 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
return -1);
PP_ASSERT_WITH_CODE(
- (vega10_ps->performance_level_count <=
+ (vega10_ps->performance_level_count <
hwmgr->platform_descriptor.
hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
index dad3e3741a4e..190af79f3236 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
@@ -67,22 +67,21 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
int vega10_fan_ctrl_get_fan_speed_pwm(struct pp_hwmgr *hwmgr,
uint32_t *speed)
{
- uint32_t current_rpm;
- uint32_t percent = 0;
-
- if (hwmgr->thermal_controller.fanInfo.bNoFan)
- return 0;
+ struct amdgpu_device *adev = hwmgr->adev;
+ uint32_t duty100, duty;
+ uint64_t tmp64;
- if (vega10_get_current_rpm(hwmgr, &current_rpm))
- return -1;
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
+ CG_THERMAL_STATUS, FDO_PWM_DUTY);
- if (hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM != 0)
- percent = current_rpm * 255 /
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM;
+ if (!duty100)
+ return -EINVAL;
- *speed = MIN(percent, 255);
+ tmp64 = (uint64_t)duty * 255;
+ do_div(tmp64, duty100);
+ *speed = MIN((uint32_t)tmp64, 255);
return 0;
}
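The rewritten readback stops inferring PWM from fan RPM and instead reports the fan controller's actual duty cycle, rescaled from 0..duty100 onto the sysfs 0..255 range: speed = duty * 255 / duty100. The 64-bit intermediate with do_div() keeps the multiply from overflowing and avoids an open-coded 64-bit division, which 32-bit kernels cannot link. The same arithmetic in plain C, for reference:

/* Reference math only; the driver variant returns -EINVAL when
 * duty100 == 0 and uses do_div() instead of the '/' operator. */
static unsigned int duty_to_pwm255(unsigned int duty, unsigned int duty100)
{
	unsigned long long tmp;

	if (duty100 == 0)
		return 0;
	tmp = (unsigned long long)duty * 255;
	tmp /= duty100;
	return tmp > 255 ? 255 : (unsigned int)tmp;
}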
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
index 1e79baab753e..bd54fbd393b9 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
@@ -195,7 +195,6 @@ static int init_powerplay_table_information(
struct phm_ppt_v3_information *pptable_information =
(struct phm_ppt_v3_information *)hwmgr->pptable;
uint32_t disable_power_control = 0;
- int result;
hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType;
pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType;
@@ -257,9 +256,7 @@ static int init_powerplay_table_information(
if (pptable_information->smc_pptable == NULL)
return -ENOMEM;
- result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
-
- return result;
+ return append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
}
static int vega12_pp_tables_initialize(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
index 45214a364baa..e7ed2a7adf8f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
@@ -2567,15 +2567,13 @@ static uint8_t polaris10_get_memory_modile_index(struct pp_hwmgr *hwmgr)
static int polaris10_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- int result;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
pp_atomctrl_mc_reg_table *mc_reg_table = &smu_data->mc_reg_table;
uint8_t module_index = polaris10_get_memory_modile_index(hwmgr);
memset(mc_reg_table, 0, sizeof(pp_atomctrl_mc_reg_table));
- result = atomctrl_initialize_mc_reg_table_v2_2(hwmgr, module_index, mc_reg_table);
- return result;
+ return atomctrl_initialize_mc_reg_table_v2_2(hwmgr, module_index, mc_reg_table);
}
static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 7ed4d4265797..74996a8fb671 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -369,6 +369,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
smu_baco->platform_support =
(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
false;
+
+ /*
+	 * Disable BACO entry/exit completely on the SKUs below to
+	 * avoid intermittent hardware failures.
+ */
+ if (((adev->pdev->device == 0x73A1) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73BF) &&
+ (adev->pdev->revision == 0xCF)))
+ smu_baco->platform_support = false;
+
}
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 847990145dcd..cb10c7e31264 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -223,14 +223,13 @@ static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
- struct amdgpu_device *adev = smu->adev;
uint32_t if_version;
+ uint32_t smu_version;
uint32_t ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret) {
- dev_err(adev->dev, "Failed to get smu if version!\n");
- goto err0_out;
+ return ret;
}
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
@@ -255,7 +254,10 @@ static int vangogh_tables_init(struct smu_context *smu)
goto err0_out;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+ if (smu_version >= 0x043F3E00)
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3);
+ else
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table)
goto err1_out;
@@ -1648,6 +1650,63 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
return 0;
}
+static ssize_t vangogh_get_legacy_gpu_metrics_v2_3(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_3 *gpu_metrics =
+ (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
+ SmuMetrics_legacy_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);
+
+ gpu_metrics->temperature_gfx = metrics.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
+
+ gpu_metrics->average_gfx_activity = metrics.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Power[0];
+ gpu_metrics->average_soc_power = metrics.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Power[2];
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.CorePower[0],
+ sizeof(uint16_t) * 4);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.CoreFrequency[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
+
+ gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
+ vangogh_throttler_map);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_3);
+}
+
static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1705,6 +1764,77 @@ static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_2);
}
+static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_3 *gpu_metrics =
+ (struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);
+
+ gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.Current.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];
+
+ gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
+ gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
+ memcpy(&gpu_metrics->average_temperature_core[0],
+ &metrics.Average.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];
+
+ gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Current.Power[0];
+ gpu_metrics->average_soc_power = metrics.Current.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Current.Power[2];
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.Average.CorePower[0],
+ sizeof(uint16_t) * 4);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+ gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+ gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+ gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.Current.CoreFrequency[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];
+
+ gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
+ vangogh_throttler_map);
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_3);
+}
+
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
void **table)
{
@@ -1772,20 +1902,26 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
void **table)
{
- struct amdgpu_device *adev = smu->adev;
uint32_t if_version;
+ uint32_t smu_version;
int ret = 0;
- ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret) {
- dev_err(adev->dev, "Failed to get smu if version!\n");
return ret;
}
- if (if_version < 0x3)
- ret = vangogh_get_legacy_gpu_metrics(smu, table);
- else
- ret = vangogh_get_gpu_metrics(smu, table);
+ if (smu_version >= 0x043F3E00) {
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics_v2_3(smu, table);
+ } else {
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics(smu, table);
+ }
return ret;
}
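Taken together with the vangogh_tables_init() hunk, the getter now dispatches along two independent axes: the driver-interface version selects the legacy or current SmuMetrics_t layout coming from firmware, while the firmware version selects whether the exported struct is v2.2 or v2.3. The buffer sized in vangogh_tables_init() and the struct the getter casts gpu_metrics_table to must agree, which is why both sites test the same smu_version >= 0x043F3E00 threshold. Condensed restatement of the selection, mirroring the diff rather than adding logic:

/* Restated for clarity; no new behavior beyond the hunks above. */
if (smu_version >= 0x043F3E00)	/* PMFW reports the larger v2.3 payload */
	ret = (if_version < 0x3) ?
		vangogh_get_legacy_gpu_metrics_v2_3(smu, table) :
		vangogh_get_gpu_metrics_v2_3(smu, table);
else				/* older PMFW: keep exporting v2.2 */
	ret = (if_version < 0x3) ?
		vangogh_get_legacy_gpu_metrics(smu, table) :
		vangogh_get_gpu_metrics(smu, table);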
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 6e4a052dc53d..93fffdbab4f0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -210,7 +210,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7))
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)))
return 0;
/* override pptable_id from driver parameter */
@@ -219,27 +220,6 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
dev_info(adev->dev, "override pptable id %d\n", pptable_id);
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
-
- /*
- * Temporary solution for SMU V13.0.0 with SCPM enabled:
- * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
- * - use 36831 soft pptable when pptable_id is 3683
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 3664:
- case 3715:
- case 3795:
- pptable_id = 0;
- break;
- case 3683:
- pptable_id = 36831;
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
}
/* "pptable_id == 0" means vbios carries the pptable. */
@@ -475,28 +455,8 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
- /*
- * Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use 3664, 3683 or 3715 on request
- * - use 3664 when pptable_id is 0
- * TODO: drop these when the pptable carried in vbios is ready.
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 0:
- pptable_id = 3664;
- break;
- case 3664:
- case 3683:
- case 3715:
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- } else if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) {
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
pptable_id = 6666;
- }
}
/* force using vbios pptable in sriov mode */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 7db2fd9ea74a..1d454485e0d9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -239,82 +239,47 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask, uint32_t num)
{
struct amdgpu_device *adev = smu->adev;
+ u32 smu_version;
if (num > 2)
return -EINVAL;
- memset(feature_mask, 0, sizeof(uint32_t) * num);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);
+ memset(feature_mask, 0xff, sizeof(uint32_t) * num);
- if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
-
- if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
- (adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
-
- if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
+ !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
-#if 0
- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
-#endif
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
+ if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
-
- if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
+	/* PMFW 78.58 contains a critical fix for the gfxoff feature */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if ((smu_version < 0x004e3a00) ||
+ !(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
+
+ if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
+ if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_DCFCLK_BIT);
-
- if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) {
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
+ if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
}
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
-
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
-
- if (adev->pm.pp_feature & PP_ULV_MASK)
- *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+ if (!(adev->pm.pp_feature & PP_ULV_MASK))
+ *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
return 0;
}
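
A minimal sketch of the opt-out pattern the hunk above switches to: start from an all-ones mask and clear only what the current configuration cannot support (the bit numbers and flag values below are hypothetical, not the real SMU feature list):

#include <stdint.h>

#define FEATURE_MASK(feature)	(1ULL << (feature))

static uint64_t allowed_features(uint64_t pp_feature)
{
	uint64_t mask = ~0ULL;			/* everything enabled by default */

	if (!(pp_feature & 0x1))		/* hypothetical SCLK DPM flag */
		mask &= ~FEATURE_MASK(7);	/* hypothetical GFXCLK bit */
	if (!(pp_feature & 0x2))		/* hypothetical MCLK DPM flag */
		mask &= ~FEATURE_MASK(8);	/* hypothetical UCLK bit */

	return mask;
}

The upshot is that newly added firmware features default to enabled instead of silently missing from the mask.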
@@ -410,58 +375,11 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
- uint32_t pptable_id;
int ret = 0;
- /*
- * With SCPM enabled, the pptable used will be signed. It cannot
- * be used directly by driver. To get the raw pptable, we need to
- * rely on the combo pptable (and its relevant SMU message).
- */
- if (adev->scpm_enabled) {
- ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
- &smu_table->power_play_table,
- &smu_table->power_play_table_size);
- } else {
- /* override pptable_id from driver parameter */
- if (amdgpu_smu_pptable_id >= 0) {
- pptable_id = amdgpu_smu_pptable_id;
- dev_info(adev->dev, "override pptable id %d\n", pptable_id);
- } else {
- pptable_id = smu_table->boot_values.pp_table_id;
- }
-
- /*
- * Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
- * - use soft pptable when pptable_id is 3683
- */
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
- switch (pptable_id) {
- case 3664:
- case 3715:
- case 3795:
- pptable_id = 0;
- break;
- case 3683:
- break;
- default:
- dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
- return -EINVAL;
- }
- }
-
- /* force using vbios pptable in sriov mode */
- if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
- ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
- &smu_table->power_play_table,
- &smu_table->power_play_table_size);
- else
- ret = smu_v13_0_get_pptable_from_firmware(smu,
- &smu_table->power_play_table,
- &smu_table->power_play_table_size,
- pptable_id);
- }
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 15e4298c7cc8..e4f8f90ac5aa 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -969,6 +969,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(2, 2):
structure_size = sizeof(struct gpu_metrics_v2_2);
break;
+ case METRICS_VERSION(2, 3):
+ structure_size = sizeof(struct gpu_metrics_v2_3);
+ break;
default:
return;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 0f0950c11196..78b72739e5c3 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1346,7 +1346,7 @@ uninit_regulators:
return ret;
}
-static int adv7511_remove(struct i2c_client *i2c)
+static void adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
@@ -1362,8 +1362,6 @@ static int adv7511_remove(struct i2c_client *i2c)
i2c_unregister_device(adv7511->i2c_packet);
i2c_unregister_device(adv7511->i2c_edid);
-
- return 0;
}
static const struct i2c_device_id adv7511_i2c_ids[] = {
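
The adv7511 hunk above is the first of a long run of identical conversions in this merge: I2C remove callbacks now return void, so each driver simply drops its trailing "return 0;". A minimal sketch of the resulting shape, using a hypothetical driver:

/* Hypothetical bridge driver, showing only the .remove conversion. */
static void mydrv_remove(struct i2c_client *client)
{
	struct mydrv *priv = i2c_get_clientdata(client);

	drm_bridge_remove(&priv->bridge);
	/* no return value: the I2C core could not act on an error anyway */
}

static struct i2c_driver mydrv_driver = {
	.driver = { .name = "mydrv" },
	.remove = mydrv_remove,	/* was int-returning, always returned 0 */
};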
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index ae3d6e9a606c..660a54857929 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -787,7 +787,7 @@ err_unregister_i2c:
return err;
}
-static int anx6345_i2c_remove(struct i2c_client *client)
+static void anx6345_i2c_remove(struct i2c_client *client)
{
struct anx6345 *anx6345 = i2c_get_clientdata(client);
@@ -798,8 +798,6 @@ static int anx6345_i2c_remove(struct i2c_client *client)
kfree(anx6345->edid);
mutex_destroy(&anx6345->lock);
-
- return 0;
}
static const struct i2c_device_id anx6345_id[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index d2fc8676fab6..5997049fde5b 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1357,7 +1357,7 @@ err_unregister_i2c:
return err;
}
-static int anx78xx_i2c_remove(struct i2c_client *client)
+static void anx78xx_i2c_remove(struct i2c_client *client)
{
struct anx78xx *anx78xx = i2c_get_clientdata(client);
@@ -1366,8 +1366,6 @@ static int anx78xx_i2c_remove(struct i2c_client *client)
unregister_i2c_dummy_clients(anx78xx);
kfree(anx78xx->edid);
-
- return 0;
}
static const struct i2c_device_id anx78xx_id[] = {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 8aadcc0aa90b..df9370e0ff23 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1864,12 +1864,6 @@ EXPORT_SYMBOL_GPL(analogix_dp_remove);
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
clk_disable_unprepare(dp->clock);
-
- if (dp->plat_data->panel) {
- if (drm_panel_unprepare(dp->plat_data->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_suspend);
@@ -1884,13 +1878,6 @@ int analogix_dp_resume(struct analogix_dp_device *dp)
return ret;
}
- if (dp->plat_data->panel) {
- if (drm_panel_prepare(dp->plat_data->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return -EBUSY;
- }
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(analogix_dp_resume);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 0c323b5a1c99..b0ff1ecb80a5 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2715,7 +2715,7 @@ free_hdcp_wq:
return ret;
}
-static int anx7625_i2c_remove(struct i2c_client *client)
+static void anx7625_i2c_remove(struct i2c_client *client)
{
struct anx7625_data *platform = i2c_get_clientdata(client);
@@ -2735,8 +2735,6 @@ static int anx7625_i2c_remove(struct i2c_client *client)
if (platform->pdata.audio_en)
anx7625_unregister_audio(platform);
-
- return 0;
}
static const struct i2c_device_id anx7625_id[] = {
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index ba060277c3fd..b94f39a86846 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -583,14 +583,12 @@ static int ch7033_probe(struct i2c_client *client,
return 0;
}
-static int ch7033_remove(struct i2c_client *client)
+static void ch7033_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ch7033_priv *priv = dev_get_drvdata(dev);
drm_bridge_remove(&priv->bridge);
-
- return 0;
}
static const struct of_device_id ch7033_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/cros-ec-anx7688.c b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
index 0f6d907432e3..fa91bdeddef0 100644
--- a/drivers/gpu/drm/bridge/cros-ec-anx7688.c
+++ b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
@@ -159,13 +159,11 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
return 0;
}
-static int cros_ec_anx7688_bridge_remove(struct i2c_client *client)
+static void cros_ec_anx7688_bridge_remove(struct i2c_client *client)
{
struct cros_ec_anx7688 *anx7688 = i2c_get_clientdata(client);
drm_bridge_remove(&anx7688->bridge);
-
- return 0;
}
static const struct of_device_id cros_ec_anx7688_bridge_match_table[] = {
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index a4302492cf8d..1fd09911fe8c 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -3363,7 +3363,7 @@ static int it6505_i2c_probe(struct i2c_client *client,
return 0;
}
-static int it6505_i2c_remove(struct i2c_client *client)
+static void it6505_i2c_remove(struct i2c_client *client)
{
struct it6505 *it6505 = i2c_get_clientdata(client);
@@ -3371,8 +3371,6 @@ static int it6505_i2c_remove(struct i2c_client *client)
drm_dp_aux_unregister(&it6505->aux);
it6505_debugfs_remove(it6505);
it6505_poweroff(it6505);
-
- return 0;
}
static const struct i2c_device_id it6505_id[] = {
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 44278d54d35d..4f6f1deba28c 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -1623,15 +1623,13 @@ static int it66121_probe(struct i2c_client *client,
return 0;
}
-static int it66121_remove(struct i2c_client *client)
+static void it66121_remove(struct i2c_client *client)
{
struct it66121_ctx *ctx = i2c_get_clientdata(client);
ite66121_power_off(ctx);
drm_bridge_remove(&ctx->bridge);
mutex_destroy(&ctx->lock);
-
- return 0;
}
static const struct of_device_id it66121_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 3e870d45f881..a98efef0ba0e 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -731,7 +731,7 @@ err_dt_parse:
return ret;
}
-static int lt8912_remove(struct i2c_client *client)
+static void lt8912_remove(struct i2c_client *client)
{
struct lt8912 *lt = i2c_get_clientdata(client);
@@ -739,7 +739,6 @@ static int lt8912_remove(struct i2c_client *client)
drm_bridge_remove(&lt->bridge);
lt8912_free_i2c(lt);
lt8912_put_dt(lt);
- return 0;
}
static const struct of_device_id lt8912_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 9a3e90427d12..933ca028d612 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -766,13 +766,11 @@ static int lt9211_probe(struct i2c_client *client,
return ret;
}
-static int lt9211_remove(struct i2c_client *client)
+static void lt9211_remove(struct i2c_client *client)
{
struct lt9211 *ctx = i2c_get_clientdata(client);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static struct i2c_device_id lt9211_id[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 5fccacc159f0..7c0a99173b39 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -1217,7 +1217,7 @@ err_of_put:
return ret;
}
-static int lt9611_remove(struct i2c_client *client)
+static void lt9611_remove(struct i2c_client *client)
{
struct lt9611 *lt9611 = i2c_get_clientdata(client);
@@ -1229,8 +1229,6 @@ static int lt9611_remove(struct i2c_client *client)
of_node_put(lt9611->dsi1_node);
of_node_put(lt9611->dsi0_node);
-
- return 0;
}
static struct i2c_device_id lt9611_id[] = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index fdf12d4c6416..fa1ee6264d92 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -978,7 +978,7 @@ err_of_put:
return ret;
}
-static int lt9611uxc_remove(struct i2c_client *client)
+static void lt9611uxc_remove(struct i2c_client *client)
{
struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
@@ -993,8 +993,6 @@ static int lt9611uxc_remove(struct i2c_client *client)
of_node_put(lt9611uxc->dsi1_node);
of_node_put(lt9611uxc->dsi0_node);
-
- return 0;
}
static struct i2c_device_id lt9611uxc_id[] = {
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 72248a565579..97359f807bfc 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -357,11 +357,9 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
return ge_b850v3_register();
}
-static int stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
+static void stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
{
ge_b850v3_lvds_remove();
-
- return 0;
}
static const struct i2c_device_id stdp4028_ge_b850v3_fw_i2c_table[] = {
@@ -407,11 +405,9 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
return ge_b850v3_register();
}
-static int stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
+static void stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
{
ge_b850v3_lvds_remove();
-
- return 0;
}
static const struct i2c_device_id stdp2690_ge_b850v3_fw_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 1ab91f4e057b..0851101a8c72 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -315,13 +315,11 @@ static int ptn3460_probe(struct i2c_client *client,
return 0;
}
-static int ptn3460_remove(struct i2c_client *client)
+static void ptn3460_remove(struct i2c_client *client)
{
struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client);
drm_bridge_remove(&ptn_bridge->bridge);
-
- return 0;
}
static const struct i2c_device_id ptn3460_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index b5750e5f71d7..309de802863d 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -520,14 +520,12 @@ static int ps8622_probe(struct i2c_client *client,
return 0;
}
-static int ps8622_remove(struct i2c_client *client)
+static void ps8622_remove(struct i2c_client *client)
{
struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
backlight_device_unregister(ps8622->bl);
drm_bridge_remove(&ps8622->bridge);
-
- return 0;
}
static const struct i2c_device_id ps8622_i2c_table[] = {
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 7ab38d734ad6..878fb7d3732b 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1145,7 +1145,7 @@ static int sii902x_probe(struct i2c_client *client,
return ret;
}
-static int sii902x_remove(struct i2c_client *client)
+static void sii902x_remove(struct i2c_client *client)
{
struct sii902x *sii902x = i2c_get_clientdata(client);
@@ -1154,8 +1154,6 @@ static int sii902x_remove(struct i2c_client *client)
drm_bridge_remove(&sii902x->bridge);
regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
sii902x->supplies);
-
- return 0;
}
static const struct of_device_id sii902x_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 15c98a7bd81c..5b3061d4b5c3 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -936,14 +936,12 @@ static int sii9234_probe(struct i2c_client *client,
return 0;
}
-static int sii9234_remove(struct i2c_client *client)
+static void sii9234_remove(struct i2c_client *client)
{
struct sii9234 *ctx = i2c_get_clientdata(client);
sii9234_cable_out(ctx);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id sii9234_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index ab0bce4a988c..511982a1cedb 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2346,7 +2346,7 @@ static int sii8620_probe(struct i2c_client *client,
return 0;
}
-static int sii8620_remove(struct i2c_client *client)
+static void sii8620_remove(struct i2c_client *client)
{
struct sii8620 *ctx = i2c_get_clientdata(client);
@@ -2360,8 +2360,6 @@ static int sii8620_remove(struct i2c_client *client)
sii8620_cable_out(ctx);
}
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static const struct of_device_id sii8620_dt_match[] = {
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 89e060b273ef..2a58eb271f70 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2184,13 +2184,11 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int tc_remove(struct i2c_client *client)
+static void tc_remove(struct i2c_client *client)
{
struct tc_data *tc = i2c_get_clientdata(client);
drm_bridge_remove(&tc->bridge);
-
- return 0;
}
static const struct i2c_device_id tc358767_i2c_ids[] = {
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index fd585bf925fe..4c4b77ce8aba 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -1072,13 +1072,11 @@ static int tc358768_i2c_probe(struct i2c_client *client,
return mipi_dsi_host_register(&priv->dsi_host);
}
-static int tc358768_i2c_remove(struct i2c_client *client)
+static void tc358768_i2c_remove(struct i2c_client *client)
{
struct tc358768_priv *priv = i2c_get_clientdata(client);
mipi_dsi_host_unregister(&priv->dsi_host);
-
- return 0;
}
static struct i2c_driver tc358768_driver = {
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index a5f5eae1e80f..3ceb0e9f9bdc 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -704,13 +704,11 @@ err_bridge_remove:
return ret;
}
-static int tc_remove(struct i2c_client *client)
+static void tc_remove(struct i2c_client *client)
{
struct tc_data *tc = i2c_get_clientdata(client);
drm_bridge_remove(&tc->bridge);
-
- return 0;
}
static const struct i2c_device_id tc358775_i2c_ids[] = {
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index cef454862b67..186a9e2ff24d 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -379,14 +379,12 @@ err_remove_bridge:
return ret;
}
-static int dlpc3433_remove(struct i2c_client *client)
+static void dlpc3433_remove(struct i2c_client *client)
{
struct dlpc *dlpc = i2c_get_clientdata(client);
drm_bridge_remove(&dlpc->bridge);
of_node_put(dlpc->host_node);
-
- return 0;
}
static const struct i2c_device_id dlpc3433_id[] = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 14e7aa77e758..7ba9467fff12 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -708,13 +708,11 @@ err_remove_bridge:
return ret;
}
-static int sn65dsi83_remove(struct i2c_client *client)
+static void sn65dsi83_remove(struct i2c_client *client)
{
struct sn65dsi83 *ctx = i2c_get_clientdata(client);
drm_bridge_remove(&ctx->bridge);
-
- return 0;
}
static struct i2c_device_id sn65dsi83_id[] = {
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 401fe61217c7..b9635abbad16 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -394,11 +394,9 @@ static int tfp410_i2c_probe(struct i2c_client *client,
return tfp410_init(&client->dev, true);
}
-static int tfp410_i2c_remove(struct i2c_client *client)
+static void tfp410_i2c_remove(struct i2c_client *client)
{
tfp410_fini(&client->dev);
-
- return 0;
}
static const struct i2c_device_id tfp410_i2c_ids[] = {
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 9f055d9710ea..16565a0a5da6 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
+#include <linux/dynamic_debug.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_mst_helper.h>
@@ -40,6 +41,18 @@
#include "drm_dp_helper_internal.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
struct dp_aux_backlight {
struct backlight_device *base;
struct drm_dp_aux *aux;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1f0a270ac984..f5fb22e0d033 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -32,6 +32,7 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -50,6 +51,18 @@
#include "drm_crtc_helper_internal.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
/**
* DOC: overview
*
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 493922069c90..01ee3febb813 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -377,8 +377,8 @@ static int vrr_range_show(struct seq_file *m, void *data)
if (connector->status != connector_status_connected)
return -ENODEV;
- seq_printf(m, "Min: %u\n", (u8)connector->display_info.monitor_range.min_vfreq);
- seq_printf(m, "Max: %u\n", (u8)connector->display_info.monitor_range.max_vfreq);
+ seq_printf(m, "Min: %u\n", connector->display_info.monitor_range.min_vfreq);
+ seq_printf(m, "Max: %u\n", connector->display_info.monitor_range.max_vfreq);
return 0;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 73c614efc132..47465b9765f1 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -6112,12 +6112,14 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
static
-void get_monitor_range(const struct detailed_timing *timing,
- void *info_monitor_range)
+void get_monitor_range(const struct detailed_timing *timing, void *c)
{
- struct drm_monitor_range_info *monitor_range = info_monitor_range;
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_info *info = &closure->connector->display_info;
+ struct drm_monitor_range_info *monitor_range = &info->monitor_range;
const struct detailed_non_pixel *data = &timing->data.other_data;
const struct detailed_data_monitor_range *range = &data->data.range;
+ const struct edid *edid = closure->drm_edid->edid;
if (!is_display_descriptor(timing, EDID_DETAIL_MONITOR_RANGE))
return;
@@ -6136,12 +6138,23 @@ void get_monitor_range(const struct detailed_timing *timing,
monitor_range->min_vfreq = range->min_vfreq;
monitor_range->max_vfreq = range->max_vfreq;
+
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ monitor_range->min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ monitor_range->max_vfreq += 255;
+ }
}
static void drm_get_monitor_range(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_info *info = &connector->display_info;
+ struct detailed_mode_closure closure = {
+ .connector = connector,
+ .drm_edid = drm_edid,
+ };
if (drm_edid->edid->revision < 4)
return;
@@ -6149,8 +6162,7 @@ static void drm_get_monitor_range(struct drm_connector *connector,
if (!(drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ))
return;
- drm_for_each_detailed_block(drm_edid, get_monitor_range,
- &info->monitor_range);
+ drm_for_each_detailed_block(drm_edid, get_monitor_range, &closure);
DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
info->monitor_range.min_vfreq,
info->monitor_range.max_vfreq);
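
In practice the offset flags let an EDID 1.4 range descriptor describe panels above 255 Hz: a stored max_vfreq byte of 45 with DRM_EDID_RANGE_OFFSET_MAX_VFREQ set now reports 45 + 255 = 300 Hz instead of wrapping. A hedged sketch of the decode (the 0x02 flag value is assumed here to match the kernel's definition):

/* Sketch of the EDID 1.4 vertical-refresh offset decode. */
static unsigned int decode_max_vfreq(unsigned char raw, unsigned char pad2,
				     int edid_revision)
{
	unsigned int vfreq = raw;	/* the descriptor stores one byte */

	if (edid_revision >= 4 && (pad2 & 0x02))	/* assumed MAX_VFREQ offset bit */
		vfreq += 255;		/* e.g. 45 -> 300 Hz */

	return vfreq;
}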
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index dbee4863e4f7..b8db675e7fb5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -165,24 +165,10 @@ void drm_gem_private_object_init(struct drm_device *dev,
obj->resv = &obj->_resv;
drm_vma_node_reset(&obj->vma_node);
+ INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
- /*
- * Note: obj->dma_buf can't disappear as long as we still hold a
- * handle reference in obj->handle_count.
- */
- mutex_lock(&filp->prime.lock);
- if (obj->dma_buf) {
- drm_prime_remove_buf_handle_locked(&filp->prime,
- obj->dma_buf);
- }
- mutex_unlock(&filp->prime.lock);
-}
-
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -253,7 +239,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_prime_remove_buf_handle(&file_priv->prime, id);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -951,6 +937,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
dma_resv_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
+ drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -1298,3 +1285,171 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
+
+/**
+ * drm_gem_lru_init - initialize a LRU
+ *
+ * @lru: The LRU to initialize
+ * @lock: The lock protecting the LRU
+ */
+void
+drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
+{
+ lru->lock = lock;
+ lru->count = 0;
+ INIT_LIST_HEAD(&lru->list);
+}
+EXPORT_SYMBOL(drm_gem_lru_init);
+
+static void
+drm_gem_lru_remove_locked(struct drm_gem_object *obj)
+{
+ obj->lru->count -= obj->size >> PAGE_SHIFT;
+ WARN_ON(obj->lru->count < 0);
+ list_del(&obj->lru_node);
+ obj->lru = NULL;
+}
+
+/**
+ * drm_gem_lru_remove - remove object from whatever LRU it is in
+ *
+ * If the object is currently in any LRU, remove it.
+ *
+ * @obj: The GEM object to remove from current LRU
+ */
+void
+drm_gem_lru_remove(struct drm_gem_object *obj)
+{
+ struct drm_gem_lru *lru = obj->lru;
+
+ if (!lru)
+ return;
+
+ mutex_lock(lru->lock);
+ drm_gem_lru_remove_locked(obj);
+ mutex_unlock(lru->lock);
+}
+EXPORT_SYMBOL(drm_gem_lru_remove);
+
+static void
+drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
+{
+ lockdep_assert_held_once(lru->lock);
+
+ if (obj->lru)
+ drm_gem_lru_remove_locked(obj);
+
+ lru->count += obj->size >> PAGE_SHIFT;
+ list_add_tail(&obj->lru_node, &lru->list);
+ obj->lru = lru;
+}
+
+/**
+ * drm_gem_lru_move_tail - move the object to the tail of the LRU
+ *
+ * If the object is already in this LRU it will be moved to the
+ * tail. Otherwise it will be removed from whichever other LRU
+ * it is in (if any) and moved into this LRU.
+ *
+ * @lru: The LRU to move the object into.
+ * @obj: The GEM object to move into this LRU
+ */
+void
+drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
+{
+ mutex_lock(lru->lock);
+ drm_gem_lru_move_tail_locked(lru, obj);
+ mutex_unlock(lru->lock);
+}
+EXPORT_SYMBOL(drm_gem_lru_move_tail);
+
+/**
+ * drm_gem_lru_scan - helper to implement shrinker.scan_objects
+ *
+ * If the shrink callback succeeds, it is expected that the driver
+ * move the object out of this LRU.
+ *
+ * If the LRU possibly contains active buffers, it is the responsibility
+ * of the shrink callback to check for this (ie. dma_resv_test_signaled())
+ * or if necessary block until the buffer becomes idle.
+ *
+ * @lru: The LRU to scan
+ * @nr_to_scan: The number of pages to try to reclaim
+ * @shrink: Callback to try to shrink/reclaim the object.
+ */
+unsigned long
+drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+ bool (*shrink)(struct drm_gem_object *obj))
+{
+ struct drm_gem_lru still_in_lru;
+ struct drm_gem_object *obj;
+ unsigned freed = 0;
+
+ drm_gem_lru_init(&still_in_lru, lru->lock);
+
+ mutex_lock(lru->lock);
+
+ while (freed < nr_to_scan) {
+ obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);
+
+ if (!obj)
+ break;
+
+ drm_gem_lru_move_tail_locked(&still_in_lru, obj);
+
+ /*
+ * If it's in the process of being freed, gem_object->free()
+ * may be blocked on lock waiting to remove it. So just
+ * skip it.
+ */
+ if (!kref_get_unless_zero(&obj->refcount))
+ continue;
+
+ /*
+ * Now that we own a reference, we can drop the lock for the
+ * rest of the loop body, to reduce contention with other
+ * code paths that need the LRU lock
+ */
+ mutex_unlock(lru->lock);
+
+ /*
+ * Note that this still needs to be a trylock, since we can
+ * hit shrinker in response to trying to get backing pages
+ * for this obj (ie. while its lock is already held)
+ */
+ if (!dma_resv_trylock(obj->resv))
+ goto tail;
+
+ if (shrink(obj)) {
+ freed += obj->size >> PAGE_SHIFT;
+
+ /*
+ * If we succeeded in releasing the object's backing
+ * pages, we expect the driver to have moved the object
+ * out of this LRU
+ */
+ WARN_ON(obj->lru == &still_in_lru);
+ WARN_ON(obj->lru == lru);
+ }
+
+ dma_resv_unlock(obj->resv);
+
+tail:
+ drm_gem_object_put(obj);
+ mutex_lock(lru->lock);
+ }
+
+ /*
+ * Move objects we've skipped over out of the temporary still_in_lru
+ * back into this LRU
+ */
+ list_for_each_entry (obj, &still_in_lru.list, lru_node)
+ obj->lru = lru;
+ list_splice_tail(&still_in_lru.list, &lru->list);
+ lru->count += still_in_lru.count;
+
+ mutex_unlock(lru->lock);
+
+ return freed;
+}
+EXPORT_SYMBOL(drm_gem_lru_scan);
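
These helpers are meant to back a driver shrinker: objects live on a drm_gem_lru, and drm_gem_lru_scan() does the locked walk, reference taking, and resv trylocking, so the driver callback only decides whether an object can be purged. A hedged sketch of the wiring (the device structure and purge helper below are hypothetical):

/* Hypothetical driver: objects sit on dev->lru, the shrinker reclaims them. */
static bool mydrv_shrink(struct drm_gem_object *obj)
{
	/* called with obj->resv trylocked and a reference held by the scanner */
	if (!mydrv_purge_pages(obj))	/* hypothetical backing-store purge */
		return false;

	drm_gem_lru_remove(obj);	/* on success, leave the scanned LRU */
	return true;
}

static unsigned long
mydrv_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct mydrv_device *dev =
		container_of(shrinker, struct mydrv_device, shrinker);

	return drm_gem_lru_scan(&dev->lru, sc->nr_to_scan, mydrv_shrink);
}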
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1fbbc19f1ac0..7bb98e6a446d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle);
/* drm_drv.c */
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index ef50c4e2e509..20e109a802ae 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
{
struct rb_node *rb;
- rb = prime_fpriv->dmabufs.rb_node;
+ mutex_lock(&prime_fpriv->lock);
+
+ rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
- member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
- if (member->dma_buf == dma_buf) {
+ member = rb_entry(rb, struct drm_prime_member, handle_rb);
+ if (member->handle == handle) {
rb_erase(&member->handle_rb, &prime_fpriv->handles);
rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
- dma_buf_put(dma_buf);
+ dma_buf_put(member->dma_buf);
kfree(member);
- return;
- } else if (member->dma_buf < dma_buf) {
+ break;
+ } else if (member->handle < handle) {
rb = rb->rb_right;
} else {
rb = rb->rb_left;
}
}
+
+ mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index f783d4963d4b..5b93c11895bb 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -23,14 +23,13 @@
* Rob Clark <robdclark@gmail.com>
*/
-#define DEBUG /* for pr_debug() */
-
#include <linux/stdarg.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm.h>
#include <drm/drm_drv.h>
@@ -40,7 +39,7 @@
* __drm_debug: Enable debug output.
* Bitmask of DRM_UT_x. See include/drm/drm_print.h for details.
*/
-unsigned int __drm_debug;
+unsigned long __drm_debug;
EXPORT_SYMBOL(__drm_debug);
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
@@ -52,7 +51,30 @@ MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug cat
"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n"
"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
-module_param_named(debug, __drm_debug, int, 0600);
+
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+module_param_named(debug, __drm_debug, ulong, 0600);
+#else
+/* classnames must match vals of enum drm_debug_category */
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
+static struct ddebug_class_param drm_debug_bitmap = {
+ .bits = &__drm_debug,
+ .flags = "p",
+ .map = &drm_debug_classes,
+};
+module_param_cb(debug, &param_ops_dyndbg_classes, &drm_debug_bitmap, 0600);
+#endif
void __drm_puts_coredump(struct drm_printer *p, const char *str)
{
@@ -162,7 +184,8 @@ EXPORT_SYMBOL(__drm_printfn_info);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
{
- pr_debug("%s %pV", p->prefix, vaf);
+ /* pr_debug callsite decorations are unhelpful here */
+ printk(KERN_DEBUG "%s %pV", p->prefix, vaf);
}
EXPORT_SYMBOL(__drm_printfn_debug);
@@ -256,15 +279,16 @@ void drm_dev_printk(const struct device *dev, const char *level,
}
EXPORT_SYMBOL(drm_dev_printk);
-void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
- const char *format, ...)
+void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
+ enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (!drm_debug_enabled(category))
+ if (!__drm_debug_enabled(category))
return;
+ /* we know we are printing for either syslog, tracefs, or both */
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
@@ -278,14 +302,14 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
va_end(args);
}
-EXPORT_SYMBOL(drm_dev_dbg);
+EXPORT_SYMBOL(__drm_dev_dbg);
-void __drm_dbg(enum drm_debug_category category, const char *format, ...)
+void ___drm_dbg(struct _ddebug *desc, enum drm_debug_category category, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (!drm_debug_enabled(category))
+ if (!__drm_debug_enabled(category))
return;
va_start(args, format);
@@ -297,7 +321,7 @@ void __drm_dbg(enum drm_debug_category category, const char *format, ...)
va_end(args);
}
-EXPORT_SYMBOL(__drm_dbg);
+EXPORT_SYMBOL(___drm_dbg);
void __drm_err(const char *format, ...)
{
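
With CONFIG_DRM_USE_DYNAMIC_DEBUG=y, the drm.debug bitmap above is parsed by dynamic debug instead of a plain ulong module parameter, so the classic knob keeps working while per-category control also appears in the dyndbg interface. A hedged usage sketch (control-file paths assume the usual procfs/debugfs mounts):

/*
 * Both knobs drive the same classmap registered above:
 *
 *   # classic bitmask, now handled by param_ops_dyndbg_classes
 *   echo 0x104 > /sys/module/drm/parameters/debug     # KMS + DP
 *
 *   # per-class control, names from DECLARE_DYNDBG_CLASSMAP
 *   echo "class DRM_UT_KMS +p" > /proc/dynamic_debug/control
 */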
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index bf33c3084cb4..a971590b8132 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -4,7 +4,6 @@
// Author: Inki Dae <inki.dae@samsung.com>
// Author: Andrzej Hajda <a.hajda@samsung.com>
-#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 10b0036f8a2e..b7c11bdce2c8 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -893,7 +893,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
if (!edid)
return -ENODEV;
- hdata->dvi_mode = !drm_detect_hdmi_monitor(edid);
+ hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
(hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
edid->width_cm, edid->height_cm);
@@ -922,8 +922,8 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
return -EINVAL;
}
-static int hdmi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct hdmi_context *hdata = connector_to_hdmi(connector);
int ret;
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 65260a658684..8d333db813b7 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1045,7 +1045,7 @@ static void mixer_atomic_disable(struct exynos_drm_crtc *crtc)
clear_bit(MXR_BIT_POWERED, &ctx->flags);
}
-static int mixer_mode_valid(struct exynos_drm_crtc *crtc,
+static enum drm_mode_status mixer_mode_valid(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct mixer_context *ctx = crtc->ctx;
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index dffe37490206..4b7627a72637 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
{
struct psb_gem_object *pobj = to_psb_gem_object(obj);
- drm_gem_object_release(obj);
-
/* Undo the mmap pin if we are destroying the object */
if (pobj->mmapping)
psb_gem_unpin(pobj);
+ drm_gem_object_release(obj);
+
WARN_ON(pobj->in_gart && !pobj->stolen);
release_resource(&pobj->resource);
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index b0ea911b27de..fe7b8436f87a 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -532,15 +532,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
gma_crtc->page_flip_event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* Call this locked if we want an event at vblank interrupt. */
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
if (ret) {
- gma_crtc->page_flip_event = NULL;
- drm_crtc_vblank_put(crtc);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (gma_crtc->page_flip_event) {
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
} else {
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 073adfe438dd..4e41c144a290 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -2,6 +2,7 @@
config DRM_HISI_HIBMC
tristate "DRM Support for Hisilicon Hibmc"
depends on DRM && PCI && (ARM64 || COMPILE_TEST)
+ depends on MMU
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 6d11e7938c83..ca127ff797f7 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -23,9 +23,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
DEFINE_DRM_GEM_FOPS(hv_fops);
static struct drm_driver hyperv_driver = {
@@ -133,7 +130,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
}
ret = hyperv_setup_vram(hv, hdev);
-
if (ret)
goto err_vmbus_close;
@@ -146,22 +142,22 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
if (ret)
drm_warn(dev, "Failed to update vram location.\n");
- hv->dirt_needed = true;
-
ret = hyperv_mode_config_init(hv);
if (ret)
- goto err_vmbus_close;
+ goto err_free_mmio;
ret = drm_dev_register(dev, 0);
if (ret) {
drm_err(dev, "Failed to register drm driver.\n");
- goto err_vmbus_close;
+ goto err_free_mmio;
}
drm_fbdev_generic_setup(dev, 0);
return 0;
+err_free_mmio:
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
err_hv_set_drv_data:
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
index 76a182a9a765..013a7829182d 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c
@@ -208,7 +208,7 @@ static inline int hyperv_sendpacket(struct hv_device *hdev, struct synthvid_msg
VM_PKT_DATA_INBAND, 0);
if (ret)
- drm_err(&hv->dev, "Unable to send packet via vmbus\n");
+ drm_err_ratelimited(&hv->dev, "Unable to send packet via vmbus; error %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index b91e48d2190d..578b738859b9 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -417,11 +417,9 @@ fail:
return -ENODEV;
}
-static int ch7006_remove(struct i2c_client *client)
+static void ch7006_remove(struct i2c_client *client)
{
ch7006_dbg(client, "\n");
-
- return 0;
}
static int ch7006_resume(struct device *dev)
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 741886b54419..1bc0b5de4499 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -370,12 +370,6 @@ sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
return 0;
}
-static int
-sil164_remove(struct i2c_client *client)
-{
- return 0;
-}
-
static struct i2c_client *
sil164_detect_slave(struct i2c_client *client)
{
@@ -427,7 +421,6 @@ MODULE_DEVICE_TABLE(i2c, sil164_ids);
static struct drm_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
.probe = sil164_probe,
- .remove = sil164_remove,
.driver = {
.name = "sil164",
},
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 5b03fdd1eaa4..9ed54e7ccff2 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -478,14 +478,12 @@ static int tda9950_probe(struct i2c_client *client,
return 0;
}
-static int tda9950_remove(struct i2c_client *client)
+static void tda9950_remove(struct i2c_client *client)
{
struct tda9950_priv *priv = i2c_get_clientdata(client);
cec_notifier_cec_adap_unregister(priv->notify, priv->adap);
cec_unregister_adapter(priv->adap);
-
- return 0;
}
static struct i2c_device_id tda9950_ids[] = {
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index f8eb6f69be05..d444e7fffb54 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -2076,11 +2076,10 @@ tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
return ret;
}
-static int tda998x_remove(struct i2c_client *client)
+static void tda998x_remove(struct i2c_client *client)
{
component_del(&client->dev, &tda998x_ops);
tda998x_destroy(&client->dev);
- return 0;
}
#ifdef CONFIG_OF
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 522ef9b4aff3..a26edcdadc21 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -123,6 +123,7 @@ gt-y += \
gt/intel_ring.o \
gt/intel_ring_submission.o \
gt/intel_rps.o \
+ gt/intel_sa_media.o \
gt/intel_sseu.o \
gt/intel_sseu_debugfs.o \
gt/intel_timeline.o \
@@ -257,7 +258,8 @@ i915-y += \
display/intel_vga.o \
display/i9xx_plane.o \
display/skl_scaler.o \
- display/skl_universal_plane.o
+ display/skl_universal_plane.o \
+ display/skl_watermark.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 82ad8fe7440c..e3e3d27ffb53 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -1169,7 +1169,7 @@ intel_dp_hotplug(struct intel_encoder *encoder,
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, SDEISR) & bit;
}
@@ -1223,7 +1223,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, DEISR) & bit;
}
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 5fbd2ae95869..2b73f5ff0d02 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -120,7 +120,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->hw.adjusted_mode.flags |= flags;
if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
- dotclock = pipe_config->port_clock * 2 / 3;
+ dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 2, 3);
else
dotclock = pipe_config->port_clock;
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 861dcd2eb890..a5be4af792cb 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -202,7 +202,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
* Should measure whether using a lower cdclk w/o IPS
*/
if (IS_BROADWELL(i915) &&
- crtc_state->pixel_rate > i915->max_cdclk_freq * 95 / 100)
+ crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100)
return false;
return true;
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index 0f35f2facdfc..5afbe3e98ee8 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -125,7 +125,7 @@ static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
if (i9xx_plane_has_fbc(dev_priv, i9xx_plane))
- return dev_priv->fbc[INTEL_FBC_A];
+ return dev_priv->display.fbc[INTEL_FBC_A];
else
return NULL;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 5dcfa7feffa9..ed4d93942dbd 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -33,6 +33,7 @@
#include "icl_dsi_regs.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
+#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_connector.h"
@@ -641,13 +642,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
@@ -657,13 +658,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder)
@@ -693,7 +694,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
enum phy phy;
u32 val;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
@@ -709,7 +710,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static void
@@ -1629,6 +1630,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
ret = intel_dsc_compute_params(crtc_state);
if (ret)
return ret;
@@ -2070,8 +2073,11 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
- intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index b94973b5633f..18f0a5ae3bac 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -62,9 +62,9 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->force_audio_property)
+ if (property == dev_priv->display.properties.force_audio)
*val = intel_conn_state->force_audio;
- else if (property == dev_priv->broadcast_rgb_property)
+ else if (property == dev_priv->display.properties.broadcast_rgb)
*val = intel_conn_state->broadcast_rgb;
else {
drm_dbg_atomic(&dev_priv->drm,
@@ -95,12 +95,12 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(state);
- if (property == dev_priv->force_audio_property) {
+ if (property == dev_priv->display.properties.force_audio) {
intel_conn_state->force_audio = val;
return 0;
}
- if (property == dev_priv->broadcast_rgb_property) {
+ if (property == dev_priv->display.properties.broadcast_rgb) {
intel_conn_state->broadcast_rgb = val;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index dd876dbbaa39..aaa6708256d5 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -42,9 +42,9 @@
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
-#include "intel_pm.h"
#include "intel_sprite.h"
#include "skl_scaler.h"
+#include "skl_watermark.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
struct intel_plane *plane)
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 6c9ee905f132..aacbc6da84ef 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -393,7 +393,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
const struct dp_aud_n_m *nm;
@@ -441,7 +441,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
int n, rate;
@@ -496,7 +496,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 tmp;
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
/* Disable timestamps */
tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
@@ -514,7 +514,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
}
static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
@@ -532,7 +532,7 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
vdsc_bpp = crtc_state->dsc.compressed_bpp;
- cdclk = i915->cdclk.hw.cdclk;
+ cdclk = i915->display.cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
link_clk = crtc_state->port_clock;
@@ -639,7 +639,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
/* Enable Audio WA for 4k DSC usecases */
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
@@ -677,7 +677,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
}
static void ilk_audio_codec_disable(struct intel_encoder *encoder,
@@ -814,7 +814,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_connector *connector = conn_state->connector;
const struct drm_display_mode *adjusted_mode =
@@ -838,17 +838,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
- if (dev_priv->audio.funcs)
- dev_priv->audio.funcs->audio_codec_enable(encoder,
- crtc_state,
- conn_state);
+ if (dev_priv->display.funcs.audio)
+ dev_priv->display.funcs.audio->audio_codec_enable(encoder,
+ crtc_state,
+ conn_state);
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
encoder->audio_connector = connector;
/* referred in audio callbacks */
- dev_priv->audio.encoder_map[pipe] = encoder;
- mutex_unlock(&dev_priv->audio.mutex);
+ dev_priv->display.audio.encoder_map[pipe] = encoder;
+ mutex_unlock(&dev_priv->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -878,7 +878,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_connector *connector = old_conn_state->connector;
enum port port = encoder->port;
@@ -891,15 +891,15 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
connector->base.id, connector->name,
encoder->base.base.id, encoder->base.name, pipe_name(pipe));
- if (dev_priv->audio.funcs)
- dev_priv->audio.funcs->audio_codec_disable(encoder,
- old_crtc_state,
- old_conn_state);
+ if (dev_priv->display.funcs.audio)
+ dev_priv->display.funcs.audio->audio_codec_disable(encoder,
+ old_crtc_state,
+ old_conn_state);
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
encoder->audio_connector = NULL;
- dev_priv->audio.encoder_map[pipe] = NULL;
- mutex_unlock(&dev_priv->audio.mutex);
+ dev_priv->display.audio.encoder_map[pipe] = NULL;
+ mutex_unlock(&dev_priv->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
acomp->base.audio_ops->pin_eld_notify) {
@@ -935,13 +935,13 @@ static const struct intel_audio_funcs hsw_audio_funcs = {
void intel_audio_hooks_init(struct drm_i915_private *dev_priv)
{
if (IS_G4X(dev_priv)) {
- dev_priv->audio.funcs = &g4x_audio_funcs;
+ dev_priv->display.funcs.audio = &g4x_audio_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->audio.funcs = &ilk_audio_funcs;
+ dev_priv->display.funcs.audio = &ilk_audio_funcs;
} else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) {
- dev_priv->audio.funcs = &hsw_audio_funcs;
+ dev_priv->display.funcs.audio = &hsw_audio_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->audio.funcs = &ilk_audio_funcs;
+ dev_priv->display.funcs.audio = &ilk_audio_funcs;
}
}
@@ -971,7 +971,7 @@ void intel_audio_cdclk_change_post(struct drm_i915_private *i915)
struct aud_ts_cdclk_m_n aud_ts;
if (DISPLAY_VER(i915) >= 13) {
- get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts);
+ get_aud_ts_cdclk_m_n(i915->display.cdclk.hw.ref, i915->display.cdclk.hw.cdclk, &aud_ts);
intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n);
intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN);
@@ -1046,13 +1046,13 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK);
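/* first reference: reprogram AUD_FREQ_CNTRL, whose contents may have been lost while the audio power domain was off */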
- if (dev_priv->audio.power_refcount++ == 0) {
+ if (dev_priv->display.audio.power_refcount++ == 0) {
if (DISPLAY_VER(dev_priv) >= 9) {
intel_de_write(dev_priv, AUD_FREQ_CNTRL,
- dev_priv->audio.freq_cntrl);
+ dev_priv->display.audio.freq_cntrl);
drm_dbg_kms(&dev_priv->drm,
"restored AUD_FREQ_CNTRL to 0x%x\n",
- dev_priv->audio.freq_cntrl);
+ dev_priv->display.audio.freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
@@ -1073,7 +1073,7 @@ static void i915_audio_component_put_power(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
- if (--dev_priv->audio.power_refcount == 0)
+ if (--dev_priv->display.audio.power_refcount == 0)
if (IS_GEMINILAKE(dev_priv))
glk_force_audio_cdclk(dev_priv, false);
@@ -1119,7 +1119,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv)))
return -ENODEV;
- return dev_priv->cdclk.hw.cdclk;
+ return dev_priv->display.cdclk.hw.cdclk;
}
/*
@@ -1140,10 +1140,10 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
/* MST */
if (pipe >= 0) {
if (drm_WARN_ON(&dev_priv->drm,
- pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map)))
+ pipe >= ARRAY_SIZE(dev_priv->display.audio.encoder_map)))
return NULL;
- encoder = dev_priv->audio.encoder_map[pipe];
+ encoder = dev_priv->display.audio.encoder_map[pipe];
/*
* when booting, the audio driver may not know whether it is
* MST or not, so it will poll all the ports & pipes
@@ -1159,7 +1159,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
return NULL;
for_each_pipe(dev_priv, pipe) {
- encoder = dev_priv->audio.encoder_map[pipe];
+ encoder = dev_priv->display.audio.encoder_map[pipe];
if (encoder == NULL)
continue;
@@ -1177,7 +1177,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int pipe, int rate)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- struct i915_audio_component *acomp = dev_priv->audio.component;
+ struct i915_audio_component *acomp = dev_priv->display.audio.component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
@@ -1187,7 +1187,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
return 0;
cookie = i915_audio_component_get_power(kdev);
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
/* 1. get the pipe */
encoder = get_saved_enc(dev_priv, port, pipe);
@@ -1206,7 +1206,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
hsw_audio_config_update(encoder, crtc->config);
unlock:
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
i915_audio_component_put_power(kdev, cookie);
return err;
}
@@ -1220,13 +1220,13 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
const u8 *eld;
int ret = -EINVAL;
- mutex_lock(&dev_priv->audio.mutex);
+ mutex_lock(&dev_priv->display.audio.mutex);
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder) {
drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
port_name(port));
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
return ret;
}
@@ -1238,7 +1238,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
memcpy(buf, eld, min(max_bytes, ret));
}
- mutex_unlock(&dev_priv->audio.mutex);
+ mutex_unlock(&dev_priv->display.audio.mutex);
return ret;
}
@@ -1273,7 +1273,7 @@ static int i915_audio_component_bind(struct device *i915_kdev,
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
- dev_priv->audio.component = acomp;
+ dev_priv->display.audio.component = acomp;
drm_modeset_unlock_all(&dev_priv->drm);
return 0;
@@ -1288,14 +1288,14 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
drm_modeset_lock_all(&dev_priv->drm);
acomp->base.ops = NULL;
acomp->base.dev = NULL;
- dev_priv->audio.component = NULL;
+ dev_priv->display.audio.component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
device_link_remove(hda_kdev, i915_kdev);
- if (dev_priv->audio.power_refcount)
+ if (dev_priv->display.audio.power_refcount)
drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n",
- dev_priv->audio.power_refcount);
+ dev_priv->display.audio.power_refcount);
}
static const struct component_ops i915_audio_component_bind_ops = {
@@ -1359,13 +1359,13 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n",
aud_freq, aud_freq_init);
- dev_priv->audio.freq_cntrl = aud_freq;
+ dev_priv->display.audio.freq_cntrl = aud_freq;
}
/* init with current cdclk */
intel_audio_cdclk_change_post(dev_priv);
- dev_priv->audio.component_registered = true;
+ dev_priv->display.audio.component_registered = true;
}
/**
@@ -1377,11 +1377,11 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
*/
static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->audio.component_registered)
+ if (!dev_priv->display.audio.component_registered)
return;
component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
- dev_priv->audio.component_registered = false;
+ dev_priv->display.audio.component_registered = false;
}
/**
@@ -1403,7 +1403,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv)
*/
void intel_audio_deinit(struct drm_i915_private *dev_priv)
{
- if ((dev_priv)->audio.lpe.platdev != NULL)
+ if (dev_priv->display.audio.lpe.platdev != NULL)
intel_lpe_audio_teardown(dev_priv);
else
i915_audio_component_cleanup(dev_priv);
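For context on the dev_priv->audio to dev_priv->display.audio moves above, a minimal sketch of the new grouping; the real definition lives in intel_display_core.h, so the member types shown here are simplified assumptions:

/* illustrative grouping only -- see intel_display_core.h for the real one */
struct intel_display_sketch {
	struct {
		const struct intel_audio_funcs *audio;	/* was dev_priv->audio.funcs */
	} funcs;
	struct {
		struct mutex mutex;
		struct i915_audio_component *component;
		struct intel_encoder *encoder_map[I915_MAX_PIPES];
		int power_refcount;
		u32 freq_cntrl;
		bool component_registered;
	} audio;
};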
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 84639e9df5c9..beba39a38c87 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -11,6 +11,7 @@
#include <acpi/video.h>
#include "intel_backlight.h"
+#include "intel_backlight_regs.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -18,6 +19,8 @@
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
#include "intel_pci_config.h"
+#include "intel_pps.h"
+#include "intel_quirks.h"
/**
* scale - scale values from one range to another
@@ -88,7 +91,7 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
return val;
if (dev_priv->params.invert_brightness > 0 ||
- dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+ intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -128,7 +131,7 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
if (dev_priv->params.invert_brightness > 0 ||
- (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
+ (dev_priv->params.invert_brightness == 0 && intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
@@ -305,7 +308,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (!panel->backlight.present || !conn_state->crtc)
return;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
@@ -321,7 +324,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
@@ -465,14 +468,14 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
return;
}
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
panel->backlight.funcs->disable(old_conn_state, 0);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -815,11 +818,11 @@ void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@ -829,12 +832,12 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
u32 val = 0;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
if (panel->backlight.enabled)
val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
return val;
@@ -862,7 +865,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
if (!panel->backlight.present)
return;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
@@ -872,7 +875,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
static int intel_backlight_device_update_status(struct backlight_device *bd)
@@ -978,26 +981,24 @@ int intel_backlight_device_register(struct intel_connector *connector)
if (!name)
return -ENOMEM;
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
-
- /*
- * Using the same name independent of the drm device or connector
- * prevents registration of multiple backlight devices in the
- * driver. However, we need to use the default name for backward
- * compatibility. Use unique names for subsequent backlight devices as a
- * fallback when the default name already exists.
- */
- if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
+ bd = backlight_device_get_by_name(name);
+ if (bd) {
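+ /* drop the lookup reference taken by backlight_device_get_by_name() */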
+ put_device(&bd->dev);
+ /*
+ * Using the same name independent of the drm device or connector
+ * prevents registration of multiple backlight devices in the
+ * driver. However, we need to use the default name for backward
+ * compatibility. Use unique names for subsequent backlight devices as a
+ * fallback when the default name already exists.
+ */
kfree(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
i915->drm.primary->index, connector->base.name);
if (!name)
return -ENOMEM;
-
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
}
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
if (IS_ERR(bd)) {
drm_err(&i915->drm,
@@ -1120,7 +1121,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
if (IS_PINEVIEW(dev_priv))
clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
- clock = KHz(dev_priv->cdclk.hw.cdclk);
+ clock = KHz(dev_priv->display.cdclk.hw.cdclk);
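/* e.g. cdclk = 450000 kHz -> clock = 450,000,000; a 200 Hz request
 * then returns DIV_ROUND_CLOSEST(450000000, 200 * 32) = 70313
 * (illustrative numbers) */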
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
@@ -1138,7 +1139,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
if (IS_G4X(dev_priv))
clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
- clock = KHz(dev_priv->cdclk.hw.cdclk);
+ clock = KHz(dev_priv->display.cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
@@ -1598,11 +1599,11 @@ void intel_backlight_update(struct intel_atomic_state *state,
if (!panel->backlight.present)
return;
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
if (!panel->backlight.enabled)
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
}
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
@@ -1612,7 +1613,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
int ret;
if (!connector->panel.vbt.backlight.present) {
- if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ if (intel_has_quirk(dev_priv, QUIRK_BACKLIGHT_PRESENT)) {
drm_dbg_kms(&dev_priv->drm,
"no backlight present per VBT, but present per quirk\n");
} else {
@@ -1627,9 +1628,9 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
return -ENODEV;
/* set level and max in panel struct */
- mutex_lock(&dev_priv->backlight_lock);
+ mutex_lock(&dev_priv->display.backlight.lock);
ret = panel->backlight.funcs->setup(connector, pipe);
- mutex_unlock(&dev_priv->backlight_lock);
+ mutex_unlock(&dev_priv->display.backlight.lock);
if (ret) {
drm_dbg_kms(&dev_priv->drm,
@@ -1780,9 +1781,13 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
}
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (!intel_has_quirk(dev_priv, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ connector->panel.backlight.power = intel_pps_backlight_power;
+ }
/* We're using a standard PWM backlight interface */
panel->backlight.funcs = &pwm_bl_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_backlight_regs.h b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
new file mode 100644
index 000000000000..50c1210f6d5d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_BACKLIGHT_REGS_H__
+#define __INTEL_BACKLIGHT_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
+ _VLV_BLC_PWM_CTL2_B)
+
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
+ _VLV_BLC_PWM_CTL_B)
+
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
+ _VLV_BLC_HIST_CTL_B)
+
+/* Backlight control */
+#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
+#define BLM_PWM_ENABLE (1 << 31)
+#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
+#define BLM_PIPE_SELECT (1 << 29)
+#define BLM_PIPE_SELECT_IVB (3 << 29)
+#define BLM_PIPE_A (0 << 29)
+#define BLM_PIPE_B (1 << 29)
+#define BLM_PIPE_C (2 << 29) /* ivb + */
+#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
+#define BLM_TRANSCODER_B BLM_PIPE_B
+#define BLM_TRANSCODER_C BLM_PIPE_C
+#define BLM_TRANSCODER_EDP (3 << 29)
+#define BLM_PIPE(pipe) ((pipe) << 29)
+#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
+#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
+#define BLM_PHASE_IN_ENABLE (1 << 25)
+#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
+#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
+#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
+#define BLM_PHASE_IN_COUNT_SHIFT (8)
+#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
+#define BLM_PHASE_IN_INCR_SHIFT (0)
+#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
+#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
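+/* e.g. a raw field value of 9472 (0x2500) encodes 2 * 9472 = 18944
+ * backlight cycles per PWM period (illustrative number) */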
+#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
+#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
+
+#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define BLM_HISTOGRAM_ENABLE (1 << 31)
+
+/* New registers for PCH-split platforms. Safe where new bits show up, the
+ * register layout matches the gen4 BLC_PWM_CTL[12]. */
+#define BLC_PWM_CPU_CTL2 _MMIO(0x48250)
+#define BLC_PWM_CPU_CTL _MMIO(0x48254)
+
+#define HSW_BLC_PWM2_CTL _MMIO(0x48350)
+
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250)
+#define BLM_PCH_PWM_ENABLE (1 << 31)
+#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
+#define BLM_PCH_POLARITY (1 << 29)
+#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
+
+/* BXT backlight register definition. */
+#define _BXT_BLC_PWM_CTL1 0xC8250
+#define BXT_BLC_PWM_ENABLE (1 << 31)
+#define BXT_BLC_PWM_POLARITY (1 << 29)
+#define _BXT_BLC_PWM_FREQ1 0xC8254
+#define _BXT_BLC_PWM_DUTY1 0xC8258
+
+#define _BXT_BLC_PWM_CTL2 0xC8350
+#define _BXT_BLC_PWM_FREQ2 0xC8354
+#define _BXT_BLC_PWM_DUTY2 0xC8358
+
+#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
+#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
+#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \
+ _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
+
+/* Utility pin */
+#define UTIL_PIN_CTL _MMIO(0x48400)
+#define UTIL_PIN_ENABLE (1 << 31)
+#define UTIL_PIN_PIPE_MASK (3 << 29)
+#define UTIL_PIN_PIPE(x) ((x) << 29)
+#define UTIL_PIN_MODE_MASK (0xf << 24)
+#define UTIL_PIN_MODE_DATA (0 << 24)
+#define UTIL_PIN_MODE_PWM (1 << 24)
+#define UTIL_PIN_MODE_VBLANK (4 << 24)
+#define UTIL_PIN_MODE_VSYNC (5 << 24)
+#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24)
+#define UTIL_PIN_OUTPUT_DATA (1 << 23)
+#define UTIL_PIN_POLARITY (1 << 22)
+#define UTIL_PIN_DIRECTION_INPUT (1 << 19)
+#define UTIL_PIN_INPUT_DATA (1 << 16)
+
+#endif /* __INTEL_BACKLIGHT_REGS_H__ */
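The paired BXT registers above are selected through _MMIO_PIPE(). A rough sketch of the assumed underlying arithmetic (i915's _PICK_EVEN()-style linear interpolation between the two addresses):

/* sketch of the assumed two-register selection: index 0 -> a, 1 -> b */
#define _PICK_EVEN_SKETCH(idx, a, b) ((a) + (idx) * ((b) - (a)))
/* so BXT_BLC_PWM_CTL(1) resolves to _MMIO(0xC8250 + 1 * 0x100) = _MMIO(0xC8350) */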
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 51dde5bfd956..28bdb936cd1f 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -135,18 +135,6 @@ static u32 raw_block_offset(const void *bdb, enum bdb_block_id section_id)
return block - bdb;
}
-/* size of the block excluding the header */
-static u32 raw_block_size(const void *bdb, enum bdb_block_id section_id)
-{
- const void *block;
-
- block = find_raw_section(bdb, section_id);
- if (!block)
- return 0;
-
- return get_blocksize(block);
-}
-
struct bdb_block_entry {
struct list_head node;
enum bdb_block_id section_id;
@@ -159,7 +147,7 @@ find_section(struct drm_i915_private *i915,
{
struct bdb_block_entry *entry;
- list_for_each_entry(entry, &i915->vbt.bdb_blocks, node) {
+ list_for_each_entry(entry, &i915->display.vbt.bdb_blocks, node) {
if (entry->section_id == section_id)
return entry->data + 3;
}
@@ -231,9 +219,14 @@ static bool validate_lfp_data_ptrs(const void *bdb,
{
int fp_timing_size, dvo_timing_size, panel_pnp_id_size, panel_name_size;
int data_block_size, lfp_data_size;
+ const void *data_block;
int i;
- data_block_size = raw_block_size(bdb, BDB_LVDS_LFP_DATA);
+ data_block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!data_block)
+ return false;
+
+ data_block_size = get_blocksize(data_block);
if (data_block_size == 0)
return false;
@@ -261,21 +254,6 @@ static bool validate_lfp_data_ptrs(const void *bdb,
if (16 * lfp_data_size > data_block_size)
return false;
- /*
- * Except for vlv/chv machines all real VBTs seem to have 6
- * unaccounted bytes in the fp_timing table. And it doesn't
- * appear to be a really intentional hole as the fp_timing
- * 0xffff terminator is always within those 6 missing bytes.
- */
- if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size &&
- fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size != lfp_data_size)
- return false;
-
- if (ptrs->ptr[0].fp_timing.offset + fp_timing_size > ptrs->ptr[0].dvo_timing.offset ||
- ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset ||
- ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size)
- return false;
-
/* make sure the table entries have uniform size */
for (i = 1; i < 16; i++) {
if (ptrs->ptr[i].fp_timing.table_size != fp_timing_size ||
@@ -289,6 +267,23 @@ static bool validate_lfp_data_ptrs(const void *bdb,
return false;
}
+ /*
+ * Except for vlv/chv machines all real VBTs seem to have 6
+ * unaccounted bytes in the fp_timing table. And it doesn't
+ * appear to be a really intentional hole as the fp_timing
+ * 0xffff terminator is always within those 6 missing bytes.
+ */
+ if (fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size == lfp_data_size)
+ fp_timing_size += 6;
+
+ if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size)
+ return false;
+
+ if (ptrs->ptr[0].fp_timing.offset + fp_timing_size != ptrs->ptr[0].dvo_timing.offset ||
+ ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset ||
+ ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size)
+ return false;
+
/* make sure the tables fit inside the data block */
for (i = 0; i < 16; i++) {
if (ptrs->ptr[i].fp_timing.offset + fp_timing_size > data_block_size ||
@@ -300,6 +295,15 @@ static bool validate_lfp_data_ptrs(const void *bdb,
if (ptrs->panel_name.offset + 16 * panel_name_size > data_block_size)
return false;
+ /* make sure fp_timing terminators are present at expected locations */
+ for (i = 0; i < 16; i++) {
+ const u16 *t = data_block + ptrs->ptr[i].fp_timing.offset +
+ fp_timing_size - 2;
+
+ if (*t != 0xffff)
+ return false;
+ }
+
return true;
}
@@ -333,18 +337,6 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block)
return validate_lfp_data_ptrs(bdb, ptrs);
}
-static const void *find_fp_timing_terminator(const u8 *data, int size)
-{
- int i;
-
- for (i = 0; i < size - 1; i++) {
- if (data[i] == 0xff && data[i+1] == 0xff)
- return &data[i];
- }
-
- return NULL;
-}
-
static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table,
int table_size, int total_size)
{
@@ -368,11 +360,22 @@ static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next,
static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
const void *bdb)
{
- int i, size, table_size, block_size, offset;
- const void *t0, *t1, *block;
+ int i, size, table_size, block_size, offset, fp_timing_size;
struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const void *block;
void *ptrs_block;
+ /*
+ * The hardcoded fp_timing_size is only valid for
+ * modernish VBTs. All older VBTs definitely should
+ * include block 41 and thus we don't need to
+ * generate one.
+ */
+ if (i915->display.vbt.version < 155)
+ return NULL;
+
+ fp_timing_size = 38;
+
block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
if (!block)
return NULL;
@@ -381,17 +384,8 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
block_size = get_blocksize(block);
- size = block_size;
- t0 = find_fp_timing_terminator(block, size);
- if (!t0)
- return NULL;
-
- size -= t0 - block - 2;
- t1 = find_fp_timing_terminator(t0 + 2, size);
- if (!t1)
- return NULL;
-
- size = t1 - t0;
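+ /* fixed-size per-panel entry: the 38-byte fp_timing table assumed
+ * above plus one DVO timing and one PnP id struct */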
+ size = fp_timing_size + sizeof(struct lvds_dvo_timing) +
+ sizeof(struct lvds_pnp_id);
if (size * 16 > block_size)
return NULL;
@@ -409,7 +403,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
table_size = sizeof(struct lvds_dvo_timing);
size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size);
- table_size = t0 - block + 2;
+ table_size = fp_timing_size;
size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size);
if (ptrs->ptr[0].fp_timing.table_size)
@@ -424,14 +418,14 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
return NULL;
}
- size = t1 - t0;
+ size = fp_timing_size + sizeof(struct lvds_dvo_timing) +
+ sizeof(struct lvds_pnp_id);
for (i = 1; i < 16; i++) {
next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size);
next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size);
next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size);
}
- size = t1 - t0;
table_size = sizeof(struct lvds_lfp_panel_name);
if (16 * (size + table_size) <= block_size) {
@@ -479,6 +473,13 @@ init_bdb_block(struct drm_i915_private *i915,
block_size = get_blocksize(block);
+ /*
+ * Version number and new block size are considered
+ * part of the header for MIPI sequenece block v3+.
+ */
+ if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3)
+ block_size += 5;
+
entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
GFP_KERNEL);
if (!entry) {
@@ -501,7 +502,7 @@ init_bdb_block(struct drm_i915_private *i915,
return;
}
- list_add_tail(&entry->node, &i915->vbt.bdb_blocks);
+ list_add_tail(&entry->node, &i915->display.vbt.bdb_blocks);
}
static void init_bdb_blocks(struct drm_i915_private *i915,
@@ -604,6 +605,19 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
return NULL;
}
+static void dump_pnp_id(struct drm_i915_private *i915,
+ const struct lvds_pnp_id *pnp_id,
+ const char *name)
+{
+ u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name);
+ char vend[4];
+
+ drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n",
+ name, drm_edid_decode_mfg_id(mfg_name, vend),
+ pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial,
+ pnp_id->mfg_week, pnp_id->mfg_year + 1990);
+}
+
static int opregion_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
const struct edid *edid)
@@ -655,6 +669,8 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
edid_id_nodate.mfg_week = 0;
edid_id_nodate.mfg_year = 0;
+ dump_pnp_id(i915, edid_id, "EDID");
+
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
if (!ptrs)
return -1;
@@ -861,6 +877,7 @@ parse_lfp_data(struct drm_i915_private *i915,
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_tail *tail;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ const struct lvds_pnp_id *pnp_id;
int panel_type = panel->vbt.panel_type;
ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
@@ -874,11 +891,18 @@ parse_lfp_data(struct drm_i915_private *i915,
if (!panel->vbt.lfp_lvds_vbt_mode)
parse_lfp_panel_dtd(i915, panel, data, ptrs);
+ pnp_id = get_lvds_pnp_id(data, ptrs, panel_type);
+ dump_pnp_id(i915, pnp_id, "Panel");
+
tail = get_lfp_data_tail(data, ptrs);
if (!tail)
return;
- if (i915->vbt.version >= 188) {
+ drm_dbg_kms(&i915->drm, "Panel name: %.*s\n",
+ (int)sizeof(tail->panel_name[0].name),
+ tail->panel_name[panel_type].name);
+
+ if (i915->display.vbt.version >= 188) {
panel->vbt.seamless_drrs_min_refresh_rate =
tail->seamless_drrs_min_refresh_rate[panel_type];
drm_dbg_kms(&i915->drm,
@@ -904,7 +928,7 @@ parse_generic_dtd(struct drm_i915_private *i915,
* first on VBT >= 229, but still fall back to trying the old LFP
* block if that fails.
*/
- if (i915->vbt.version < 229)
+ if (i915->display.vbt.version < 229)
return;
generic_dtd = find_section(i915, BDB_GENERIC_DTD);
@@ -1008,12 +1032,12 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
- if (i915->vbt.version >= 191) {
+ if (i915->display.vbt.version >= 191) {
size_t exp_size;
- if (i915->vbt.version >= 236)
+ if (i915->display.vbt.version >= 236)
exp_size = sizeof(struct bdb_lfp_backlight_data);
- else if (i915->vbt.version >= 234)
+ else if (i915->display.vbt.version >= 234)
exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234;
else
exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191;
@@ -1030,14 +1054,14 @@ parse_lfp_backlight(struct drm_i915_private *i915,
panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
panel->vbt.backlight.active_low_pwm = entry->active_low_pwm;
- if (i915->vbt.version >= 234) {
+ if (i915->display.vbt.version >= 234) {
u16 min_level;
bool scale;
level = backlight_data->brightness_level[panel_type].level;
min_level = backlight_data->brightness_min_level[panel_type].level;
- if (i915->vbt.version >= 236)
+ if (i915->display.vbt.version >= 236)
scale = backlight_data->brightness_precision_bits[panel_type] == 16;
else
scale = level > 255;
@@ -1134,37 +1158,37 @@ parse_general_features(struct drm_i915_private *i915)
if (!general)
return;
- i915->vbt.int_tv_support = general->int_tv_support;
+ i915->display.vbt.int_tv_support = general->int_tv_support;
/* int_crt_support can't be trusted on earlier platforms */
- if (i915->vbt.version >= 155 &&
+ if (i915->display.vbt.version >= 155 &&
(HAS_DDI(i915) || IS_VALLEYVIEW(i915)))
- i915->vbt.int_crt_support = general->int_crt_support;
- i915->vbt.lvds_use_ssc = general->enable_ssc;
- i915->vbt.lvds_ssc_freq =
+ i915->display.vbt.int_crt_support = general->int_crt_support;
+ i915->display.vbt.lvds_use_ssc = general->enable_ssc;
+ i915->display.vbt.lvds_ssc_freq =
intel_bios_ssc_frequency(i915, general->ssc_freq);
- i915->vbt.display_clock_mode = general->display_clock_mode;
- i915->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
- if (i915->vbt.version >= 181) {
- i915->vbt.orientation = general->rotate_180 ?
+ i915->display.vbt.display_clock_mode = general->display_clock_mode;
+ i915->display.vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ if (i915->display.vbt.version >= 181) {
+ i915->display.vbt.orientation = general->rotate_180 ?
DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
DRM_MODE_PANEL_ORIENTATION_NORMAL;
} else {
- i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ i915->display.vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
- if (i915->vbt.version >= 249 && general->afc_startup_config) {
- i915->vbt.override_afc_startup = true;
- i915->vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
+ if (i915->display.vbt.version >= 249 && general->afc_startup_config) {
+ i915->display.vbt.override_afc_startup = true;
+ i915->display.vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7;
}
drm_dbg_kms(&i915->drm,
"BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
- i915->vbt.int_tv_support,
- i915->vbt.int_crt_support,
- i915->vbt.lvds_use_ssc,
- i915->vbt.lvds_ssc_freq,
- i915->vbt.display_clock_mode,
- i915->vbt.fdi_rx_polarity_inverted);
+ i915->display.vbt.int_tv_support,
+ i915->display.vbt.int_crt_support,
+ i915->display.vbt.lvds_use_ssc,
+ i915->display.vbt.lvds_ssc_freq,
+ i915->display.vbt.display_clock_mode,
+ i915->display.vbt.fdi_rx_polarity_inverted);
}
static const struct child_device_config *
@@ -1190,7 +1214,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
return;
}
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (child->slave_addr != SLAVE_ADDR1 &&
@@ -1214,7 +1238,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915)
child->slave_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- mapping = &i915->vbt.sdvo_mappings[child->dvo_port - 1];
+ mapping = &i915->display.vbt.sdvo_mappings[child->dvo_port - 1];
if (!mapping->initialized) {
mapping->dvo_port = child->dvo_port;
mapping->slave_addr = child->slave_addr;
@@ -1265,7 +1289,7 @@ parse_driver_features(struct drm_i915_private *i915)
* interpretation, but real world VBTs seem to.
*/
if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
- i915->vbt.int_lvds_support = 0;
+ i915->display.vbt.int_lvds_support = 0;
} else {
/*
* FIXME it's not clear which BDB version has the LVDS config
@@ -1278,10 +1302,10 @@ parse_driver_features(struct drm_i915_private *i915)
* in the wild with the bits correctly populated. Version
* 108 (on i85x) does not have the bits correctly populated.
*/
- if (i915->vbt.version >= 134 &&
+ if (i915->display.vbt.version >= 134 &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
- i915->vbt.int_lvds_support = 0;
+ i915->display.vbt.int_lvds_support = 0;
}
}
@@ -1295,7 +1319,7 @@ parse_panel_driver_features(struct drm_i915_private *i915,
if (!driver)
return;
- if (i915->vbt.version < 228) {
+ if (i915->display.vbt.version < 228) {
drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n",
driver->drrs_enabled);
/*
@@ -1328,7 +1352,7 @@ parse_power_conservation_features(struct drm_i915_private *i915,
panel->vbt.vrr = true; /* matches Windows behaviour */
- if (i915->vbt.version < 228)
+ if (i915->display.vbt.version < 228)
return;
power = find_section(i915, BDB_LFP_POWER);
@@ -1354,10 +1378,10 @@ parse_power_conservation_features(struct drm_i915_private *i915,
panel->vbt.drrs_type = DRRS_TYPE_NONE;
}
- if (i915->vbt.version >= 232)
+ if (i915->display.vbt.version >= 232)
panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type);
- if (i915->vbt.version >= 233)
+ if (i915->display.vbt.version >= 233)
panel->vbt.vrr = panel_bool(power->vrr_feature_enabled,
panel_type);
}
@@ -1393,7 +1417,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.pps = *edp_pps;
- if (i915->vbt.version >= 224) {
+ if (i915->display.vbt.version >= 224) {
panel->vbt.edp.rate =
edp->edp_fast_link_training_rate[panel_type] * 20;
} else {
@@ -1472,7 +1496,7 @@ parse_edp(struct drm_i915_private *i915,
break;
}
- if (i915->vbt.version >= 173) {
+ if (i915->display.vbt.version >= 173) {
u8 vswing;
/* Don't read from VBT if module parameter has valid value */
@@ -1488,7 +1512,7 @@ parse_edp(struct drm_i915_private *i915,
panel->vbt.edp.drrs_msa_timing_delay =
panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2);
- if (i915->vbt.version >= 244)
+ if (i915->display.vbt.version >= 244)
panel->vbt.edp.max_link_rate =
edp->edp_max_port_link_rate[panel_type] * 20;
}
@@ -1520,7 +1544,7 @@ parse_psr(struct drm_i915_private *i915,
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
* Old decimal value is wake up time in multiples of 100 us.
*/
- if (i915->vbt.version >= 205 &&
+ if (i915->display.vbt.version >= 205 &&
(DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) {
switch (psr_table->tp1_wakeup_time) {
case 0:
@@ -1566,7 +1590,7 @@ parse_psr(struct drm_i915_private *i915,
panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
- if (i915->vbt.version >= 226) {
+ if (i915->display.vbt.version >= 226) {
u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
wakeup_time = panel_bits(wakeup_time, panel_type, 2);
@@ -1596,7 +1620,9 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
struct intel_panel *panel,
enum port port)
{
- if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) {
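+ /* dual-link DSI pairs port A with port B on display ver >= 11 (ICL+),
+ * and with port C on older VLV/CHV-style platforms */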
+ enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
+
+ if (!panel->vbt.dsi.config->dual_link || i915->display.vbt.version < 197) {
panel->vbt.dsi.bl_ports = BIT(port);
if (panel->vbt.dsi.config->cabc_supported)
panel->vbt.dsi.cabc_ports = BIT(port);
@@ -1609,11 +1635,11 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
panel->vbt.dsi.bl_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- panel->vbt.dsi.bl_ports = BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(port_bc);
break;
default:
case DL_DCS_PORT_A_AND_C:
- panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc);
break;
}
@@ -1625,12 +1651,12 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
panel->vbt.dsi.cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- panel->vbt.dsi.cabc_ports = BIT(PORT_C);
+ panel->vbt.dsi.cabc_ports = BIT(port_bc);
break;
default:
case DL_DCS_PORT_A_AND_C:
panel->vbt.dsi.cabc_ports =
- BIT(PORT_A) | BIT(PORT_C);
+ BIT(PORT_A) | BIT(port_bc);
break;
}
}
@@ -2051,7 +2077,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
u16 block_size;
int index;
- if (i915->vbt.version < 198)
+ if (i915->display.vbt.version < 198)
return;
params = find_section(i915, BDB_COMPRESSION_PARAMETERS);
@@ -2071,7 +2097,7 @@ parse_compression_parameters(struct drm_i915_private *i915)
}
}
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (!child->compression_enable)
@@ -2205,7 +2231,7 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->vbt.ports[port];
+ devdata = i915->display.vbt.ports[port];
if (devdata && ddc_pin == devdata->child.ddc_pin)
return port;
@@ -2254,7 +2280,7 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
* there are real machines (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and we must pick port E :(
*/
- child = &i915->vbt.ports[p]->child;
+ child = &i915->display.vbt.ports[p]->child;
child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
@@ -2271,7 +2297,7 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
return PORT_NONE;
for_each_port(port) {
- devdata = i915->vbt.ports[port];
+ devdata = i915->display.vbt.ports[port];
if (devdata && aux_ch == devdata->child.aux_channel)
return port;
@@ -2306,7 +2332,7 @@ static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata,
* there are real machines (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and we must pick port E :(
*/
- child = &i915->vbt.ports[p]->child;
+ child = &i915->display.vbt.ports[p]->child;
child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
child->aux_channel = 0;
@@ -2418,7 +2444,7 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
[PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 },
};
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping),
ARRAY_SIZE(xelpd_port_mapping[0]),
xelpd_port_mapping,
@@ -2480,15 +2506,23 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 216)
+ if (!devdata || devdata->i915->display.vbt.version < 216)
return 0;
- if (devdata->i915->vbt.version >= 230)
+ if (devdata->i915->display.vbt.version >= 230)
return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate);
else
return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
}
+static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->display.vbt.version < 244)
+ return 0;
+
+ return devdata->child.dp_max_lane_count + 1;
+}
+
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -2544,7 +2578,7 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 158)
+ if (!devdata || devdata->i915->display.vbt.version < 158)
return -1;
return devdata->child.hdmi_level_shifter_value;
@@ -2552,7 +2586,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de
static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 204)
+ if (!devdata || devdata->i915->display.vbt.version < 204)
return 0;
switch (devdata->child.hdmi_max_data_rate) {
@@ -2661,7 +2695,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
return;
}
- if (i915->vbt.ports[port]) {
+ if (i915->display.vbt.ports[port]) {
drm_dbg_kms(&i915->drm,
"More than one child device for port %c in VBT, using the first.\n",
port_name(port));
@@ -2676,7 +2710,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
if (intel_bios_encoder_supports_dp(devdata))
sanitize_aux_ch(devdata, port);
- i915->vbt.ports[port] = devdata;
+ i915->display.vbt.ports[port] = devdata;
}
static bool has_ddi_port_info(struct drm_i915_private *i915)
@@ -2692,12 +2726,12 @@ static void parse_ddi_ports(struct drm_i915_private *i915)
if (!has_ddi_port_info(i915))
return;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node)
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node)
parse_ddi_port(devdata);
for_each_port(port) {
- if (i915->vbt.ports[port])
- print_ddi_port(i915->vbt.ports[port], port);
+ if (i915->display.vbt.ports[port])
+ print_ddi_port(i915->display.vbt.ports[port], port);
}
}
@@ -2730,33 +2764,33 @@ parse_general_definitions(struct drm_i915_private *i915)
bus_pin = defs->crt_ddc_gmbus_pin;
drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_valid_pin(i915, bus_pin))
- i915->vbt.crt_ddc_pin = bus_pin;
+ i915->display.vbt.crt_ddc_pin = bus_pin;
- if (i915->vbt.version < 106) {
+ if (i915->display.vbt.version < 106) {
expected_size = 22;
- } else if (i915->vbt.version < 111) {
+ } else if (i915->display.vbt.version < 111) {
expected_size = 27;
- } else if (i915->vbt.version < 195) {
+ } else if (i915->display.vbt.version < 195) {
expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
- } else if (i915->vbt.version == 195) {
+ } else if (i915->display.vbt.version == 195) {
expected_size = 37;
- } else if (i915->vbt.version <= 215) {
+ } else if (i915->display.vbt.version <= 215) {
expected_size = 38;
- } else if (i915->vbt.version <= 237) {
+ } else if (i915->display.vbt.version <= 237) {
expected_size = 39;
} else {
expected_size = sizeof(*child);
BUILD_BUG_ON(sizeof(*child) < 39);
drm_dbg(&i915->drm,
"Expected child device config size for VBT version %u not known; assuming %u\n",
- i915->vbt.version, expected_size);
+ i915->display.vbt.version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (defs->child_dev_size != expected_size)
drm_err(&i915->drm,
"Unexpected child device config size %u (expected %u for VBT version %u)\n",
- defs->child_dev_size, expected_size, i915->vbt.version);
+ defs->child_dev_size, expected_size, i915->display.vbt.version);
/* The legacy sized child device config is the minimum we need. */
if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
@@ -2792,10 +2826,10 @@ parse_general_definitions(struct drm_i915_private *i915)
memcpy(&devdata->child, child,
min_t(size_t, defs->child_dev_size, sizeof(*child)));
- list_add_tail(&devdata->node, &i915->vbt.display_devices);
+ list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
}
- if (list_empty(&i915->vbt.display_devices))
+ if (list_empty(&i915->display.vbt.display_devices))
drm_dbg_kms(&i915->drm,
"no child dev is parsed from VBT\n");
}
@@ -2804,25 +2838,25 @@ parse_general_definitions(struct drm_i915_private *i915)
static void
init_vbt_defaults(struct drm_i915_private *i915)
{
- i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
+ i915->display.vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* general features */
- i915->vbt.int_tv_support = 1;
- i915->vbt.int_crt_support = 1;
+ i915->display.vbt.int_tv_support = 1;
+ i915->display.vbt.int_crt_support = 1;
/* driver features */
- i915->vbt.int_lvds_support = 1;
+ i915->display.vbt.int_lvds_support = 1;
/* Default to using SSC */
- i915->vbt.lvds_use_ssc = 1;
+ i915->display.vbt.lvds_use_ssc = 1;
/*
* Core/SandyBridge/IvyBridge use alternative (120MHz) reference
* clock for LVDS.
*/
- i915->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
- !HAS_PCH_SPLIT(i915));
+ i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915,
+ !HAS_PCH_SPLIT(i915));
drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n",
- i915->vbt.lvds_ssc_freq);
+ i915->display.vbt.lvds_ssc_freq);
}
/* Common defaults which may be overridden by VBT. */
@@ -2883,7 +2917,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915)
if (port == PORT_A)
child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR;
- list_add_tail(&devdata->node, &i915->vbt.display_devices);
+ list_add_tail(&devdata->node, &i915->display.vbt.display_devices);
drm_dbg_kms(&i915->drm,
"Generating default VBT child device with type 0x04%x on port %c\n",
@@ -2891,7 +2925,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915)
}
/* Bypass some minimum baseline VBT version checks */
- i915->vbt.version = 155;
+ i915->display.vbt.version = 155;
}
static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
@@ -3078,12 +3112,12 @@ err_unmap_oprom:
*/
void intel_bios_init(struct drm_i915_private *i915)
{
- const struct vbt_header *vbt = i915->opregion.vbt;
+ const struct vbt_header *vbt = i915->display.opregion.vbt;
struct vbt_header *oprom_vbt = NULL;
const struct bdb_header *bdb;
- INIT_LIST_HEAD(&i915->vbt.display_devices);
- INIT_LIST_HEAD(&i915->vbt.bdb_blocks);
+ INIT_LIST_HEAD(&i915->display.vbt.display_devices);
+ INIT_LIST_HEAD(&i915->display.vbt.bdb_blocks);
if (!HAS_DISPLAY(i915)) {
drm_dbg_kms(&i915->drm,
@@ -3111,11 +3145,11 @@ void intel_bios_init(struct drm_i915_private *i915)
goto out;
bdb = get_bdb_header(vbt);
- i915->vbt.version = bdb->version;
+ i915->display.vbt.version = bdb->version;
drm_dbg_kms(&i915->drm,
"VBT signature \"%.*s\", BDB version %d\n",
- (int)sizeof(vbt->signature), vbt->signature, i915->vbt.version);
+ (int)sizeof(vbt->signature), vbt->signature, i915->display.vbt.version);
init_bdb_blocks(i915, bdb);
@@ -3172,13 +3206,13 @@ void intel_bios_driver_remove(struct drm_i915_private *i915)
struct intel_bios_encoder_data *devdata, *nd;
struct bdb_block_entry *entry, *ne;
- list_for_each_entry_safe(devdata, nd, &i915->vbt.display_devices, node) {
+ list_for_each_entry_safe(devdata, nd, &i915->display.vbt.display_devices, node) {
list_del(&devdata->node);
kfree(devdata->dsc);
kfree(devdata);
}
- list_for_each_entry_safe(entry, ne, &i915->vbt.bdb_blocks, node) {
+ list_for_each_entry_safe(entry, ne, &i915->display.vbt.bdb_blocks, node) {
list_del(&entry->node);
kfree(entry);
}
@@ -3212,13 +3246,13 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915)
const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- if (!i915->vbt.int_tv_support)
+ if (!i915->display.vbt.int_tv_support)
return false;
- if (list_empty(&i915->vbt.display_devices))
+ if (list_empty(&i915->display.vbt.display_devices))
return true;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
/*
@@ -3255,10 +3289,10 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- if (list_empty(&i915->vbt.display_devices))
+ if (list_empty(&i915->display.vbt.display_devices))
return true;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
/* If the device type is not LFP, continue.
@@ -3285,7 +3319,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin)
* additional data. Trust that if the VBT was written into
* the OpRegion then they have validated the LVDS's existence.
*/
- if (i915->opregion.vbt)
+ if (i915->display.opregion.vbt)
return true;
}
@@ -3304,7 +3338,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
if (WARN_ON(!has_ddi_port_info(i915)))
return true;
- return i915->vbt.ports[port];
+ return i915->display.vbt.ports[port];
}
/**
@@ -3364,7 +3398,7 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
const struct child_device_config *child;
u8 dvo_port;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
@@ -3463,7 +3497,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
const struct intel_bios_encoder_data *devdata;
const struct child_device_config *child;
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
+ list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
child = &devdata->child;
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
@@ -3494,7 +3528,7 @@ bool
intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
if (drm_WARN_ON_ONCE(&i915->drm,
!IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
@@ -3514,7 +3548,7 @@ bool
intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
}
@@ -3530,7 +3564,7 @@ bool
intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
return devdata && devdata->child.lane_reversal;
}
@@ -3538,7 +3572,7 @@ intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port];
enum aux_ch aux_ch;
if (!devdata || !devdata->child.aux_channel) {
@@ -3576,7 +3610,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_C;
break;
case DP_AUX_D:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_D_XELPD;
else if (IS_ALDERLAKE_S(i915))
aux_ch = AUX_CH_USBC3;
@@ -3586,7 +3620,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_D;
break;
case DP_AUX_E:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_E_XELPD;
else if (IS_ALDERLAKE_S(i915))
aux_ch = AUX_CH_USBC4;
@@ -3594,25 +3628,25 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_E;
break;
case DP_AUX_F:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC1;
else
aux_ch = AUX_CH_F;
break;
case DP_AUX_G:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC2;
else
aux_ch = AUX_CH_G;
break;
case DP_AUX_H:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC3;
else
aux_ch = AUX_CH_H;
break;
case DP_AUX_I:
- if (DISPLAY_VER(i915) == 13)
+ if (DISPLAY_VER(i915) >= 13)
aux_ch = AUX_CH_USBC4;
else
aux_ch = AUX_CH_I;
@@ -3632,7 +3666,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
return _intel_bios_max_tmds_clock(devdata);
}
@@ -3641,14 +3675,14 @@ int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
return _intel_bios_hdmi_level_shift(devdata);
}
int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost)
+ if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
return translate_iboost(devdata->child.dp_iboost_level);
@@ -3656,7 +3690,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd
int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
{
- if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost)
+ if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
return 0;
return translate_iboost(devdata->child.hdmi_iboost_level);
@@ -3665,15 +3699,23 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de
int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
return _intel_bios_dp_max_link_rate(devdata);
}
+int intel_bios_dp_max_lane_count(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
+
+ return _intel_bios_dp_max_lane_count(devdata);
+}
+
int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port];
if (!devdata || !devdata->child.ddc_pin)
return 0;
@@ -3683,16 +3725,16 @@ int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
{
- return devdata->i915->vbt.version >= 195 && devdata->child.dp_usb_type_c;
+ return devdata->i915->display.vbt.version >= 195 && devdata->child.dp_usb_type_c;
}
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata)
{
- return devdata->i915->vbt.version >= 209 && devdata->child.tbt;
+ return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt;
}
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
- return i915->vbt.ports[port];
+ return i915->display.vbt.ports[port];
}
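
Note on the new helper above: intel_bios_dp_max_lane_count() mirrors intel_bios_dp_max_link_rate(), resolving the port's VBT child device and translating its lane-count field. A minimal sketch of how a DP path might consume it (the caller name and the 0-means-unrestricted convention are illustrative assumptions, not part of this patch):

	/* Hypothetical caller: clamp the source lane count by the VBT limit. */
	static int dp_source_max_lanes(struct intel_encoder *encoder, int hw_max_lanes)
	{
		int vbt_max = intel_bios_dp_max_lane_count(encoder);

		/* Assumed convention: 0 means the VBT imposes no lane limit. */
		return vbt_max ? min(vbt_max, hw_max_lanes) : hw_max_lanes;
	}
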
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index e47582b0de0a..e375405a7828 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -258,6 +258,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
+int intel_bios_dp_max_lane_count(struct intel_encoder *encoder);
int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 79269d2c476b..4ace026b29bd 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -5,15 +5,17 @@
#include <drm/drm_atomic_state_helper.h>
+#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
+#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "skl_watermark.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
@@ -137,6 +139,42 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
return 0;
}
+static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp, int point)
+{
+ u32 val, val2;
+ u16 dclk;
+
+ val = intel_uncore_read(&dev_priv->uncore,
+ MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
+ val2 = intel_uncore_read(&dev_priv->uncore,
+ MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
+ dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
+ sp->dclk = DIV_ROUND_UP((16667 * dclk), 1000);
+ sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
+ sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);
+
+ sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
+ sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);
+
+ sp->t_rc = sp->t_rp + sp->t_ras;
+
+ return 0;
+}
+
+static int
+intel_read_qgv_point_info(struct drm_i915_private *dev_priv,
+ struct intel_qgv_point *sp,
+ int point)
+{
+ if (DISPLAY_VER(dev_priv) >= 14)
+ return mtl_read_qgv_point_info(dev_priv, sp, point);
+ else if (IS_DG1(dev_priv))
+ return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point);
+ else
+ return icl_pcode_read_qgv_point_info(dev_priv, sp, point);
+}
+
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
struct intel_qgv_info *qi,
bool is_y_tile)
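
In mtl_read_qgv_point_info() above, the DCLK field of MTL_MEM_SS_INFO_QGV_POINT_LOW encodes the DRAM clock in 16.667 MHz steps; DIV_ROUND_UP(16667 * dclk, 1000) scales it to MHz, rounding up. A worked decode with a made-up register value:

	u16 dclk = 180;		/* illustrative raw field, not a real readout */
	unsigned int dclk_mhz = DIV_ROUND_UP(16667 * dclk, 1000);	/* 3000060 / 1000 -> 3001 MHz */
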
@@ -147,7 +185,32 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->num_points = dram_info->num_qgv_points;
qi->num_psf_points = dram_info->num_psf_gv_points;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ switch (dram_info->type) {
+ case INTEL_DRAM_DDR4:
+ qi->t_bl = 4;
+ qi->max_numchannels = 2;
+ qi->channel_width = 64;
+ qi->deinterleave = 2;
+ break;
+ case INTEL_DRAM_DDR5:
+ qi->t_bl = 8;
+ qi->max_numchannels = 4;
+ qi->channel_width = 32;
+ qi->deinterleave = 2;
+ break;
+ case INTEL_DRAM_LPDDR4:
+ case INTEL_DRAM_LPDDR5:
+ qi->t_bl = 16;
+ qi->max_numchannels = 8;
+ qi->channel_width = 16;
+ qi->deinterleave = 4;
+ break;
+ default:
+ MISSING_CASE(dram_info->type);
+ return -EINVAL;
+ }
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
switch (dram_info->type) {
case INTEL_DRAM_DDR4:
qi->t_bl = is_y_tile ? 8 : 4;
@@ -181,7 +244,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
qi->max_numchannels = 1;
break;
}
- else if (DISPLAY_VER(dev_priv) == 11) {
+ } else if (DISPLAY_VER(dev_priv) == 11) {
qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;
qi->max_numchannels = 1;
}
@@ -193,11 +256,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
for (i = 0; i < qi->num_points; i++) {
struct intel_qgv_point *sp = &qi->points[i];
- if (IS_DG1(dev_priv))
- ret = dg1_mchbar_read_qgv_point_info(dev_priv, sp, i);
- else
- ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
-
+ ret = intel_read_qgv_point_info(dev_priv, sp, i);
if (ret)
return ret;
@@ -284,6 +343,13 @@ static const struct intel_sa_info adlp_sa_info = {
.derating = 20,
};
+static const struct intel_sa_info mtl_sa_info = {
+ .deburst = 32,
+ .deprogbwlimit = 38, /* GB/s */
+ .displayrtids = 256,
+ .derating = 20,
+};
+
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
{
struct intel_qgv_info qi = {};
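
mtl_sa_info above feeds the same derating math as the existing SA tables: each QGV point's raw bandwidth is reduced by sa->derating percent, then capped at the maxdebw derived from deprogbwlimit. The per-point step, paraphrased from tgl_get_bw_info() with the MTL numbers worked in:

	/* mtl_sa_info: derating = 20, so a raw 60000 MB/s point derates to
	 * 60000 * (100 - 20) / 100 = 48000 MB/s before the maxdebw cap:
	 */
	bi->deratedbw[j] = min(maxdebw, bw * (100 - sa->derating) / 100);
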
@@ -292,7 +358,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
int ipqdepth, ipqdepthpch = 16;
int dclk_max;
int maxdebw;
- int num_groups = ARRAY_SIZE(dev_priv->max_bw);
+ int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
int i, ret;
ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
@@ -308,7 +374,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->max_bw[i];
+ struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
int clpchgroup;
int j;
@@ -346,9 +412,9 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and is pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->sagv_status = I915_SAGV_ENABLED;
+ dev_priv->display.sagv.status = I915_SAGV_ENABLED;
return 0;
}
@@ -363,7 +429,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
int dclk_max;
int maxdebw, peakbw;
int clperchgroup;
- int num_groups = ARRAY_SIZE(dev_priv->max_bw);
+ int num_groups = ARRAY_SIZE(dev_priv->display.bw.max);
int i, ret;
ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile);
@@ -399,20 +465,22 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &dev_priv->max_bw[i];
+ struct intel_bw_info *bi = &dev_priv->display.bw.max[i];
struct intel_bw_info *bi_next;
int clpchgroup;
int j;
- if (i < num_groups - 1)
- bi_next = &dev_priv->max_bw[i + 1];
-
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
- if (i < num_groups - 1 && clpchgroup < clperchgroup)
- bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
- else
- bi_next->num_planes = 0;
+ if (i < num_groups - 1) {
+ bi_next = &dev_priv->display.bw.max[i + 1];
+
+ if (clpchgroup < clperchgroup)
+ bi_next->num_planes = (ipqdepth - clpchgroup) /
+ clpchgroup + 1;
+ else
+ bi_next->num_planes = 0;
+ }
bi->num_qgv_points = qi.num_points;
bi->num_psf_gv_points = qi.num_psf_points;
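
Besides the dev_priv->display.bw.max rename, the hunk above fixes a latent bug: bi_next was assigned only when i < num_groups - 1, yet the unconditional else branch still wrote bi_next->num_planes = 0, so the final iteration dereferenced an uninitialized pointer. The rework keeps every bi_next access under the bounds check. The old hazard in miniature (an illustrative reduction, not driver code):

	static void old_shape(int *arr, int n, int i, bool small)
	{
		int *next;			/* uninitialized */

		if (i < n - 1)
			next = &arr[i + 1];

		if (i < n - 1 && small)
			*next = 1;		/* guarded: fine */
		else
			*next = 0;		/* i == n - 1: writes through garbage */
	}
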
@@ -456,9 +524,9 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
* as it will fail and is pointless anyway.
*/
if (qi.num_points == 1)
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
else
- dev_priv->sagv_status = I915_SAGV_ENABLED;
+ dev_priv->display.sagv.status = I915_SAGV_ENABLED;
return 0;
}
@@ -466,7 +534,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
static void dg2_get_bw_info(struct drm_i915_private *i915)
{
unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000;
- int num_groups = ARRAY_SIZE(i915->max_bw);
+ int num_groups = ARRAY_SIZE(i915->display.bw.max);
int i;
/*
@@ -477,7 +545,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
* whereas DG2-G11 platforms have 38 GB/s.
*/
for (i = 0; i < num_groups; i++) {
- struct intel_bw_info *bi = &i915->max_bw[i];
+ struct intel_bw_info *bi = &i915->display.bw.max[i];
bi->num_planes = 1;
/* Need only one dummy QGV point per group */
@@ -485,7 +553,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
bi->deratedbw[0] = deratedbw;
}
- i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
}
static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
@@ -498,9 +566,9 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
+ for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) {
const struct intel_bw_info *bi =
- &dev_priv->max_bw[i];
+ &dev_priv->display.bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -526,9 +594,9 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
*/
num_planes = max(1, num_planes);
- for (i = ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) {
+ for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) {
const struct intel_bw_info *bi =
- &dev_priv->max_bw[i];
+ &dev_priv->display.bw.max[i];
/*
* Pcode will not expose all QGV points when
@@ -541,14 +609,14 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
return bi->deratedbw[qgv_point];
}
- return dev_priv->max_bw[0].deratedbw[qgv_point];
+ return dev_priv->display.bw.max[0].deratedbw[qgv_point];
}
static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
int psf_gv_point)
{
const struct intel_bw_info *bi =
- &dev_priv->max_bw[0];
+ &dev_priv->display.bw.max[0];
return bi->psf_bw[psf_gv_point];
}
@@ -558,7 +626,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_DG2(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 14)
+ tgl_get_bw_info(dev_priv, &mtl_sa_info);
+ else if (IS_DG2(dev_priv))
dg2_get_bw_info(dev_priv);
else if (IS_ALDERLAKE_P(dev_priv))
tgl_get_bw_info(dev_priv, &adlp_sa_info);
@@ -667,7 +737,7 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -678,7 +748,7 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj);
return to_intel_bw_state(bw_state);
}
@@ -689,7 +759,7 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *bw_state;
- bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
@@ -896,8 +966,8 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
static u16 icl_qgv_points_mask(struct drm_i915_private *i915)
{
- unsigned int num_psf_gv_points = i915->max_bw[0].num_psf_gv_points;
- unsigned int num_qgv_points = i915->max_bw[0].num_qgv_points;
+ unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
+ unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
u16 qgv_points = 0, psf_points = 0;
/*
@@ -970,8 +1040,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
int i, ret;
u16 qgv_points = 0, psf_points = 0;
unsigned int max_bw_point = 0, max_bw = 0;
- unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
- unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
+ unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points;
+ unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points;
bool changed = false;
/* FIXME earlier gens need some checks too */
@@ -1126,7 +1196,7 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
if (!state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj,
&state->base, &intel_bw_funcs);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 6e80162632dd..ed05070b7307 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -79,26 +79,26 @@ struct intel_cdclk_funcs {
void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
- dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config);
+ dev_priv->display.funcs.cdclk->get_cdclk(dev_priv, cdclk_config);
}
static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- dev_priv->cdclk_funcs->set_cdclk(dev_priv, cdclk_config, pipe);
+ dev_priv->display.funcs.cdclk->set_cdclk(dev_priv, cdclk_config, pipe);
}
static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_config)
{
- return dev_priv->cdclk_funcs->modeset_calc_cdclk(cdclk_config);
+ return dev_priv->display.funcs.cdclk->modeset_calc_cdclk(cdclk_config);
}
static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv,
int cdclk)
{
- return dev_priv->cdclk_funcs->calc_voltage_level(cdclk);
+ return dev_priv->display.funcs.cdclk->calc_voltage_level(cdclk);
}
static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
@@ -548,7 +548,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
else
default_credits = PFI_CREDIT(8);
- if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+ if (dev_priv->display.cdclk.hw.cdclk >= dev_priv->czclk_freq) {
/* CHV suggested value is 31 or 63 */
if (IS_CHERRYVIEW(dev_priv))
credits = PFI_CREDIT_63;
@@ -1026,7 +1026,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
drm_err(&dev_priv->drm, "DPLL0 not locked\n");
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
/* We'll want to keep using the current vco from now on. */
skl_set_preferred_cdclk_vco(dev_priv, vco);
@@ -1040,7 +1040,7 @@ static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");
- dev_priv->cdclk.hw.vco = 0;
+ dev_priv->display.cdclk.hw.vco = 0;
}
static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
@@ -1049,7 +1049,7 @@ static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv,
switch (cdclk) {
default:
drm_WARN_ON(&dev_priv->drm,
- cdclk != dev_priv->cdclk.hw.bypass);
+ cdclk != dev_priv->display.cdclk.hw.bypass);
drm_WARN_ON(&dev_priv->drm, vco != 0);
fallthrough;
case 308571:
@@ -1098,13 +1098,13 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco);
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
skl_dpll0_disable(dev_priv);
cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);
- if (dev_priv->cdclk.hw.vco != vco) {
+ if (dev_priv->display.cdclk.hw.vco != vco) {
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
@@ -1116,7 +1116,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
intel_de_posting_read(dev_priv, CDCLK_CTL);
- if (dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != vco)
skl_dpll0_enable(dev_priv, vco);
/* Wa Display #1183: skl,kbl,cfl */
@@ -1151,11 +1151,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
goto sanitize;
intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
/* Is PLL enabled and locked ? */
- if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+ if (dev_priv->display.cdclk.hw.vco == 0 ||
+ dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1166,7 +1166,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
*/
cdctl = intel_de_read(dev_priv, CDCLK_CTL);
expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+ skl_cdclk_decimal(dev_priv->display.cdclk.hw.cdclk);
if (cdctl == expected)
/* All well; nothing to sanitize */
return;
@@ -1175,9 +1175,9 @@ sanitize:
drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
+ dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = -1;
}
static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -1186,19 +1186,19 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
skl_sanitize_cdclk(dev_priv);
- if (dev_priv->cdclk.hw.cdclk != 0 &&
- dev_priv->cdclk.hw.vco != 0) {
+ if (dev_priv->display.cdclk.hw.cdclk != 0 &&
+ dev_priv->display.cdclk.hw.vco != 0) {
/*
* Use the current vco as our initial
* guess as to what the preferred vco is.
*/
if (dev_priv->skl_preferred_vco_freq == 0)
skl_set_preferred_cdclk_vco(dev_priv,
- dev_priv->cdclk.hw.vco);
+ dev_priv->display.cdclk.hw.vco);
return;
}
- cdclk_config = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->display.cdclk.hw;
cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
if (cdclk_config.vco == 0)
@@ -1211,7 +1211,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw;
cdclk_config.cdclk = cdclk_config.bypass;
cdclk_config.vco = 0;
@@ -1352,35 +1352,35 @@ static const struct intel_cdclk_vals dg2_cdclk_table[] = {
static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
int i;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
table[i].cdclk >= min_cdclk)
return table[i].cdclk;
drm_WARN(&dev_priv->drm, 1,
"Cannot satisfy minimum cdclk %d with refclk %u\n",
- min_cdclk, dev_priv->cdclk.hw.ref);
+ min_cdclk, dev_priv->display.cdclk.hw.ref);
return 0;
}
static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
int i;
- if (cdclk == dev_priv->cdclk.hw.bypass)
+ if (cdclk == dev_priv->display.cdclk.hw.bypass)
return 0;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
table[i].cdclk == cdclk)
- return dev_priv->cdclk.hw.ref * table[i].ratio;
+ return dev_priv->display.cdclk.hw.ref * table[i].ratio;
drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->cdclk.hw.ref);
+ cdclk, dev_priv->display.cdclk.hw.ref);
return 0;
}
@@ -1554,12 +1554,12 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");
- dev_priv->cdclk.hw.vco = 0;
+ dev_priv->display.cdclk.hw.vco = 0;
}
static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
intel_de_rmw(dev_priv, BXT_DE_PLL_CTL,
BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio));
@@ -1571,7 +1571,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
}
static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
@@ -1583,12 +1583,12 @@ static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n");
- dev_priv->cdclk.hw.vco = 0;
+ dev_priv->display.cdclk.hw.vco = 0;
}
static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
u32 val;
val = ICL_CDCLK_PLL_RATIO(ratio);
@@ -1601,12 +1601,12 @@ static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n");
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
}
static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref);
u32 val;
/* Write PLL ratio without disabling */
@@ -1625,7 +1625,7 @@ static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
val &= ~BXT_DE_PLL_FREQ_REQ;
intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
- dev_priv->cdclk.hw.vco = vco;
+ dev_priv->display.cdclk.hw.vco = vco;
}
static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1655,7 +1655,7 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
drm_WARN_ON(&dev_priv->drm,
- cdclk != dev_priv->cdclk.hw.bypass);
+ cdclk != dev_priv->display.cdclk.hw.bypass);
drm_WARN_ON(&dev_priv->drm, vco != 0);
fallthrough;
case 2:
@@ -1672,19 +1672,19 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
int cdclk)
{
- const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
+ const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
int i;
- if (cdclk == dev_priv->cdclk.hw.bypass)
+ if (cdclk == dev_priv->display.cdclk.hw.bypass)
return 0;
for (i = 0; table[i].refclk; i++)
- if (table[i].refclk == dev_priv->cdclk.hw.ref &&
+ if (table[i].refclk == dev_priv->display.cdclk.hw.ref &&
table[i].cdclk == cdclk)
return table[i].waveform;
drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->cdclk.hw.ref);
+ cdclk, dev_priv->display.cdclk.hw.ref);
return 0xffff;
}
@@ -1721,22 +1721,22 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
return;
}
- if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) {
- if (dev_priv->cdclk.hw.vco != vco)
+ if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0) {
+ if (dev_priv->display.cdclk.hw.vco != vco)
adlp_cdclk_pll_crawl(dev_priv, vco);
} else if (DISPLAY_VER(dev_priv) >= 11) {
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
icl_cdclk_pll_disable(dev_priv);
- if (dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != vco)
icl_cdclk_pll_enable(dev_priv, vco);
} else {
- if (dev_priv->cdclk.hw.vco != 0 &&
- dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != 0 &&
+ dev_priv->display.cdclk.hw.vco != vco)
bxt_de_pll_disable(dev_priv);
- if (dev_priv->cdclk.hw.vco != vco)
+ if (dev_priv->display.cdclk.hw.vco != vco)
bxt_de_pll_enable(dev_priv, vco);
}
@@ -1803,7 +1803,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* Can't read out the voltage level :(
* Let's just assume everything is as expected.
*/
- dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level;
+ dev_priv->display.cdclk.hw.voltage_level = cdclk_config->voltage_level;
}
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
@@ -1812,10 +1812,10 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
int cdclk, clock, vco;
intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
- if (dev_priv->cdclk.hw.vco == 0 ||
- dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
+ if (dev_priv->display.cdclk.hw.vco == 0 ||
+ dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass)
goto sanitize;
/* DPLL okay; verify the cdclock
@@ -1833,32 +1833,32 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE);
/* Make sure this is a legal cdclk value for the platform */
- cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk);
- if (cdclk != dev_priv->cdclk.hw.cdclk)
+ cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk);
+ if (cdclk != dev_priv->display.cdclk.hw.cdclk)
goto sanitize;
/* Make sure the VCO is correct for the cdclk */
vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
- if (vco != dev_priv->cdclk.hw.vco)
+ if (vco != dev_priv->display.cdclk.hw.vco)
goto sanitize;
expected = skl_cdclk_decimal(cdclk);
/* Figure out what CD2X divider we should be using for this cdclk */
if (has_cdclk_squasher(dev_priv))
- clock = dev_priv->cdclk.hw.vco / 2;
+ clock = dev_priv->display.cdclk.hw.vco / 2;
else
- clock = dev_priv->cdclk.hw.cdclk;
+ clock = dev_priv->display.cdclk.hw.cdclk;
expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock,
- dev_priv->cdclk.hw.vco);
+ dev_priv->display.cdclk.hw.vco);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
*/
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- dev_priv->cdclk.hw.cdclk >= 500000)
+ dev_priv->display.cdclk.hw.cdclk >= 500000)
expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
if (cdctl == expected)
@@ -1869,10 +1869,10 @@ sanitize:
drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
- dev_priv->cdclk.hw.cdclk = 0;
+ dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = -1;
}
static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -1881,11 +1881,11 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
bxt_sanitize_cdclk(dev_priv);
- if (dev_priv->cdclk.hw.cdclk != 0 &&
- dev_priv->cdclk.hw.vco != 0)
+ if (dev_priv->display.cdclk.hw.cdclk != 0 &&
+ dev_priv->display.cdclk.hw.vco != 0)
return;
- cdclk_config = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->display.cdclk.hw;
/*
* FIXME:
@@ -1902,7 +1902,7 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw;
cdclk_config.cdclk = cdclk_config.bypass;
cdclk_config.vco = 0;
@@ -1916,7 +1916,7 @@ static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
* intel_cdclk_init_hw - Initialize CDCLK hardware
* @i915: i915 device
*
- * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
+ * Initialize CDCLK. This consists mainly of initializing dev_priv->display.cdclk.hw and
* sanitizing the state of the hardware if needed. This is generally done only
* during the display core initialization sequence, after which the DMC will
* take care of turning CDCLK off/on as needed.
@@ -2077,10 +2077,10 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
+ if (!intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk))
return;
intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to");
@@ -2098,12 +2098,12 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
* functions use cdclk. Not all platforms/ports do,
* but we'll lock them all for simplicity.
*/
- mutex_lock(&dev_priv->gmbus_mutex);
+ mutex_lock(&dev_priv->display.gmbus.mutex);
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
- &dev_priv->gmbus_mutex);
+ &dev_priv->display.gmbus.mutex);
}
intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe);
@@ -2113,7 +2113,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
mutex_unlock(&intel_dp->aux.hw_mutex);
}
- mutex_unlock(&dev_priv->gmbus_mutex);
+ mutex_unlock(&dev_priv->display.gmbus.mutex);
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2124,9 +2124,9 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_audio_cdclk_change_post(dev_priv);
if (drm_WARN(&dev_priv->drm,
- intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
+ intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config),
"cdclk state doesn't match!\n")) {
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "[hw state]");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "[hw state]");
intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]");
}
}
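
Locking note on intel_set_cdclk() above: it takes the single display.gmbus.mutex first, then each DP AUX hw_mutex via mutex_lock_nest_lock(), which tells lockdep that the same-class AUX locks all nest under the gmbus lock instead of flagging them as a potential deadlock. The generic shape of that annotation (a sketch, not the driver code):

	mutex_lock(&outer);
	list_for_each_entry(item, &items, link)
		mutex_lock_nest_lock(&item->lock, &outer);	/* lockdep: nests under 'outer' */
	/* ... work under the locks, then release each item->lock and finally 'outer' ... */
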
@@ -2300,7 +2300,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
/*
- * HACK. Currently for TGL platforms we calculate
+ * HACK. Currently for TGL/DG2 platforms we calculate
* min_cdclk initially based on pixel_rate divided
* by 2, accounting for also plane requirements,
* however in some cases the lowest possible CDCLK
@@ -2308,14 +2308,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
* Explicitly stating here that this is currently
* a hack rather than a final solution.
*/
- if (IS_TIGERLAKE(dev_priv)) {
+ if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) {
/*
* Clamp to max_cdclk_freq in case pixel rate is higher,
* in order not to break an 8K, but still leave W/A at place.
*/
min_cdclk = max_t(int, min_cdclk,
min_t(int, crtc_state->pixel_rate,
- dev_priv->max_cdclk_freq));
+ dev_priv->display.cdclk.max_cdclk_freq));
}
return min_cdclk;
@@ -2368,10 +2368,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
for_each_pipe(dev_priv, pipe)
min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
- if (min_cdclk > dev_priv->max_cdclk_freq) {
+ if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
drm_dbg_kms(&dev_priv->drm,
"required cdclk (%d kHz) exceeds max (%d kHz)\n",
- min_cdclk, dev_priv->max_cdclk_freq);
+ min_cdclk, dev_priv->display.cdclk.max_cdclk_freq);
return -EINVAL;
}
@@ -2643,7 +2643,7 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_global_state *cdclk_state;
- cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj);
+ cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.cdclk.obj);
if (IS_ERR(cdclk_state))
return ERR_CAST(cdclk_state);
@@ -2693,7 +2693,7 @@ int intel_cdclk_init(struct drm_i915_private *dev_priv)
if (!cdclk_state)
return -ENOMEM;
- intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj,
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->display.cdclk.obj,
&cdclk_state->base, &intel_cdclk_funcs);
return 0;
@@ -2799,7 +2799,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
- int max_cdclk_freq = dev_priv->max_cdclk_freq;
+ int max_cdclk_freq = dev_priv->display.cdclk.max_cdclk_freq;
if (DISPLAY_VER(dev_priv) >= 10)
return 2 * max_cdclk_freq;
@@ -2825,19 +2825,19 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
if (IS_JSL_EHL(dev_priv)) {
- if (dev_priv->cdclk.hw.ref == 24000)
- dev_priv->max_cdclk_freq = 552000;
+ if (dev_priv->display.cdclk.hw.ref == 24000)
+ dev_priv->display.cdclk.max_cdclk_freq = 552000;
else
- dev_priv->max_cdclk_freq = 556800;
+ dev_priv->display.cdclk.max_cdclk_freq = 556800;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- if (dev_priv->cdclk.hw.ref == 24000)
- dev_priv->max_cdclk_freq = 648000;
+ if (dev_priv->display.cdclk.hw.ref == 24000)
+ dev_priv->display.cdclk.max_cdclk_freq = 648000;
else
- dev_priv->max_cdclk_freq = 652800;
+ dev_priv->display.cdclk.max_cdclk_freq = 652800;
} else if (IS_GEMINILAKE(dev_priv)) {
- dev_priv->max_cdclk_freq = 316800;
+ dev_priv->display.cdclk.max_cdclk_freq = 316800;
} else if (IS_BROXTON(dev_priv)) {
- dev_priv->max_cdclk_freq = 624000;
+ dev_priv->display.cdclk.max_cdclk_freq = 624000;
} else if (DISPLAY_VER(dev_priv) == 9) {
u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
@@ -2859,7 +2859,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
else
max_cdclk = 308571;
- dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
+ dev_priv->display.cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
} else if (IS_BROADWELL(dev_priv)) {
/*
* FIXME with extra cooling we can allow
@@ -2868,26 +2868,26 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
* available? PCI ID, VTB, something else?
*/
if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
- dev_priv->max_cdclk_freq = 450000;
+ dev_priv->display.cdclk.max_cdclk_freq = 450000;
else if (IS_BDW_ULX(dev_priv))
- dev_priv->max_cdclk_freq = 450000;
+ dev_priv->display.cdclk.max_cdclk_freq = 450000;
else if (IS_BDW_ULT(dev_priv))
- dev_priv->max_cdclk_freq = 540000;
+ dev_priv->display.cdclk.max_cdclk_freq = 540000;
else
- dev_priv->max_cdclk_freq = 675000;
+ dev_priv->display.cdclk.max_cdclk_freq = 675000;
} else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->max_cdclk_freq = 320000;
+ dev_priv->display.cdclk.max_cdclk_freq = 320000;
} else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->max_cdclk_freq = 400000;
+ dev_priv->display.cdclk.max_cdclk_freq = 400000;
} else {
/* otherwise assume cdclk is fixed */
- dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk;
+ dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk;
}
dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
- dev_priv->max_cdclk_freq);
+ dev_priv->display.cdclk.max_cdclk_freq);
drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
dev_priv->max_dotclk_freq);
@@ -2901,7 +2901,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
*/
void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
- intel_cdclk_get_cdclk(dev_priv, &dev_priv->cdclk.hw);
+ intel_cdclk_get_cdclk(dev_priv, &dev_priv->display.cdclk.hw);
/*
* 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
@@ -2911,7 +2911,7 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
*/
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_de_write(dev_priv, GMBUSFREQ_VLV,
- DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+ DIV_ROUND_UP(dev_priv->display.cdclk.hw.cdclk, 1000));
}
static int dg1_rawclk(struct drm_i915_private *dev_priv)
@@ -3036,6 +3036,13 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
freq = dg1_rawclk(dev_priv);
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
+ /*
+ * MTL always uses a 38.4 MHz rawclk. The bspec tells us
+ * "RAWCLK_FREQ defaults to the values for 38.4 and does
+ * not need to be programmed."
+ */
+ freq = 38400;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -3187,78 +3194,78 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (IS_DG2(dev_priv)) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
- dev_priv->cdclk.table = dg2_cdclk_table;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
/* Wa_22011320316:adl-p[a0] */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- dev_priv->cdclk.table = adlp_a_step_cdclk_table;
+ dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
else
- dev_priv->cdclk.table = adlp_cdclk_table;
+ dev_priv->display.cdclk.table = adlp_cdclk_table;
} else if (IS_ROCKETLAKE(dev_priv)) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
- dev_priv->cdclk.table = rkl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = rkl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
- dev_priv->cdclk.table = icl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (IS_JSL_EHL(dev_priv)) {
- dev_priv->cdclk_funcs = &ehl_cdclk_funcs;
- dev_priv->cdclk.table = icl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- dev_priv->cdclk_funcs = &icl_cdclk_funcs;
- dev_priv->cdclk.table = icl_cdclk_table;
+ dev_priv->display.funcs.cdclk = &icl_cdclk_funcs;
+ dev_priv->display.cdclk.table = icl_cdclk_table;
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- dev_priv->cdclk_funcs = &bxt_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &bxt_cdclk_funcs;
if (IS_GEMINILAKE(dev_priv))
- dev_priv->cdclk.table = glk_cdclk_table;
+ dev_priv->display.cdclk.table = glk_cdclk_table;
else
- dev_priv->cdclk.table = bxt_cdclk_table;
+ dev_priv->display.cdclk.table = bxt_cdclk_table;
} else if (DISPLAY_VER(dev_priv) == 9) {
- dev_priv->cdclk_funcs = &skl_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &skl_cdclk_funcs;
} else if (IS_BROADWELL(dev_priv)) {
- dev_priv->cdclk_funcs = &bdw_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &bdw_cdclk_funcs;
} else if (IS_HASWELL(dev_priv)) {
- dev_priv->cdclk_funcs = &hsw_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &hsw_cdclk_funcs;
} else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->cdclk_funcs = &chv_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &chv_cdclk_funcs;
} else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->cdclk_funcs = &vlv_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &vlv_cdclk_funcs;
} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
- dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_IRONLAKE(dev_priv)) {
- dev_priv->cdclk_funcs = &ilk_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &ilk_cdclk_funcs;
} else if (IS_GM45(dev_priv)) {
- dev_priv->cdclk_funcs = &gm45_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &gm45_cdclk_funcs;
} else if (IS_G45(dev_priv)) {
- dev_priv->cdclk_funcs = &g33_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &g33_cdclk_funcs;
} else if (IS_I965GM(dev_priv)) {
- dev_priv->cdclk_funcs = &i965gm_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i965gm_cdclk_funcs;
} else if (IS_I965G(dev_priv)) {
- dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
- dev_priv->cdclk_funcs = &pnv_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &pnv_cdclk_funcs;
} else if (IS_G33(dev_priv)) {
- dev_priv->cdclk_funcs = &g33_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &g33_cdclk_funcs;
} else if (IS_I945GM(dev_priv)) {
- dev_priv->cdclk_funcs = &i945gm_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i945gm_cdclk_funcs;
} else if (IS_I945G(dev_priv)) {
- dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs;
} else if (IS_I915GM(dev_priv)) {
- dev_priv->cdclk_funcs = &i915gm_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i915gm_cdclk_funcs;
} else if (IS_I915G(dev_priv)) {
- dev_priv->cdclk_funcs = &i915g_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i915g_cdclk_funcs;
} else if (IS_I865G(dev_priv)) {
- dev_priv->cdclk_funcs = &i865g_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i865g_cdclk_funcs;
} else if (IS_I85X(dev_priv)) {
- dev_priv->cdclk_funcs = &i85x_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i85x_cdclk_funcs;
} else if (IS_I845G(dev_priv)) {
- dev_priv->cdclk_funcs = &i845g_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i845g_cdclk_funcs;
} else if (IS_I830(dev_priv)) {
- dev_priv->cdclk_funcs = &i830_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i830_cdclk_funcs;
}
- if (drm_WARN(&dev_priv->drm, !dev_priv->cdclk_funcs,
+ if (drm_WARN(&dev_priv->drm, !dev_priv->display.funcs.cdclk,
"Unknown platform. Assuming i830\n"))
- dev_priv->cdclk_funcs = &i830_cdclk_funcs;
+ dev_priv->display.funcs.cdclk = &i830_cdclk_funcs;
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index b535cf6a7d9e..c674879a84a5 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -77,9 +77,9 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
#define intel_atomic_get_old_cdclk_state(state) \
- to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+ to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
#define intel_atomic_get_new_cdclk_state(state) \
- to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+ to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
int intel_cdclk_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 9583d17e858d..6bda4274eae9 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -26,6 +26,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpll.h"
+#include "intel_dsb.h"
#include "vlv_dsi_pll.h"
struct intel_color_funcs {
@@ -1167,22 +1168,22 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->color_funcs->load_luts(crtc_state);
+ dev_priv->display.funcs.color->load_luts(crtc_state);
}
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (dev_priv->color_funcs->color_commit_noarm)
- dev_priv->color_funcs->color_commit_noarm(crtc_state);
+ if (dev_priv->display.funcs.color->color_commit_noarm)
+ dev_priv->display.funcs.color->color_commit_noarm(crtc_state);
}
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->color_funcs->color_commit_arm(crtc_state);
+ dev_priv->display.funcs.color->color_commit_arm(crtc_state);
}
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
@@ -1238,15 +1239,15 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- return dev_priv->color_funcs->color_check(crtc_state);
+ return dev_priv->display.funcs.color->color_check(crtc_state);
}
void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (dev_priv->color_funcs->read_luts)
- dev_priv->color_funcs->read_luts(crtc_state);
+ if (dev_priv->display.funcs.color->read_luts)
+ dev_priv->display.funcs.color->read_luts(crtc_state);
}
static bool need_plane_update(struct intel_plane *plane,
@@ -2225,28 +2226,28 @@ void intel_color_init(struct intel_crtc *crtc)
if (HAS_GMCH(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->color_funcs = &chv_color_funcs;
+ dev_priv->display.funcs.color = &chv_color_funcs;
} else if (DISPLAY_VER(dev_priv) >= 4) {
- dev_priv->color_funcs = &i965_color_funcs;
+ dev_priv->display.funcs.color = &i965_color_funcs;
} else {
- dev_priv->color_funcs = &i9xx_color_funcs;
+ dev_priv->display.funcs.color = &i9xx_color_funcs;
}
} else {
if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->color_funcs = &icl_color_funcs;
+ dev_priv->display.funcs.color = &icl_color_funcs;
else if (DISPLAY_VER(dev_priv) == 10)
- dev_priv->color_funcs = &glk_color_funcs;
+ dev_priv->display.funcs.color = &glk_color_funcs;
else if (DISPLAY_VER(dev_priv) == 9)
- dev_priv->color_funcs = &skl_color_funcs;
+ dev_priv->display.funcs.color = &skl_color_funcs;
else if (DISPLAY_VER(dev_priv) == 8)
- dev_priv->color_funcs = &bdw_color_funcs;
+ dev_priv->display.funcs.color = &bdw_color_funcs;
else if (DISPLAY_VER(dev_priv) == 7) {
if (IS_HASWELL(dev_priv))
- dev_priv->color_funcs = &hsw_color_funcs;
+ dev_priv->display.funcs.color = &hsw_color_funcs;
else
- dev_priv->color_funcs = &ivb_color_funcs;
+ dev_priv->display.funcs.color = &ivb_color_funcs;
} else
- dev_priv->color_funcs = &ilk_color_funcs;
+ dev_priv->display.funcs.color = &ilk_color_funcs;
}
drm_crtc_enable_color_mgmt(&crtc->base,
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 1dcc268927a2..6d5cbeb8df4d 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -229,7 +229,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_property *prop;
- prop = dev_priv->force_audio_property;
+ prop = dev_priv->display.properties.force_audio;
if (prop == NULL) {
prop = drm_property_create_enum(dev, 0,
"audio",
@@ -238,7 +238,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
if (prop == NULL)
return;
- dev_priv->force_audio_property = prop;
+ dev_priv->display.properties.force_audio = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}
@@ -256,7 +256,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_property *prop;
- prop = dev_priv->broadcast_rgb_property;
+ prop = dev_priv->display.properties.broadcast_rgb;
if (prop == NULL) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
@@ -265,7 +265,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
if (prop == NULL)
return;
- dev_priv->broadcast_rgb_property = prop;
+ dev_priv->display.properties.broadcast_rgb = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 6a3893c8ff22..4a8ff2f97608 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -46,6 +46,7 @@
#include "intel_gmbus.h"
#include "intel_hotplug.h"
#include "intel_pch_display.h"
+#include "intel_pch_refclk.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
@@ -444,6 +445,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
/* FDI must always be 2.7 GHz */
pipe_config->port_clock = 135000 * 2;
+ adjusted_mode->crtc_clock = lpt_iclkip(pipe_config);
+
return 0;
}
@@ -643,9 +646,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
struct i2c_adapter *i2c;
bool ret = false;
- BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
-
- i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin);
edid = intel_crt_get_edid(connector, i2c);
if (edid) {
@@ -931,7 +932,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
- i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev_priv))
goto out;
@@ -1110,8 +1111,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
- dev_priv->fdi_rx_config = intel_de_read(dev_priv,
- FDI_RX_CTL(PIPE_A)) & fdi_config;
+ dev_priv->display.fdi.rx_config = intel_de_read(dev_priv,
+ FDI_RX_CTL(PIPE_A)) & fdi_config;
}
intel_crt_reset(&crt->base.base);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 4ca6e9493ff2..e9212f69c360 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -134,8 +134,8 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height, &fb->format->format,
fb->modifier, str_yes_no(plane_state->uapi.visible));
- drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id);
+ drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter);
if (plane_state->uapi.visible)
drm_dbg_kms(&i915->drm,
"\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
@@ -262,10 +262,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
if (DISPLAY_VER(i915) >= 9)
drm_dbg_kms(&i915->drm,
- "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n",
crtc->num_scalers,
pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
+ pipe_config->scaler_state.scaler_id,
+ pipe_config->hw.scaling_filter);
if (HAS_GMCH(i915))
drm_dbg_kms(&i915->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 16ac560eab49..87899e89b3a7 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -19,9 +19,9 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
-#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "skl_watermark.h"
/* Cursor formats */
static const u32 intel_cursor_formats[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 2330604b0bcc..da8472cdc135 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -57,6 +57,7 @@
#include "intel_lspcon.h"
#include "intel_pps.h"
#include "intel_psr.h"
+#include "intel_quirks.h"
#include "intel_snps_phy.h"
#include "intel_sprite.h"
#include "intel_tc.h"
@@ -323,28 +324,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
}
-int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
-{
- int dotclock;
-
- if (intel_crtc_has_dp_encoder(pipe_config))
- dotclock = intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
- else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
- dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
- else
- dotclock = pipe_config->port_clock;
-
- if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- !intel_crtc_has_dp_encoder(pipe_config))
- dotclock *= 2;
-
- if (pipe_config->pixel_multiplier)
- dotclock /= pipe_config->pixel_multiplier;
-
- return dotclock;
-}
-
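
The removed intel_crtc_dotclock() above captures the dotclock relations: HDMI deep color scales port_clock by 24/pipe_bpp, non-DP YCbCr 4:2:0 doubles the result, and pixel_multiplier divides it; the helper's callers are untouched, so it presumably just moves to another file in this merge. A worked example of the deep-color branch, numbers illustrative:

	/* 30 bpp deep color at port_clock 371250 kHz: */
	int dotclock = 371250 * 24 / 30;	/* = 297000 kHz pixel clock */
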
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
/* CRT dotclock is determined via other means */
@@ -631,7 +610,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl);
- if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+ if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
drm_dbg_kms(&dev_priv->drm,
"Quirk Increase DDI disabled time\n");
@@ -1425,7 +1404,7 @@ hsw_set_signal_levels(struct intel_encoder *encoder,
static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
{
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, reg, clk_sel_mask, clk_sel);
@@ -1435,17 +1414,17 @@ static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
*/
intel_de_rmw(i915, reg, clk_off, 0);
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
u32 clk_off)
{
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, reg, 0, clk_off);
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
@@ -1720,12 +1699,12 @@ static void icl_ddi_tc_enable_clock(struct intel_encoder *encoder,
intel_de_write(i915, DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0);
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
@@ -1734,12 +1713,12 @@ static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder)
enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
enum port port = encoder->port;
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, ICL_DPCLKA_CFGCR0,
0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port));
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
}
@@ -1824,7 +1803,7 @@ static void skl_ddi_enable_clock(struct intel_encoder *encoder,
if (drm_WARN_ON(&i915->drm, !pll))
return;
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, DPLL_CTRL2,
DPLL_CTRL2_DDI_CLK_OFF(port) |
@@ -1832,7 +1811,7 @@ static void skl_ddi_enable_clock(struct intel_encoder *encoder,
DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static void skl_ddi_disable_clock(struct intel_encoder *encoder)
@@ -1840,12 +1819,12 @@ static void skl_ddi_disable_clock(struct intel_encoder *encoder)
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum port port = encoder->port;
- mutex_lock(&i915->dpll.lock);
+ mutex_lock(&i915->display.dpll.lock);
intel_de_rmw(i915, DPLL_CTRL2,
0, DPLL_CTRL2_DDI_CLK_OFF(port));
- mutex_unlock(&i915->dpll.lock);
+ mutex_unlock(&i915->display.dpll.lock);
}
static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder)
@@ -2691,10 +2670,14 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
- intel_ddi_disable_pipe_clock(old_crtc_state);
+ if (DISPLAY_VER(dev_priv) < 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
intel_disable_ddi_buf(encoder, old_crtc_state);
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_ddi_disable_pipe_clock(old_crtc_state);
+
intel_display_power_put(dev_priv,
dig_port->ddi_io_power_domain,
fetch_and_zero(&dig_port->ddi_io_wakeref));
@@ -2862,6 +2845,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ u32 buf_ctl;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
@@ -2919,8 +2904,12 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
* On ADL_P the PHY link rate and lane count must be programmed but
* these are both 0 for HDMI.
*/
- intel_de_write(dev_priv, DDI_BUF_CTL(port),
- dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
+ buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE;
+ if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
+ buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
+ }
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
@@ -3611,10 +3600,22 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- if (intel_crtc_has_dp_encoder(crtc_state))
- return intel_dp_initial_fastset_check(encoder, crtc_state);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ bool fastset = true;
- return true;
+ if (intel_phy_is_tc(i915, phy)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n",
+ encoder->base.base.id, encoder->base.name);
+ crtc_state->uapi.mode_changed = true;
+ fastset = false;
+ }
+
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ !intel_dp_initial_fastset_check(encoder, crtc_state))
+ fastset = false;
+
+ return fastset;
}
static enum intel_output_type
@@ -4028,7 +4029,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
static bool lpt_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, SDEISR) & bit;
}
@@ -4036,7 +4037,7 @@ static bool lpt_digital_port_connected(struct intel_encoder *encoder)
static bool hsw_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, DEISR) & bit;
}
@@ -4044,7 +4045,7 @@ static bool hsw_digital_port_connected(struct intel_encoder *encoder)
static bool bdw_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
+ u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 40fbf8a296e2..461c62c88413 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -91,6 +91,7 @@
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
#include "intel_dpt.h"
+#include "intel_dsb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
@@ -117,6 +118,7 @@
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "skl_watermark.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
@@ -163,16 +165,16 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
*/
void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
- if (dev_priv->wm_disp->update_wm)
- dev_priv->wm_disp->update_wm(dev_priv);
+ if (dev_priv->display.funcs.wm->update_wm)
+ dev_priv->display.funcs.wm->update_wm(dev_priv);
}
static int intel_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->compute_pipe_wm)
- return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
+ if (dev_priv->display.funcs.wm->compute_pipe_wm)
+ return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc);
return 0;
}
@@ -180,20 +182,20 @@ static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (!dev_priv->wm_disp->compute_intermediate_wm)
+ if (!dev_priv->display.funcs.wm->compute_intermediate_wm)
return 0;
if (drm_WARN_ON(&dev_priv->drm,
- !dev_priv->wm_disp->compute_pipe_wm))
+ !dev_priv->display.funcs.wm->compute_pipe_wm))
return 0;
- return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
+ return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc);
}
static bool intel_initial_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->initial_watermarks) {
- dev_priv->wm_disp->initial_watermarks(state, crtc);
+ if (dev_priv->display.funcs.wm->initial_watermarks) {
+ dev_priv->display.funcs.wm->initial_watermarks(state, crtc);
return true;
}
return false;
@@ -203,23 +205,23 @@ static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->atomic_update_watermarks)
- dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
+ if (dev_priv->display.funcs.wm->atomic_update_watermarks)
+ dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc);
}
static void intel_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->optimize_watermarks)
- dev_priv->wm_disp->optimize_watermarks(state, crtc);
+ if (dev_priv->display.funcs.wm->optimize_watermarks)
+ dev_priv->display.funcs.wm->optimize_watermarks(state, crtc);
}
static int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (dev_priv->wm_disp->compute_global_watermarks)
- return dev_priv->wm_disp->compute_global_watermarks(state);
+ if (dev_priv->display.funcs.wm->compute_global_watermarks)
+ return dev_priv->display.funcs.wm->compute_global_watermarks(state);
return 0;
}
@@ -618,7 +620,10 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~PIPECONF_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 14)
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
+ FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
+ else if (DISPLAY_VER(dev_priv) >= 12)
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
@@ -1486,7 +1491,7 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state)
* Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
* TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
*/
- if (i915->dpll.mgr) {
+ if (i915->display.dpll.mgr) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state))
continue;
@@ -1838,7 +1843,9 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
+ i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
+ CHICKEN_TRANS(transcoder);
u32 val;
val = intel_de_read(dev_priv, reg);
@@ -2080,22 +2087,20 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
if (phy == PHY_NONE)
return false;
- else if (IS_DG2(dev_priv))
- /*
- * DG2 outputs labelled as "combo PHY" in the bspec use
- * SNPS PHYs with completely different programming,
- * hence we always return false here.
- */
- return false;
else if (IS_ALDERLAKE_S(dev_priv))
return phy <= PHY_E;
else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
return phy <= PHY_D;
else if (IS_JSL_EHL(dev_priv))
return phy <= PHY_C;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12))
return phy <= PHY_B;
else
+ /*
+ * DG2 outputs labelled as "combo PHY" in the bspec use
+ * SNPS PHYs with completely different programming,
+ * hence we always return false here.
+ */
return false;
}
@@ -2401,7 +2406,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) != 2)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- if (!dev_priv->wm_disp->initial_watermarks)
+ if (!dev_priv->display.funcs.wm->initial_watermarks)
intel_update_watermarks(dev_priv);
/* clock the pipe down to 640x480@60 to potentially save power */
@@ -2660,7 +2665,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
if (DISPLAY_VER(i915) < 4) {
- clock_limit = i915->max_cdclk_freq * 9 / 10;
+ clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;
/*
* Enable double wide mode when the dot clock
@@ -2692,6 +2697,10 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
+ if (ret)
+ return ret;
+
ret = intel_crtc_compute_pipe_src(crtc_state);
if (ret)
return ret;
@@ -2718,19 +2727,11 @@ intel_reduce_m_n_ratio(u32 *num, u32 *den)
}
}
-static void compute_m_n(unsigned int m, unsigned int n,
- u32 *ret_m, u32 *ret_n,
- bool constant_n)
+static void compute_m_n(u32 *ret_m, u32 *ret_n,
+ u32 m, u32 n, u32 constant_n)
{
- /*
- * Several DP dongles in particular seem to be fussy about
- * too large link M/N values. Give N value as 0x8000 that
- * should be acceptable by specific devices. 0x8000 is the
- * specified fixed N value for asynchronous clock mode,
- * which the devices expect also in synchronous clock mode.
- */
if (constant_n)
- *ret_n = DP_LINK_CONSTANT_N_VALUE;
+ *ret_n = constant_n;
else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
@@ -2742,22 +2743,28 @@ void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n, bool fec_enable)
+ bool fec_enable)
{
u32 data_clock = bits_per_pixel * pixel_clock;
if (fec_enable)
data_clock = intel_dp_mode_to_fec_clock(data_clock);
+ /*
+ * Windows/BIOS uses fixed M/N values always. Follow suit.
+ *
+ * Also several DP dongles in particular seem to be fussy
+ * about too large link M/N values. Presumably the 20bit
+ * value used by Windows/BIOS is acceptable to everyone.
+ */
m_n->tu = 64;
- compute_m_n(data_clock,
- link_clock * nlanes * 8,
- &m_n->data_m, &m_n->data_n,
- constant_n);
+ compute_m_n(&m_n->data_m, &m_n->data_n,
+ data_clock, link_clock * nlanes * 8,
+ 0x8000000);
- compute_m_n(pixel_clock, link_clock,
- &m_n->link_m, &m_n->link_n,
- constant_n);
+ compute_m_n(&m_n->link_m, &m_n->link_n,
+ pixel_clock, link_clock,
+ 0x80000);
}
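compute_m_n() now receives the fixed N directly instead of a bool: 0x8000000 for the data M/N and 0x80000 for the link M/N, the fixed values the comment above attributes to Windows/BIOS. A user-space sketch of the constant-N branch only, with illustrative clock numbers (the kernel's reduction via intel_reduce_m_n_ratio() is omitted):

#include <stdint.h>
#include <stdio.h>

/* pick M/N such that m/n == M/N, with N pinned to the given constant */
static void compute_m_n(uint32_t *ret_m, uint32_t *ret_n,
			uint64_t m, uint64_t n, uint32_t constant_n)
{
	*ret_n = constant_n;
	*ret_m = (uint32_t)(m * constant_n / n);
}

int main(void)
{
	uint32_t m, n;

	/* e.g. a 148500 kHz pixel clock on a 540000 kHz (HBR2) link */
	compute_m_n(&m, &n, 148500, 540000, 0x80000);
	printf("link M/N = %u/%u\n", m, n);	/* 144179/524288 */
	return 0;
}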
static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
@@ -2773,12 +2780,12 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
- if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
+ if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
drm_dbg_kms(&dev_priv->drm,
"SSC %s by BIOS, overriding VBT which says %s\n",
str_enabled_disabled(bios_lvds_use_ssc),
- str_enabled_disabled(dev_priv->vbt.lvds_use_ssc));
- dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
+ str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
+ dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
}
@@ -4126,7 +4133,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, CHICKEN_TRANS(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
+ MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
+ CHICKEN_TRANS(pipe_config->cpu_transcoder));
pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
} else {
@@ -4145,7 +4154,7 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->display->get_pipe_config(crtc, crtc_state))
+ if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state))
return false;
crtc_state->hw.active = true;
@@ -4374,7 +4383,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
u32 dpll = pipe_config->dpll_hw_state.dpll;
if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->vbt.lvds_ssc_freq;
+ return dev_priv->display.vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
else if (DISPLAY_VER(dev_priv) != 2)
@@ -4492,7 +4501,31 @@ int intel_dotclock_calculate(int link_freq,
if (!m_n->link_n)
return 0;
- return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
+ m_n->link_n);
+}
+
+int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
+{
+ int dotclock;
+
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
+ dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24,
+ pipe_config->pipe_bpp);
+ else
+ dotclock = pipe_config->port_clock;
+
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ !intel_crtc_has_dp_encoder(pipe_config))
+ dotclock *= 2;
+
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
+
+ return dotclock;
}
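intel_dotclock_calculate() also switches from div_u64() to DIV_ROUND_UP_ULL(): the fixed M/N values are rounded down when computed, so rounding up on the way back recovers the exact pixel clock instead of under-reporting it by 1 kHz. A worked example reusing the M/N pair from the sketch above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t link_m = 144179, link_n = 0x80000, link_freq = 540000;

	uint64_t down = link_m * link_freq / link_n;		  /* 148499 */
	uint64_t up = (link_m * link_freq + link_n - 1) / link_n; /* 148500 */

	printf("truncated: %llu kHz, rounded up: %llu kHz\n",
	       (unsigned long long)down, (unsigned long long)up);
	return 0;
}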
/* Returns the currently programmed mode of the given encoder. */
@@ -4753,7 +4786,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
/* Display WA #1135: BXT:ALL GLK:ALL */
if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
- dev_priv->ipc_enabled)
+ skl_watermark_ipc_enabled(dev_priv))
linetime_wm /= 2;
return min(linetime_wm, 0x1ff);
@@ -4799,10 +4832,6 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
crtc_state->update_wm_post = true;
if (mode_changed) {
- ret = intel_dpll_crtc_compute_clock(state, crtc);
- if (ret)
- return ret;
-
ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
if (ret)
return ret;
@@ -5367,46 +5396,14 @@ bool intel_fuzzy_clock_check(int clock1, int clock2)
}
static bool
-intel_compare_m_n(unsigned int m, unsigned int n,
- unsigned int m2, unsigned int n2,
- bool exact)
-{
- if (m == m2 && n == n2)
- return true;
-
- if (exact || !m || !n || !m2 || !n2)
- return false;
-
- BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
-
- if (n > n2) {
- while (n > n2) {
- m2 <<= 1;
- n2 <<= 1;
- }
- } else if (n < n2) {
- while (n < n2) {
- m <<= 1;
- n <<= 1;
- }
- }
-
- if (n != n2)
- return false;
-
- return intel_fuzzy_clock_check(m, m2);
-}
-
-static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
- const struct intel_link_m_n *m2_n2,
- bool exact)
+ const struct intel_link_m_n *m2_n2)
{
return m_n->tu == m2_n2->tu &&
- intel_compare_m_n(m_n->data_m, m_n->data_n,
- m2_n2->data_m, m2_n2->data_n, exact) &&
- intel_compare_m_n(m_n->link_m, m_n->link_n,
- m2_n2->link_m, m2_n2->link_n, exact);
+ m_n->data_m == m2_n2->data_m &&
+ m_n->data_n == m2_n2->data_n &&
+ m_n->link_m == m2_n2->link_m &&
+ m_n->link_n == m2_n2->link_n;
}
static bool
@@ -5600,8 +5597,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_M_N(name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
- &pipe_config->name,\
- !fastset)) { \
+ &pipe_config->name)) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i data %i/%i link %i/%i, " \
"found tu %i, data %i/%i link %i/%i)", \
@@ -5648,9 +5644,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
*/
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
if (!intel_compare_link_m_n(&current_config->name, \
- &pipe_config->name, !fastset) && \
+ &pipe_config->name) && \
!intel_compare_link_m_n(&current_config->alt_name, \
- &pipe_config->name, !fastset)) { \
+ &pipe_config->name)) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected tu %i data %i/%i link %i/%i, " \
"or tu %i data %i/%i link %i/%i, " \
@@ -5685,16 +5681,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
-#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
- if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
- "(expected %i, found %i)", \
- current_config->name, \
- pipe_config->name); \
- ret = false; \
- } \
-} while (0)
-
#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
if (!intel_compare_infoframe(&current_config->infoframes.name, \
&pipe_config->infoframes.name)) { \
@@ -5750,8 +5736,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) {
- PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
+ if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
+ if (!fastset || !pipe_config->seamless_m_n)
+ PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
} else {
PIPE_CONF_CHECK_M_N(dp_m_n);
PIPE_CONF_CHECK_M_N(dp_m2_n2);
@@ -5813,7 +5800,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_RECT(pch_pfit.dst);
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
- PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
+ PIPE_CONF_CHECK_I(pixel_rate);
PIPE_CONF_CHECK_X(gamma_mode);
if (IS_CHERRYVIEW(dev_priv))
@@ -5840,7 +5827,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(double_wide);
- if (dev_priv->dpll.mgr) {
+ if (dev_priv->display.dpll.mgr) {
PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
@@ -5883,9 +5870,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+ if (!fastset || !pipe_config->seamless_m_n) {
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
+ }
+ PIPE_CONF_CHECK_I(port_clock);
PIPE_CONF_CHECK_I(min_voltage_level);
@@ -5927,7 +5916,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
-#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_CHECK_COLOR_LUT
#undef PIPE_CONF_CHECK_TIMINGS
#undef PIPE_CONF_CHECK_RECT
@@ -6049,20 +6037,6 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
}
}
-static void intel_modeset_clear_plls(struct intel_atomic_state *state)
-{
- struct intel_crtc_state *new_crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state))
- continue;
-
- intel_release_shared_dplls(state, crtc);
- }
-}
-
/*
* This implements the workaround described in the "notes" section of the mode
* set sequence documentation. When going from no pipes or single pipe to
@@ -6163,23 +6137,6 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
new_crtc_state->update_pipe = true;
}
-static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- /*
- * If we're not doing the full modeset we want to
- * keep the current M/N values as they may be
- * sufficiently different to the computed values
- * to cause problems.
- *
- * FIXME: should really copy more fuzzy state here
- */
- new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
- new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
- new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
- new_crtc_state->has_drrs = old_crtc_state->has_drrs;
-}
-
static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
struct intel_crtc *crtc,
u8 plane_ids_mask)
@@ -6836,9 +6793,11 @@ static int intel_atomic_check(struct drm_device *dev,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- ret = intel_modeset_pipe_config_late(state, crtc);
- if (ret)
- goto fail;
+ if (new_crtc_state->hw.enable) {
+ ret = intel_modeset_pipe_config_late(state, crtc);
+ if (ret)
+ goto fail;
+ }
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
}
@@ -6889,15 +6848,12 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (intel_crtc_needs_modeset(new_crtc_state)) {
- any_ms = true;
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- }
- if (!new_crtc_state->update_pipe)
- continue;
+ any_ms = true;
- intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
+ intel_release_shared_dplls(state, crtc);
}
if (any_ms && !check_digital_port_conflicts(state)) {
@@ -6938,8 +6894,6 @@ static int intel_atomic_check(struct drm_device *dev,
ret = intel_modeset_calc_cdclk(state);
if (ret)
return ret;
-
- intel_modeset_clear_plls(state);
}
ret = intel_atomic_check_crtcs(state);
@@ -7058,6 +7012,10 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
if (DISPLAY_VER(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
hsw_set_linetime_wm(new_crtc_state);
+
+ if (new_crtc_state->seamless_m_n)
+ intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
+ &new_crtc_state->dp_m_n);
}
static void commit_pipe_pre_planes(struct intel_atomic_state *state,
@@ -7120,7 +7078,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display->crtc_enable(state, crtc);
+ dev_priv->display.funcs.display->crtc_enable(state, crtc);
if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
return;
@@ -7199,7 +7157,7 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
*/
intel_crtc_disable_pipe_crc(crtc);
- dev_priv->display->crtc_disable(state, crtc);
+ dev_priv->display.funcs.display->crtc_disable(state, crtc);
crtc->active = false;
intel_fbc_disable(crtc);
intel_disable_shared_dpll(old_crtc_state);
@@ -7410,7 +7368,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
struct intel_atomic_state *state, *next;
struct llist_node *freed;
- freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+ freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
llist_for_each_entry_safe(state, next, freed, freed)
drm_atomic_state_put(&state->base);
}
@@ -7418,7 +7376,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+ container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
intel_atomic_helper_free_state(dev_priv);
}
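The atomic helper keeps its lock-free teardown scheme across the move: fence callbacks push dead states onto display.atomic_helper.free_list, and only the push that finds the list empty schedules the worker, which then drains the whole batch with llist_del_all(). A condensed sketch of the producer side, mirroring the FENCE_FREE case further down:

#include <linux/llist.h>
#include <linux/workqueue.h>

static void defer_free(struct drm_i915_private *i915,
		       struct intel_atomic_state *state)
{
	struct intel_atomic_helper *helper = &i915->display.atomic_helper;

	/* llist_add() returns true only when the list was empty, so the
	 * worker is scheduled once per batch rather than once per state */
	if (llist_add(&state->freed, &helper->free_list))
		schedule_work(&helper->free_work);
}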
@@ -7588,7 +7546,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display->commit_modeset_enables(state);
+ dev_priv->display.funcs.display->commit_modeset_enables(state);
intel_encoders_update_complete(state);
@@ -7711,7 +7669,7 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
case FENCE_FREE:
{
struct intel_atomic_helper *helper =
- &to_i915(state->base.dev)->atomic_helper;
+ &to_i915(state->base.dev)->display.atomic_helper;
if (llist_add(&state->freed, &helper->free_list))
schedule_work(&helper->free_work);
@@ -7814,12 +7772,12 @@ static int intel_atomic_commit(struct drm_device *dev,
i915_sw_fence_commit(&state->commit_ready);
if (nonblock && state->modeset) {
- queue_work(dev_priv->modeset_wq, &state->base.commit_work);
+ queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
} else if (nonblock) {
- queue_work(dev_priv->flip_wq, &state->base.commit_work);
+ queue_work(dev_priv->display.wq.flip, &state->base.commit_work);
} else {
if (state->modeset)
- flush_workqueue(dev_priv->modeset_wq);
+ flush_workqueue(dev_priv->display.wq.modeset);
intel_atomic_commit_tail(state);
}
@@ -7925,7 +7883,7 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
- if (!dev_priv->vbt.int_crt_support)
+ if (!dev_priv->display.vbt.int_crt_support)
return false;
return true;
@@ -8060,7 +8018,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
- if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
+ if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support)
intel_crt_init(dev_priv);
/*
@@ -8172,6 +8130,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
+static int max_dotclock(struct drm_i915_private *i915)
+{
+ int max_dotclock = i915->max_dotclk_freq;
+
+ /* icl+ might use bigjoiner */
+ if (DISPLAY_VER(i915) >= 11)
+ max_dotclock *= 2;
+
+ return max_dotclock;
+}
+
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -8209,6 +8178,13 @@ intel_mode_valid(struct drm_device *dev,
DRM_MODE_FLAG_CLKDIV2))
return MODE_BAD;
+ /*
+ * Reject clearly excessive dotclocks early to
+ * avoid having to worry about huge integers later.
+ */
+ if (mode->clock > max_dotclock(dev_priv))
+ return MODE_CLOCK_HIGH;
+
/* Transcoder timing limits */
if (DISPLAY_VER(dev_priv) >= 11) {
hdisplay_max = 16384;
@@ -8319,7 +8295,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.atomic_state_free = intel_atomic_state_free,
};
-static const struct drm_i915_display_funcs skl_display_funcs = {
+static const struct intel_display_funcs skl_display_funcs = {
.get_pipe_config = hsw_get_pipe_config,
.crtc_enable = hsw_crtc_enable,
.crtc_disable = hsw_crtc_disable,
@@ -8327,7 +8303,7 @@ static const struct drm_i915_display_funcs skl_display_funcs = {
.get_initial_plane_config = skl_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs ddi_display_funcs = {
+static const struct intel_display_funcs ddi_display_funcs = {
.get_pipe_config = hsw_get_pipe_config,
.crtc_enable = hsw_crtc_enable,
.crtc_disable = hsw_crtc_disable,
@@ -8335,7 +8311,7 @@ static const struct drm_i915_display_funcs ddi_display_funcs = {
.get_initial_plane_config = i9xx_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs pch_split_display_funcs = {
+static const struct intel_display_funcs pch_split_display_funcs = {
.get_pipe_config = ilk_get_pipe_config,
.crtc_enable = ilk_crtc_enable,
.crtc_disable = ilk_crtc_disable,
@@ -8343,7 +8319,7 @@ static const struct drm_i915_display_funcs pch_split_display_funcs = {
.get_initial_plane_config = i9xx_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs vlv_display_funcs = {
+static const struct intel_display_funcs vlv_display_funcs = {
.get_pipe_config = i9xx_get_pipe_config,
.crtc_enable = valleyview_crtc_enable,
.crtc_disable = i9xx_crtc_disable,
@@ -8351,7 +8327,7 @@ static const struct drm_i915_display_funcs vlv_display_funcs = {
.get_initial_plane_config = i9xx_get_initial_plane_config,
};
-static const struct drm_i915_display_funcs i9xx_display_funcs = {
+static const struct intel_display_funcs i9xx_display_funcs = {
.get_pipe_config = i9xx_get_pipe_config,
.crtc_enable = i9xx_crtc_enable,
.crtc_disable = i9xx_crtc_disable,
@@ -8374,16 +8350,16 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
intel_dpll_init_clock_hook(dev_priv);
if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display = &skl_display_funcs;
+ dev_priv->display.funcs.display = &skl_display_funcs;
} else if (HAS_DDI(dev_priv)) {
- dev_priv->display = &ddi_display_funcs;
+ dev_priv->display.funcs.display = &ddi_display_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display = &pch_split_display_funcs;
+ dev_priv->display.funcs.display = &pch_split_display_funcs;
} else if (IS_CHERRYVIEW(dev_priv) ||
IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display = &vlv_display_funcs;
+ dev_priv->display.funcs.display = &vlv_display_funcs;
} else {
- dev_priv->display = &i9xx_display_funcs;
+ dev_priv->display.funcs.display = &i9xx_display_funcs;
}
intel_fdi_init_hook(dev_priv);
@@ -8396,11 +8372,11 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
+ cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);
intel_update_cdclk(i915);
- intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
- cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
+ intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
+ cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
}
static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
@@ -8456,7 +8432,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->wm_disp->optimize_watermarks)
+ if (!dev_priv->display.funcs.wm->optimize_watermarks)
return;
state = drm_atomic_state_alloc(&dev_priv->drm);
@@ -8688,11 +8664,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
intel_dmc_ucode_init(i915);
- i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
- i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
- WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
-
- i915->window2_delay = 0; /* No DSB so no window2 delay */
+ i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
intel_mode_config_init(i915);
@@ -8708,8 +8682,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- init_llist_head(&i915->atomic_helper.free_list);
- INIT_WORK(&i915->atomic_helper.free_work,
+ init_llist_head(&i915->display.atomic_helper.free_list);
+ INIT_WORK(&i915->display.atomic_helper.free_work,
intel_atomic_helper_free_state_worker);
intel_init_quirks(i915);
@@ -8769,7 +8743,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_hdcp_component_init(i915);
- if (i915->max_cdclk_freq == 0)
+ if (i915->display.cdclk.max_cdclk_freq == 0)
intel_update_max_cdclk(i915);
/*
@@ -8833,7 +8807,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_hpd_init(i915);
intel_hpd_poll_disable(i915);
- intel_init_ipc(i915);
+ skl_watermark_ipc_init(i915);
return 0;
}
@@ -8964,7 +8938,7 @@ void intel_display_resume(struct drm_device *dev)
if (!ret)
ret = __intel_display_resume(i915, state, &ctx);
- intel_enable_ipc(i915);
+ skl_watermark_ipc_update(i915);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -8999,11 +8973,18 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
- flush_workqueue(i915->flip_wq);
- flush_workqueue(i915->modeset_wq);
+ flush_workqueue(i915->display.wq.flip);
+ flush_workqueue(i915->display.wq.modeset);
+
+ flush_work(&i915->display.atomic_helper.free_work);
+ drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
- flush_work(&i915->atomic_helper.free_work);
- drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
+ /*
+ * MST topology needs to be suspended so we don't have any calls to
+ * fbdev after it's finalized. MST will be destroyed later as part of
+ * drm_mode_config_cleanup()
+ */
+ intel_dp_mst_suspend(i915);
}
/* part #2: call after irq uninstall */
@@ -9018,13 +8999,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
*/
intel_hpd_poll_fini(i915);
- /*
- * MST topology needs to be suspended so we don't have any calls to
- * fbdev after it's finalized. MST will be destroyed later as part of
- * drm_mode_config_cleanup()
- */
- intel_dp_mst_suspend(i915);
-
/* poll work can call into fbdev, hence clean that up afterwards */
intel_fbdev_fini(i915);
@@ -9041,8 +9015,8 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
intel_gmbus_teardown(i915);
- destroy_workqueue(i915->flip_wq);
- destroy_workqueue(i915->modeset_wq);
+ destroy_workqueue(i915->display.wq.flip);
+ destroy_workqueue(i915->display.wq.modeset);
intel_fbc_cleanup(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index cef251025d7a..884e8e67b17c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -375,7 +375,7 @@ enum hpd_pin {
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
- for_each_if(INTEL_INFO(__dev_priv)->display.pipe_mask & BIT(__p))
+ for_each_if(RUNTIME_INFO(__dev_priv)->pipe_mask & BIT(__p))
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
for_each_pipe(__dev_priv, __p) \
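The pipe and transcoder masks move from the const device info to RUNTIME_INFO(), since pipes can be fused off and the masks adjusted at probe. Call sites are unchanged; a small hypothetical helper showing the iteration:

static int count_pipes(struct drm_i915_private *i915)
{
	enum pipe pipe;
	int n = 0;

	/* visits only the pipes set in RUNTIME_INFO(i915)->pipe_mask */
	for_each_pipe(i915, pipe)
		n++;

	return n;
}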
@@ -383,7 +383,7 @@ enum hpd_pin {
#define for_each_cpu_transcoder(__dev_priv, __t) \
for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
- for_each_if (INTEL_INFO(__dev_priv)->display.cpu_transcoder_mask & BIT(__t))
+ for_each_if (RUNTIME_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t))
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
for_each_cpu_transcoder(__dev_priv, __t) \
@@ -547,7 +547,7 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n, bool fec_enable);
+ bool fec_enable);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
enum drm_mode_status
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
new file mode 100644
index 000000000000..96cf994b0ad1
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -0,0 +1,418 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_CORE_H__
+#define __INTEL_DISPLAY_CORE_H__
+
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_connector.h>
+
+#include "intel_cdclk.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_dmc.h"
+#include "intel_dpll_mgr.h"
+#include "intel_fbc.h"
+#include "intel_global_state.h"
+#include "intel_gmbus.h"
+#include "intel_opregion.h"
+#include "intel_pm_types.h"
+
+struct drm_i915_private;
+struct drm_property;
+struct i915_audio_component;
+struct i915_hdcp_comp_master;
+struct intel_atomic_state;
+struct intel_audio_funcs;
+struct intel_bios_encoder_data;
+struct intel_cdclk_funcs;
+struct intel_cdclk_vals;
+struct intel_color_funcs;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dpll_funcs;
+struct intel_dpll_mgr;
+struct intel_fbdev;
+struct intel_fdi_funcs;
+struct intel_hotplug_funcs;
+struct intel_initial_plane_config;
+struct intel_overlay;
+
+/* Number of SAGV/QGV points, BSpec precisely defines this */
+#define I915_NUM_QGV_POINTS 8
+
+/* Number of PSF GV points, BSpec precisely defines this */
+#define I915_NUM_PSF_GV_POINTS 3
+
+struct intel_display_funcs {
+ /*
+ * Returns the active state of the crtc, and if the crtc is active,
+ * fills out the pipe-config with the hw state.
+ */
+ bool (*get_pipe_config)(struct intel_crtc *,
+ struct intel_crtc_state *);
+ void (*get_initial_plane_config)(struct intel_crtc *,
+ struct intel_initial_plane_config *);
+ void (*crtc_enable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*crtc_disable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*commit_modeset_enables)(struct intel_atomic_state *state);
+};
+
+/* functions used for watermark calcs for display. */
+struct intel_wm_funcs {
+ /* update_wm is for legacy wm management */
+ void (*update_wm)(struct drm_i915_private *dev_priv);
+ int (*compute_pipe_wm)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*compute_intermediate_wm)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*initial_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*atomic_update_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*optimize_watermarks)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*compute_global_watermarks)(struct intel_atomic_state *state);
+};
+
+struct intel_audio {
+ /* hda/i915 audio component */
+ struct i915_audio_component *component;
+ bool component_registered;
+ /* mutex for audio/video sync */
+ struct mutex mutex;
+ int power_refcount;
+ u32 freq_cntrl;
+
+ /* Used to save the pipe-to-encoder mapping for audio */
+ struct intel_encoder *encoder_map[I915_MAX_PIPES];
+
+ /* necessary resource sharing with HDMI LPE audio driver. */
+ struct {
+ struct platform_device *platdev;
+ int irq;
+ } lpe;
+};
+
+/*
+ * dpll and cdclk state is protected by connection_mutex dpll.lock serializes
+ * intel_{prepare,enable,disable}_shared_dpll. Must be global rather than per
+ * dpll, because on some platforms plls share registers.
+ */
+struct intel_dpll {
+ struct mutex lock;
+
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *mgr;
+
+ struct {
+ int nssc;
+ int ssc;
+ } ref_clks;
+};
+
+struct intel_frontbuffer_tracking {
+ spinlock_t lock;
+
+ /*
+	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+ * scheduled flips.
+ */
+ unsigned busy_bits;
+ unsigned flip_bits;
+};
+
+struct intel_hotplug {
+ struct delayed_work hotplug_work;
+
+ const u32 *hpd, *pch_hpd;
+
+ struct {
+ unsigned long last_jiffies;
+ int count;
+ enum {
+ HPD_ENABLED = 0,
+ HPD_DISABLED = 1,
+ HPD_MARK_DISABLED = 2
+ } state;
+ } stats[HPD_NUM_PINS];
+ u32 event_bits;
+ u32 retry_bits;
+ struct delayed_work reenable_work;
+
+ u32 long_port_mask;
+ u32 short_port_mask;
+ struct work_struct dig_port_work;
+
+ struct work_struct poll_init_work;
+ bool poll_enabled;
+
+ unsigned int hpd_storm_threshold;
+ /* Whether or not to count short HPD IRQs in HPD storms */
+ u8 hpd_short_storm_enabled;
+
+ /*
+	 * If we get an HPD irq from DP and an HPD irq from non-DP,
+	 * the non-DP HPD could block the workqueue on acquiring a mode
+	 * config mutex that userspace may have taken. However, userspace
+	 * is waiting on the DP workqueue to run, which is blocked behind
+	 * the non-DP one.
+ */
+ struct workqueue_struct *dp_wq;
+};
+
+struct intel_vbt_data {
+ /* bdb version */
+ u16 version;
+
+ /* Feature bits */
+ unsigned int int_tv_support:1;
+ unsigned int int_crt_support:1;
+ unsigned int lvds_use_ssc:1;
+ unsigned int int_lvds_support:1;
+ unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
+ int lvds_ssc_freq;
+ enum drm_panel_orientation orientation;
+
+ bool override_afc_startup;
+ u8 override_afc_startup_val;
+
+ int crt_ddc_pin;
+
+ struct list_head display_devices;
+ struct list_head bdb_blocks;
+
+ struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
+ struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 ddc_pin;
+ } sdvo_mappings[2];
+};
+
+struct intel_wm {
+ /*
+ * Raw watermark latency values:
+ * in 0.1us units for WM0,
+ * in 0.5us units for WM1+.
+ */
+ /* primary */
+ u16 pri_latency[5];
+ /* sprite */
+ u16 spr_latency[5];
+ /* cursor */
+ u16 cur_latency[5];
+ /*
+ * Raw watermark memory latency values
+ * for SKL for all 8 levels
+ * in 1us units.
+ */
+ u16 skl_latency[8];
+
+ /* current hardware state */
+ union {
+ struct ilk_wm_values hw;
+ struct vlv_wm_values vlv;
+ struct g4x_wm_values g4x;
+ };
+
+ u8 max_level;
+
+ /*
+ * Should be held around atomic WM register writing; also
+ * protects * intel_crtc->wm.active and
+	 * protects intel_crtc->wm.active and
+ */
+ struct mutex wm_mutex;
+
+ bool ipc_enabled;
+};
+
+struct intel_display {
+ /* Display functions */
+ struct {
+ /* Top level crtc-ish functions */
+ const struct intel_display_funcs *display;
+
+ /* Display CDCLK functions */
+ const struct intel_cdclk_funcs *cdclk;
+
+ /* Display pll funcs */
+ const struct intel_dpll_funcs *dpll;
+
+ /* irq display functions */
+ const struct intel_hotplug_funcs *hotplug;
+
+ /* pm display functions */
+ const struct intel_wm_funcs *wm;
+
+ /* fdi display functions */
+ const struct intel_fdi_funcs *fdi;
+
+ /* Display internal color functions */
+ const struct intel_color_funcs *color;
+
+ /* Display internal audio functions */
+ const struct intel_audio_funcs *audio;
+ } funcs;
+
+ /* Grouping using anonymous structs. Keep sorted. */
+ struct intel_atomic_helper {
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } atomic_helper;
+
+ struct {
+ /* backlight registers and fields in struct intel_panel */
+ struct mutex lock;
+ } backlight;
+
+ struct {
+ struct intel_global_obj obj;
+
+ struct intel_bw_info {
+ /* for each QGV point */
+ unsigned int deratedbw[I915_NUM_QGV_POINTS];
+ /* for each PSF GV point */
+ unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
+ u8 num_qgv_points;
+ u8 num_psf_gv_points;
+ u8 num_planes;
+ } max[6];
+ } bw;
+
+ struct {
+ /* The current hardware cdclk configuration */
+ struct intel_cdclk_config hw;
+
+ /* cdclk, divider, and ratio table from bspec */
+ const struct intel_cdclk_vals *table;
+
+ struct intel_global_obj obj;
+
+ unsigned int max_cdclk_freq;
+ } cdclk;
+
+ struct {
+ /* The current hardware dbuf configuration */
+ u8 enabled_slices;
+
+ struct intel_global_obj obj;
+ } dbuf;
+
+ struct {
+ /* VLV/CHV/BXT/GLK DSI MMIO register base address */
+ u32 mmio_base;
+ } dsi;
+
+ struct {
+		/* the fbdev registered on this device */
+ struct intel_fbdev *fbdev;
+ struct work_struct suspend_work;
+ } fbdev;
+
+ struct {
+ unsigned int pll_freq;
+ u32 rx_config;
+ } fdi;
+
+ struct {
+ /*
+ * Base address of where the gmbus and gpio blocks are located
+ * (either on PCH or on SoC for platforms without PCH).
+ */
+ u32 mmio_base;
+
+ /*
+ * gmbus.mutex protects against concurrent usage of the single
+ * hw gmbus controller on different i2c buses.
+ */
+ struct mutex mutex;
+
+ struct intel_gmbus *bus[GMBUS_NUM_PINS];
+
+ wait_queue_head_t wait_queue;
+ } gmbus;
+
+ struct {
+ struct i915_hdcp_comp_master *master;
+ bool comp_added;
+
+ /* Mutex to protect the above hdcp component related values. */
+ struct mutex comp_mutex;
+ } hdcp;
+
+ struct {
+ struct i915_power_domains domains;
+
+ /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
+ u32 chv_phy_control;
+
+ /* perform PHY state sanity checks? */
+ bool chv_phy_assert[2];
+ } power;
+
+ struct {
+ u32 mmio_base;
+
+ /* protects panel power sequencer state */
+ struct mutex mutex;
+ } pps;
+
+ struct {
+ struct drm_property *broadcast_rgb;
+ struct drm_property *force_audio;
+ } properties;
+
+ struct {
+ unsigned long mask;
+ } quirks;
+
+ struct {
+ enum {
+ I915_SAGV_UNKNOWN = 0,
+ I915_SAGV_DISABLED,
+ I915_SAGV_ENABLED,
+ I915_SAGV_NOT_CONTROLLED
+ } status;
+
+ u32 block_time_us;
+ } sagv;
+
+ struct {
+ /* ordered wq for modesets */
+ struct workqueue_struct *modeset;
+
+ /* unbound hipri wq for page flips/plane updates */
+ struct workqueue_struct *flip;
+ } wq;
+
+ /* Grouping using named structs. Keep sorted. */
+ struct intel_audio audio;
+ struct intel_dmc dmc;
+ struct intel_dpll dpll;
+ struct intel_fbc *fbc[I915_MAX_FBCS];
+ struct intel_frontbuffer_tracking fb_tracking;
+ struct intel_hotplug hotplug;
+ struct intel_opregion opregion;
+ struct intel_overlay *overlay;
+ struct intel_vbt_data vbt;
+ struct intel_wm wm;
+};
+
+#endif /* __INTEL_DISPLAY_CORE_H__ */
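With this header in place, display state that used to sit directly in drm_i915_private is reached through the display sub-struct, and the conversions in the surrounding files are mechanical renames. One field as an illustration (hypothetical inline; the real accessor is skl_watermark_ipc_enabled() in skl_watermark.c):

static inline bool ipc_enabled(struct drm_i915_private *i915)
{
	/* was: i915->ipc_enabled */
	return i915->display.wm.ipc_enabled;
}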
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 6c3954479047..7c7253a2541c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -26,6 +26,7 @@
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "skl_watermark.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -37,10 +38,10 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
- dev_priv->fb_tracking.busy_bits);
+ dev_priv->display.fb_tracking.busy_bits);
seq_printf(m, "FB tracking flip bits: 0x%08x\n",
- dev_priv->fb_tracking.flip_bits);
+ dev_priv->display.fb_tracking.flip_bits);
return 0;
}
@@ -103,7 +104,8 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_opregion(struct seq_file *m, void *unused)
{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->header)
seq_write(m, opregion->header, OPREGION_SIZE);
@@ -113,7 +115,8 @@ static int i915_opregion(struct seq_file *m, void *unused)
static int i915_vbt(struct seq_file *m, void *unused)
{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_opregion *opregion = &i915->display.opregion;
if (opregion->vbt)
seq_write(m, opregion->vbt, opregion->vbt_size);
@@ -129,7 +132,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_framebuffer *drm_fb;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- fbdev_fb = intel_fbdev_framebuffer(dev_priv->fbdev);
+ fbdev_fb = intel_fbdev_framebuffer(dev_priv->display.fbdev.fbdev);
if (fbdev_fb) {
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
@@ -722,10 +725,11 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
	/* Not all platforms have a scaler */
if (num_scalers) {
- seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
+ seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d scaling_filter=%d",
num_scalers,
crtc_state->scaler_state.scaler_users,
- crtc_state->scaler_state.scaler_id);
+ crtc_state->scaler_state.scaler_id,
+ crtc_state->hw.scaling_filter);
for (i = 0; i < num_scalers; i++) {
const struct intel_scaler *sc =
@@ -932,11 +936,11 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
drm_modeset_lock_all(dev);
seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
- dev_priv->dpll.ref_clks.nssc,
- dev_priv->dpll.ref_clks.ssc);
+ dev_priv->display.dpll.ref_clks.nssc,
+ dev_priv->display.dpll.ref_clks.ssc);
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
pll->info->id);
@@ -979,58 +983,6 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ipc_status_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
-
- seq_printf(m, "Isochronous Priority Control: %s\n",
- str_yes_no(dev_priv->ipc_enabled));
- return 0;
-}
-
-static int i915_ipc_status_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (!HAS_IPC(dev_priv))
- return -ENODEV;
-
- return single_open(file, i915_ipc_status_show, dev_priv);
-}
-
-static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- intel_wakeref_t wakeref;
- bool enable;
- int ret;
-
- ret = kstrtobool_from_user(ubuf, len, &enable);
- if (ret < 0)
- return ret;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (!dev_priv->ipc_enabled && enable)
- drm_info(&dev_priv->drm,
- "Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->ipc_enabled = enable;
- intel_enable_ipc(dev_priv);
- }
-
- return len;
-}
-
-static const struct file_operations i915_ipc_status_fops = {
- .owner = THIS_MODULE,
- .open = i915_ipc_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_ipc_status_write
-};
-
static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1427,9 +1379,9 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
const u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.pri_latency;
+ latencies = dev_priv->display.wm.pri_latency;
wm_latency_show(m, latencies);
@@ -1442,9 +1394,9 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
const u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.spr_latency;
+ latencies = dev_priv->display.wm.spr_latency;
wm_latency_show(m, latencies);
@@ -1457,9 +1409,9 @@ static int cur_wm_latency_show(struct seq_file *m, void *data)
const u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.cur_latency;
+ latencies = dev_priv->display.wm.cur_latency;
wm_latency_show(m, latencies);
@@ -1550,9 +1502,9 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.pri_latency;
+ latencies = dev_priv->display.wm.pri_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -1565,9 +1517,9 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.spr_latency;
+ latencies = dev_priv->display.wm.spr_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -1580,9 +1532,9 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
u16 *latencies;
if (DISPLAY_VER(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
+ latencies = dev_priv->display.wm.skl_latency;
else
- latencies = dev_priv->wm.cur_latency;
+ latencies = dev_priv->display.wm.cur_latency;
return wm_latency_write(file, ubuf, len, offp, latencies);
}
@@ -1617,14 +1569,14 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->hotplug.hotplug_work);
+ flush_work(&dev_priv->display.hotplug.dig_port_work);
+ flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -1639,7 +1591,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
unsigned int new_threshold;
int i;
char *newline;
@@ -1678,7 +1630,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file,
spin_unlock_irq(&dev_priv->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
+ flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
return len;
}
@@ -1702,7 +1654,7 @@ static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
seq_printf(m, "Enabled: %s\n",
- str_yes_no(dev_priv->hotplug.hpd_short_storm_enabled));
+ str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled));
return 0;
}
@@ -1720,7 +1672,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
char *newline;
char tmp[16];
int i;
@@ -1756,7 +1708,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
spin_unlock_irq(&dev_priv->irq_lock);
/* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
+ flush_delayed_work(&dev_priv->display.hotplug.reenable_work);
return len;
}
@@ -1907,7 +1859,6 @@ static const struct {
{"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
- {"i915_ipc_status", &i915_ipc_status_fops},
{"i915_drrs_ctl", &i915_drrs_ctl_fops},
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
@@ -1931,6 +1882,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
+ skl_watermark_ipc_debugfs_register(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
@@ -2137,7 +2089,7 @@ static const struct file_operations i915_dsc_fec_support_fops = {
.write = i915_dsc_fec_support_write
};
-static int i915_dsc_bpp_show(struct seq_file *m, void *data)
+static int i915_dsc_bpc_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct drm_device *dev = connector->dev;
@@ -2160,14 +2112,14 @@ static int i915_dsc_bpp_show(struct seq_file *m, void *data)
}
crtc_state = to_intel_crtc_state(crtc->state);
- seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp);
+ seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component);
out: drm_modeset_unlock(&dev->mode_config.connection_mutex);
return ret;
}
-static ssize_t i915_dsc_bpp_write(struct file *file,
+static ssize_t i915_dsc_bpc_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
@@ -2175,33 +2127,32 @@ static ssize_t i915_dsc_bpp_write(struct file *file,
((struct seq_file *)file->private_data)->private;
struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- int dsc_bpp = 0;
+ int dsc_bpc = 0;
int ret;
- ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp);
+ ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpc);
if (ret < 0)
return ret;
- intel_dp->force_dsc_bpp = dsc_bpp;
+ intel_dp->force_dsc_bpc = dsc_bpc;
*offp += len;
return len;
}
-static int i915_dsc_bpp_open(struct inode *inode,
+static int i915_dsc_bpc_open(struct inode *inode,
struct file *file)
{
- return single_open(file, i915_dsc_bpp_show,
- inode->i_private);
+ return single_open(file, i915_dsc_bpc_show, inode->i_private);
}
-static const struct file_operations i915_dsc_bpp_fops = {
+static const struct file_operations i915_dsc_bpc_fops = {
.owner = THIS_MODULE,
- .open = i915_dsc_bpp_open,
+ .open = i915_dsc_bpc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
- .write = i915_dsc_bpp_write
+ .write = i915_dsc_bpc_write
};
/*
@@ -2271,8 +2222,8 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
debugfs_create_file("i915_dsc_fec_support", 0644, root,
connector, &i915_dsc_fec_support_fops);
- debugfs_create_file("i915_dsc_bpp", 0644, root,
- connector, &i915_dsc_bpp_fops);
+ debugfs_create_file("i915_dsc_bpc", 0644, root,
+ connector, &i915_dsc_bpc_fops);
}
if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
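
The i915_dsc_bpp -> i915_dsc_bpc rework above is an instance of the usual seq_file/debugfs pattern: single_open() binds a show callback, and the write side parses the user buffer with kstrtoint_from_user(). A minimal sketch of that pattern, not part of the patch, with hypothetical demo_* names and the usual <linux/seq_file.h>/<linux/fs.h> includes assumed:

static int demo_show(struct seq_file *m, void *data)
{
	/* m->private is whatever was handed to single_open() below */
	seq_printf(m, "value: %d\n", *(int *)m->private);
	return 0;
}

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t len, loff_t *offp)
{
	int *value = ((struct seq_file *)file->private_data)->private;
	int ret = kstrtoint_from_user(ubuf, len, 0, value);

	if (ret < 0)
		return ret;
	*offp += len;
	return len;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open = demo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = demo_write,
};
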
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 589af257edeb..1e608b9e5055 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_de.h"
@@ -18,8 +19,8 @@
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
#include "intel_snps_phy.h"
+#include "skl_watermark.h"
#include "vlv_sideband.h"
#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
@@ -243,7 +244,7 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
struct i915_power_domains *power_domains;
bool ret;
- power_domains = &dev_priv->power_domains;
+ power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
ret = __intel_display_power_is_enabled(dev_priv, domain);
@@ -268,7 +269,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
if (target_dc_state != states[i])
continue;
- if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
+ if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state)
break;
target_dc_state = states[i + 1];
@@ -291,7 +292,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
{
struct i915_power_well *power_well;
bool dc_off_enabled;
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
@@ -301,7 +302,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
state = sanitize_target_dc_state(dev_priv, state);
- if (state == dev_priv->dmc.target_dc_state)
+ if (state == dev_priv->display.dmc.target_dc_state)
goto unlock;
dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well);
@@ -312,7 +313,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
if (!dc_off_enabled)
intel_power_well_enable(dev_priv, power_well);
- dev_priv->dmc.target_dc_state = state;
+ dev_priv->display.dmc.target_dc_state = state;
if (!dc_off_enabled)
intel_power_well_disable(dev_priv, power_well);
@@ -339,7 +340,7 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
return !drm_WARN_ON(&i915->drm,
bitmap_intersects(power_domains->async_put_domains[0].bits,
@@ -352,7 +353,7 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
struct intel_power_domain_mask async_put_mask;
enum intel_display_power_domain domain;
bool err = false;
@@ -375,7 +376,7 @@ static void print_power_domains(struct i915_power_domains *power_domains,
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
enum intel_display_power_domain domain;
drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
@@ -390,7 +391,7 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
drm_dbg(&i915->drm, "async_put_wakeref %u\n",
power_domains->async_put_wakeref);
@@ -445,7 +446,7 @@ static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_power_domain_mask async_put_mask;
bool ret = false;
@@ -474,7 +475,7 @@ static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *power_well;
if (intel_display_power_grab_async_put_ref(dev_priv, domain))
@@ -501,7 +502,7 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&power_domains->lock);
@@ -527,7 +528,7 @@ intel_wakeref_t
intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
intel_wakeref_t wakeref;
bool is_enabled;
@@ -563,7 +564,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
const char *name = intel_display_power_domain_str(domain);
struct intel_power_domain_mask async_put_mask;
- power_domains = &dev_priv->power_domains;
+ power_domains = &dev_priv->display.power.domains;
drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
@@ -583,7 +584,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
static void __intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
mutex_lock(&power_domains->lock);
__intel_display_power_put_domain(dev_priv, domain);
@@ -596,7 +597,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
@@ -610,7 +611,7 @@ release_async_put_domains(struct i915_power_domains *power_domains,
{
struct drm_i915_private *dev_priv =
container_of(power_domains, struct drm_i915_private,
- power_domains);
+ display.power.domains);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
@@ -637,8 +638,8 @@ intel_display_power_put_async_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
- power_domains.async_put_work.work);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ display.power.domains.async_put_work.work);
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
intel_wakeref_t old_work_wakeref = 0;
@@ -698,7 +699,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);
@@ -746,7 +747,7 @@ out_verify:
*/
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
@@ -779,7 +780,7 @@ out_verify:
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
intel_display_power_flush_work(i915);
cancel_delayed_work_sync(&power_domains->async_put_work);
@@ -908,7 +909,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
return 0;
if (IS_DG2(dev_priv))
- max_dc = 0;
+ max_dc = 1;
else if (IS_DG1(dev_priv))
max_dc = 3;
else if (DISPLAY_VER(dev_priv) >= 12)
@@ -976,15 +977,15 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
*/
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
dev_priv->params.disable_power_well);
- dev_priv->dmc.allowed_dc_mask =
+ dev_priv->display.dmc.allowed_dc_mask =
get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
- dev_priv->dmc.target_dc_state =
+ dev_priv->display.dmc.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
mutex_init(&power_domains->lock);
@@ -1003,12 +1004,12 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
- intel_display_power_map_cleanup(&dev_priv->power_domains);
+ intel_display_power_map_cleanup(&dev_priv->display.power.domains);
}
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
@@ -1037,7 +1038,7 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask;
enum dbuf_slice slice;
@@ -1060,14 +1061,14 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
for_each_dbuf_slice(dev_priv, slice)
gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));
- dev_priv->dbuf.enabled_slices = req_slices;
+ dev_priv->display.dbuf.enabled_slices = req_slices;
mutex_unlock(&power_domains->lock);
}
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
- dev_priv->dbuf.enabled_slices =
+ dev_priv->display.dbuf.enabled_slices =
intel_enabled_dbuf_slices_mask(dev_priv);
/*
@@ -1075,7 +1076,7 @@ static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
* figure out later which slices we have and what we need.
*/
gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
- dev_priv->dbuf.enabled_slices);
+ dev_priv->display.dbuf.enabled_slices);
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -1101,7 +1102,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask;
u32 mask, val, i;
- if (IS_ALDERLAKE_P(dev_priv))
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
return;
mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
@@ -1309,7 +1310,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
- intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK");
+ intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK");
}
/*
@@ -1381,6 +1382,9 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
}
+ if (DISPLAY_VER(dev_priv) >= 14)
+ reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN;
+
val = intel_de_read(dev_priv, reg);
if (enable)
@@ -1394,7 +1398,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -1426,13 +1430,14 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
+ /* TODO: disable DMC program */
gen9_dbuf_disable(dev_priv);
@@ -1459,7 +1464,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -1493,13 +1498,14 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
+ /* TODO: disable DMC program */
gen9_dbuf_disable(dev_priv);
@@ -1601,7 +1607,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
static void icl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
u32 val;
@@ -1668,13 +1674,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
struct i915_power_well *well;
if (!HAS_DISPLAY(dev_priv))
return;
gen9_disable_dc_states(dev_priv);
+ intel_dmc_disable_program(dev_priv);
/* 1. Disable all display engine functions -> already done */
@@ -1712,7 +1719,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* power well state and lane status to reconstruct the
* expected initial value.
*/
- dev_priv->chv_phy_control =
+ dev_priv->display.power.chv_phy_control =
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
@@ -1734,27 +1741,27 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);
mask = (status & DPLL_PORTC_READY_MASK) >> 4;
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
- dev_priv->chv_phy_assert[DPIO_PHY0] = false;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false;
} else {
- dev_priv->chv_phy_assert[DPIO_PHY0] = true;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true;
}
if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
@@ -1766,21 +1773,21 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
if (mask == 0xf)
mask = 0x0;
else
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);
- dev_priv->chv_phy_control |=
+ dev_priv->display.power.chv_phy_control |=
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
- dev_priv->chv_phy_assert[DPIO_PHY1] = false;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false;
} else {
- dev_priv->chv_phy_assert[DPIO_PHY1] = true;
+ dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true;
}
drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
/* Defer application of initial phy_control to enabling the powerwell */
}
@@ -1864,7 +1871,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
*/
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
power_domains->initializing = true;
@@ -1905,8 +1912,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
/* Disable power support if the user asked so. */
if (!i915->params.disable_power_well) {
drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
- i915->power_domains.disable_wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_INIT);
+ i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_INIT);
}
intel_power_domains_sync_hw(i915);
@@ -1927,12 +1934,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.init_wakeref);
+ fetch_and_zero(&i915->display.power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->power_domains.disable_wakeref));
+ fetch_and_zero(&i915->display.power.domains.disable_wakeref));
intel_display_power_flush_work_sync(i915);
@@ -1954,7 +1961,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
*/
void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
mutex_lock(&power_domains->lock);
@@ -1988,7 +1995,7 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915)
void intel_power_domains_enable(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&i915->power_domains.init_wakeref);
+ fetch_and_zero(&i915->display.power.domains.init_wakeref);
intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
intel_power_domains_verify_state(i915);
@@ -2003,7 +2010,7 @@ void intel_power_domains_enable(struct drm_i915_private *i915)
*/
void intel_power_domains_disable(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
power_domains->init_wakeref =
@@ -2026,7 +2033,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
void intel_power_domains_suspend(struct drm_i915_private *i915,
enum i915_drm_suspend_mode suspend_mode)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
@@ -2039,7 +2046,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
intel_dmc_has_payload(i915)) {
intel_display_power_flush_work(i915);
@@ -2053,7 +2060,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
*/
if (!i915->params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
- fetch_and_zero(&i915->power_domains.disable_wakeref));
+ fetch_and_zero(&i915->display.power.domains.disable_wakeref));
intel_display_power_flush_work(i915);
intel_power_domains_verify_state(i915);
@@ -2080,7 +2087,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
*/
void intel_power_domains_resume(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
if (power_domains->display_core_suspended) {
intel_power_domains_init_hw(i915, true);
@@ -2098,7 +2105,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
for_each_power_well(i915, power_well) {
@@ -2126,7 +2133,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
*/
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
struct i915_power_well *power_well;
bool dump_domain_info;
@@ -2232,10 +2239,10 @@ void intel_display_power_resume(struct drm_i915_private *i915)
bxt_disable_dc9(i915);
icl_display_core_init(i915, true);
if (intel_dmc_has_payload(i915)) {
- if (i915->dmc.allowed_dc_mask &
+ if (i915->display.dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(i915);
- else if (i915->dmc.allowed_dc_mask &
+ else if (i915->display.dmc.allowed_dc_mask &
DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(i915);
}
@@ -2243,7 +2250,7 @@ void intel_display_power_resume(struct drm_i915_private *i915)
bxt_disable_dc9(i915);
bxt_display_core_init(i915, true);
if (intel_dmc_has_payload(i915) &&
- (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(i915);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
hsw_disable_pc8(i915);
@@ -2252,7 +2259,7 @@ void intel_display_power_resume(struct drm_i915_private *i915)
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m)
{
- struct i915_power_domains *power_domains = &i915->power_domains;
+ struct i915_power_domains *power_domains = &i915->display.power.domains;
int i;
mutex_lock(&power_domains->lock);
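
Most of the churn in this file is mechanical: i915_power_domains moved from dev_priv->power_domains to dev_priv->display.power.domains, so every container_of() that recovers the drm_i915_private from the embedded struct must name the new, deeper member path. A reduced illustration of why the third argument changes (hypothetical demo_* types, not from the patch; container_of() and offsetof() accept nested member paths):

#include <linux/container_of.h>

struct demo_power_domains { int dummy; };

struct demo_i915 {
	struct {
		struct {
			struct demo_power_domains domains;
		} power;
	} display;
};

static struct demo_i915 *demo_to_i915(struct demo_power_domains *pd)
{
	/* the member path must match where the struct is embedded */
	return container_of(pd, struct demo_i915, display.power.domains);
}
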
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 97b367f39f35..dc04afc6cc8f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1350,6 +1350,117 @@ static const struct i915_power_well_desc_list xelpd_power_wells[] = {
I915_PW_DESCRIPTORS(xelpd_power_wells_main),
};
+/*
+ * MTL is based on XELPD power domains with the exception of power gating for:
+ * - DDI_IO (moved to PLL logic)
+ * - AUX and AUX_IO functionality and register access for USBC1-4 (PICA always-on)
+ */
+#define XELPDP_PW_2_POWER_DOMAINS \
+ XELPD_PW_B_POWER_DOMAINS, \
+ XELPD_PW_C_POWER_DOMAINS, \
+ XELPD_PW_D_POWER_DOMAINS, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_pw_2,
+ XELPDP_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_dc_off,
+ XELPDP_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc1,
+ POWER_DOMAIN_AUX_USBC1,
+ POWER_DOMAIN_AUX_TBT1);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc2,
+ POWER_DOMAIN_AUX_USBC2,
+ POWER_DOMAIN_AUX_TBT2);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc3,
+ POWER_DOMAIN_AUX_USBC3,
+ POWER_DOMAIN_AUX_TBT3);
+
+I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc4,
+ POWER_DOMAIN_AUX_USBC4,
+ POWER_DOMAIN_AUX_TBT4);
+
+static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &xelpdp_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xelpdp_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xelpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xelpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_D", &xelpd_pwdoms_pw_d,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_D),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
+ I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
+ I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
+ I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3),
+ I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4),
+ ),
+ .ops = &xelpdp_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list xelpdp_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
+};
+
static void init_power_well_domains(const struct i915_power_well_instance *inst,
struct i915_power_well *power_well)
{
@@ -1388,7 +1499,7 @@ __set_power_wells(struct i915_power_domains *power_domains,
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
u64 power_well_ids = 0;
const struct i915_power_well_desc_list *desc_list;
const struct i915_power_well_desc *desc;
@@ -1447,7 +1558,7 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
- power_domains);
+ display.power.domains);
/*
* The enabling order will be from lower to higher indexed wells;
* the disabling order is reversed.
@@ -1457,7 +1568,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
return 0;
}
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(i915) >= 14)
+ return set_power_wells(power_domains, xelpdp_power_wells);
+ else if (DISPLAY_VER(i915) >= 13)
return set_power_wells(power_domains, xelpd_power_wells);
else if (IS_DG1(i915))
return set_power_wells(power_domains, dg1_power_wells);
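
For context on the table just added: this file composes a platform's wells from reusable descriptor lists, which is why xelpdp_power_wells only defines the MTL-specific mains and pulls the always-on and PW_1 wells in unchanged. A hedged sketch of extending the scheme for some future platform (demo_* names are hypothetical; the referenced descriptors and ops are the ones above):

static const struct i915_power_well_desc demo_power_wells_main[] = {
	{
		/* reuse an existing ops/domain pairing from this file */
		.instances = &I915_PW_INSTANCES(
			I915_PW("DC_off", &xelpdp_pwdoms_dc_off,
				.id = SKL_DISP_DC_OFF),
		),
		.ops = &gen9_dc_off_power_well_ops,
	},
};

static const struct i915_power_well_desc_list demo_power_wells[] = {
	I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),	/* shared */
	I915_PW_DESCRIPTORS(icl_power_wells_pw_1),		/* shared */
	I915_PW_DESCRIPTORS(demo_power_wells_main),		/* new */
};

set_power_wells() would then gain one more DISPLAY_VER() branch, exactly as the >= 14 case above does.
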
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 91cfd5890f46..df7ee4969ef1 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_crt.h"
@@ -16,10 +17,10 @@
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_pcode.h"
-#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_tc.h"
#include "intel_vga.h"
+#include "skl_watermark.h"
#include "vlv_sideband.h"
#include "vlv_sideband_reg.h"
@@ -84,7 +85,7 @@ lookup_power_well(struct drm_i915_private *i915,
drm_WARN(&i915->drm, 1,
"Power well %d not defined for this platform\n",
power_well_id);
- return &i915->power_domains.power_wells[0];
+ return &i915->display.power.domains.power_wells[0];
}
void intel_power_well_enable(struct drm_i915_private *i915,
@@ -710,8 +711,8 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm,
"Resetting DC state tracking from %02x to %02x\n",
- dev_priv->dmc.dc_state, val);
- dev_priv->dmc.dc_state = val;
+ dev_priv->display.dmc.dc_state, val);
+ dev_priv->display.dmc.dc_state = val;
}
/**
@@ -746,8 +747,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
return;
if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->dmc.allowed_dc_mask))
- state &= dev_priv->dmc.allowed_dc_mask;
+ state & ~dev_priv->display.dmc.allowed_dc_mask))
+ state &= dev_priv->display.dmc.allowed_dc_mask;
val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
@@ -755,16 +756,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
val & mask, state);
/* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->dmc.dc_state)
+ if ((val & mask) != dev_priv->display.dmc.dc_state)
drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->dmc.dc_state, val & mask);
+ dev_priv->display.dmc.dc_state, val & mask);
val &= ~mask;
val |= state;
gen9_write_dc_state(dev_priv, val);
- dev_priv->dmc.dc_state = val & mask;
+ dev_priv->display.dmc.dc_state = val & mask;
}
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -945,7 +946,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
+ u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices;
drm_WARN(&dev_priv->drm,
hw_enabled_dbuf_slices != enabled_dbuf_slices,
@@ -958,7 +959,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_config cdclk_config = {};
- if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
+ if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
return;
}
@@ -971,7 +972,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
drm_WARN_ON(&dev_priv->drm,
- intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
+ intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw,
&cdclk_config));
gen9_assert_dbuf_enabled(dev_priv);
@@ -1000,7 +1001,7 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
if (!intel_dmc_has_payload(dev_priv))
return;
- switch (dev_priv->dmc.target_dc_state) {
+ switch (dev_priv->display.dmc.target_dc_state) {
case DC_STATE_EN_DC3CO:
tgl_enable_dc3co(dev_priv);
break;
@@ -1156,10 +1157,10 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
val &= DPOUNIT_CLOCK_GATE_DISABLE;
val |= VRHUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
/*
* Disable trickle feed and enable pnd deadline calculation
@@ -1207,7 +1208,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
- if (dev_priv->power_domains.initializing)
+ if (dev_priv->display.power.domains.initializing)
return;
intel_hpd_init(dev_priv);
@@ -1302,7 +1303,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
struct i915_power_well *cmn_d =
lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
- u32 phy_control = dev_priv->chv_phy_control;
+ u32 phy_control = dev_priv->display.power.chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
@@ -1313,7 +1314,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* reset (i.e. the power well has been disabled at
* least once).
*/
- if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+ if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
@@ -1321,7 +1322,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
- if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+ if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1])
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
@@ -1397,7 +1398,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->chv_phy_control);
+ phy_status, dev_priv->display.power.chv_phy_control);
}
#undef BITS_SET
@@ -1457,13 +1458,13 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_put(dev_priv);
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
drm_dbg_kms(&dev_priv->drm,
"Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ phy, dev_priv->display.power.chv_phy_control);
assert_chv_phy_status(dev_priv);
}
@@ -1487,18 +1488,18 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, PIPE_C);
}
- dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
vlv_set_power_well(dev_priv, power_well, false);
drm_dbg_kms(&dev_priv->drm,
"Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ phy, dev_priv->display.power.chv_phy_control);
/* PHY is fully reset now, so we can enable the PHY state asserts */
- dev_priv->chv_phy_assert[phy] = true;
+ dev_priv->display.power.chv_phy_assert[phy] = true;
assert_chv_phy_status(dev_priv);
}
@@ -1516,7 +1517,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
* reset (i.e. the power well has been disabled at
* least once).
*/
- if (!dev_priv->chv_phy_assert[phy])
+ if (!dev_priv->display.power.chv_phy_assert[phy])
return;
if (ch == DPIO_CH0)
@@ -1570,27 +1571,27 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
bool was_override;
mutex_lock(&power_domains->lock);
- was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
if (override == was_override)
goto out;
if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
drm_dbg_kms(&dev_priv->drm,
"Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->chv_phy_control);
+ phy, ch, dev_priv->display.power.chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1604,26 +1605,26 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
mutex_lock(&power_domains->lock);
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
drm_dbg_kms(&dev_priv->drm,
"Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->chv_phy_control);
+ phy, ch, mask, dev_priv->display.power.chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1701,7 +1702,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
+ dev_priv->display.power.chv_phy_control);
}
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1797,6 +1798,43 @@ tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
return intel_power_well_refcount(power_well);
}
+static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);
+
+ /*
+ * The power status flag cannot be used to determine whether aux
+ * power wells have finished powering up. Instead we're
+ * expected to just wait a fixed 600us after raising the request
+ * bit.
+ */
+ usleep_range(600, 1200);
+}
+
+static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
+ 0);
+ usleep_range(10, 30);
+}
+
+static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+
+ return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch)) &
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS;
+}
const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
@@ -1910,3 +1948,10 @@ const struct i915_power_well_ops tgl_tc_cold_off_ops = {
.disable = tgl_tc_cold_off_power_well_disable,
.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};
+
+const struct i915_power_well_ops xelpdp_aux_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = xelpdp_aux_power_well_enable,
+ .disable = xelpdp_aux_power_well_disable,
+ .is_enabled = xelpdp_aux_power_well_enabled,
+};
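
The xelpdp AUX well ops above encode an unusual contract: the POWER_REQUEST bit is set with a read-modify-write and, since the status flag cannot signal power-up completion, the code simply sleeps a fixed 600us. A sketch of that request-then-fixed-delay idiom, reduced to its core (demo_* name is hypothetical; the register and bit names are the ones used by the patch):

static void demo_power_request(struct drm_i915_private *i915, enum aux_ch aux_ch)
{
	/* set the request bit, leaving the rest of the register intact */
	intel_de_rmw(i915, XELPDP_DP_AUX_CH_CTL(aux_ch),
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
		     XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);

	/* no status bit to poll for power-up; wait the documented 600us */
	usleep_range(600, 1200);
}
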
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index d0624642dcb6..e13b521e322a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -14,15 +14,15 @@ struct drm_i915_private;
struct i915_power_well;
#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
+ for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \
+ (__power_well) - (__dev_priv)->display.power.domains.power_wells < \
+ (__dev_priv)->display.power.domains.power_well_count; \
(__power_well)++)
#define for_each_power_well_reverse(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ for ((__power_well) = (__dev_priv)->display.power.domains.power_wells + \
+ (__dev_priv)->display.power.domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->display.power.domains.power_wells >= 0; \
(__power_well)--)
/*
@@ -80,6 +80,9 @@ struct i915_power_well_instance {
*/
u8 idx;
} hsw;
+ struct {
+ u8 aux_ch;
+ } xelpdp;
};
};
@@ -169,5 +172,6 @@ extern const struct i915_power_well_ops vlv_dpio_power_well_ops;
extern const struct i915_power_well_ops icl_aux_power_well_ops;
extern const struct i915_power_well_ops icl_ddi_power_well_ops;
extern const struct i915_power_well_ops tgl_tc_cold_off_ops;
+extern const struct i915_power_well_ops xelpdp_aux_power_well_ops;
#endif
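
The iterator macros above now index through display.power.domains but keep their call-site shape unchanged. A hedged usage sketch (demo_* name is hypothetical; intel_power_well_refcount() appears elsewhere in this patch, and the usual i915_drv.h includes are assumed):

static void demo_dump_wells(struct drm_i915_private *i915)
{
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well)
		drm_dbg(&i915->drm, "well refcount: %d\n",
			intel_power_well_refcount(power_well));
}
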
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 01977cd237eb..298d00a11f47 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1130,6 +1130,7 @@ struct intel_crtc_state {
/* m2_n2 for eDP downclock */
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
+ bool seamless_m_n;
/* PSR is supported but might not be enabled due the lack of enabled planes */
bool has_psr;
@@ -1712,7 +1713,7 @@ struct intel_dp {
/* Display stream compression testing */
bool force_dsc_en;
- int force_dsc_bpp;
+ int force_dsc_bpc;
bool hobl_failed;
bool hobl_active;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index fa9ef591b885..e52ecc0738a6 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -52,8 +52,8 @@
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define DG2_DMC_PATH DMC_PATH(dg2, 2, 06)
-#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 06)
+#define DG2_DMC_PATH DMC_PATH(dg2, 2, 07)
+#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 07)
MODULE_FIRMWARE(DG2_DMC_PATH);
#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
@@ -250,7 +250,7 @@ struct stepping_info {
static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id)
{
- return i915->dmc.dmc_info[dmc_id].payload;
+ return i915->display.dmc.dmc_info[dmc_id].payload;
}
bool intel_dmc_has_payload(struct drm_i915_private *i915)
@@ -277,6 +277,17 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
+static void disable_event_handler(struct drm_i915_private *i915,
+ i915_reg_t ctl_reg, i915_reg_t htp_reg)
+{
+ intel_de_write(i915, ctl_reg,
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ DMC_EVT_CTL_EVENT_ID_FALSE));
+ intel_de_write(i915, htp_reg, 0);
+}
+
static void
disable_flip_queue_event(struct drm_i915_private *i915,
i915_reg_t ctl_reg, i915_reg_t htp_reg)
@@ -299,12 +310,7 @@ disable_flip_queue_event(struct drm_i915_private *i915,
return;
}
- intel_de_write(i915, ctl_reg,
- REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_FALSE));
- intel_de_write(i915, htp_reg, 0);
+ disable_event_handler(i915, ctl_reg, htp_reg);
}
static bool
@@ -356,6 +362,51 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
}
}
+static void disable_all_event_handlers(struct drm_i915_private *i915)
+{
+ int id;
+
+ /* TODO: disable the event handlers on pre-GEN12 platforms as well */
+ if (DISPLAY_VER(i915) < 12)
+ return;
+
+ for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) {
+ int handler;
+
+ if (!has_dmc_id_fw(i915, id))
+ continue;
+
+ for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
+ disable_event_handler(i915,
+ DMC_EVT_CTL(i915, id, handler),
+ DMC_EVT_HTP(i915, id, handler));
+ }
+}
+
+static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
+{
+ enum pipe pipe;
+
+ if (DISPLAY_VER(i915) != 13)
+ return;
+
+ /*
+ * Wa_16015201720:adl-p,dg2
+ * The WA requires clock gating to be disabled all the time
+ * for pipes A and B.
+ * For pipes C and D, clock gating needs to be disabled only
+ * while the firmware is being initialized.
+ */
+ if (enable)
+ for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
+ intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
+ 0, PIPEDMC_GATING_DIS);
+ else
+ for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
+ intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
+ PIPEDMC_GATING_DIS, 0);
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @dev_priv: i915 drm device.
@@ -366,12 +417,16 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
*/
void intel_dmc_load_program(struct drm_i915_private *dev_priv)
{
- struct intel_dmc *dmc = &dev_priv->dmc;
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
u32 id, i;
if (!intel_dmc_has_payload(dev_priv))
return;
+ pipedmc_clock_gating_wa(dev_priv, true);
+
+ disable_all_event_handlers(dev_priv);
+
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
preempt_disable();
@@ -393,7 +448,7 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
}
}
- dev_priv->dmc.dc_state = 0;
+ dev_priv->display.dmc.dc_state = 0;
gen9_set_dc_state_debugmask(dev_priv);
@@ -403,12 +458,31 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
* here.
*/
disable_all_flip_queue_events(dev_priv);
+
+ pipedmc_clock_gating_wa(dev_priv, false);
+}
+
+/**
+ * intel_dmc_disable_program() - disable the firmware
+ * @i915: i915 drm device
+ *
+ * Disable all event handlers in the firmware, making sure the firmware is
+ * inactive after the display is uninitialized.
+ */
+void intel_dmc_disable_program(struct drm_i915_private *i915)
+{
+ if (!intel_dmc_has_payload(i915))
+ return;
+
+ pipedmc_clock_gating_wa(i915, true);
+ disable_all_event_handlers(i915);
+ pipedmc_clock_gating_wa(i915, false);
}
void assert_dmc_loaded(struct drm_i915_private *i915)
{
drm_WARN_ONCE(&i915->drm,
- !intel_de_read(i915, DMC_PROGRAM(i915->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+ !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
"DMC program storage start is NULL\n");
drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
"DMC SSP Base Not fine\n");
@@ -445,7 +519,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
{
unsigned int i, id;
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
for (i = 0; i < num_entries; i++) {
id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
@@ -473,7 +547,7 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
const u32 *mmioaddr, u32 mmio_count,
int header_ver, u8 dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
u32 start_range, end_range;
int i;
@@ -511,7 +585,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size, u8 dmc_id)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
unsigned int header_len_bytes, dmc_header_size, payload_size, i;
const u32 *mmioaddr, *mmiodata;
@@ -622,7 +696,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
const struct stepping_info *si,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
u32 package_size = sizeof(struct intel_package_header);
u32 num_entries, max_entries;
const struct intel_fw_info *fw_info;
@@ -676,7 +750,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
struct intel_css_header *css_header,
size_t rem_size)
{
- struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc);
if (rem_size < sizeof(struct intel_css_header)) {
drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
@@ -713,7 +787,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header_base *dmc_header;
- struct intel_dmc *dmc = &dev_priv->dmc;
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
struct stepping_info display_info = { '*', '*'};
const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
u32 readcount = 0;
@@ -740,7 +814,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
readcount += r;
for (id = 0; id < DMC_FW_MAX; id++) {
- if (!dev_priv->dmc.dmc_info[id].present)
+ if (!dev_priv->display.dmc.dmc_info[id].present)
continue;
offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
@@ -756,15 +830,15 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
- dev_priv->dmc.wakeref =
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
+ dev_priv->display.dmc.wakeref =
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
{
intel_wakeref_t wakeref __maybe_unused =
- fetch_and_zero(&dev_priv->dmc.wakeref);
+ fetch_and_zero(&dev_priv->display.dmc.wakeref);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
@@ -775,10 +849,10 @@ static void dmc_load_work_fn(struct work_struct *work)
struct intel_dmc *dmc;
const struct firmware *fw = NULL;
- dev_priv = container_of(work, typeof(*dev_priv), dmc.work);
- dmc = &dev_priv->dmc;
+ dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
+ dmc = &dev_priv->display.dmc;
- request_firmware(&fw, dev_priv->dmc.fw_path, dev_priv->drm.dev);
+ request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
parse_dmc_fw(dev_priv, fw);
if (intel_dmc_has_payload(dev_priv)) {
@@ -787,7 +861,7 @@ static void dmc_load_work_fn(struct work_struct *work)
drm_info(&dev_priv->drm,
"Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
+ dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version),
DMC_VERSION_MINOR(dmc->version));
} else {
drm_notice(&dev_priv->drm,
@@ -810,9 +884,9 @@ static void dmc_load_work_fn(struct work_struct *work)
*/
void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
{
- struct intel_dmc *dmc = &dev_priv->dmc;
+ struct intel_dmc *dmc = &dev_priv->display.dmc;
- INIT_WORK(&dev_priv->dmc.work, dmc_load_work_fn);
+ INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn);
if (!HAS_DMC(dev_priv))
return;
@@ -895,7 +969,7 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
}
drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path);
- schedule_work(&dev_priv->dmc.work);
+ schedule_work(&dev_priv->display.dmc.work);
}
/**
@@ -911,7 +985,7 @@ void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv)
if (!HAS_DMC(dev_priv))
return;
- flush_work(&dev_priv->dmc.work);
+ flush_work(&dev_priv->display.dmc.work);
/* Drop the reference held in case DMC isn't loaded. */
if (!intel_dmc_has_payload(dev_priv))
@@ -953,16 +1027,16 @@ void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
return;
intel_dmc_ucode_suspend(dev_priv);
- drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref);
for (id = 0; id < DMC_FW_MAX; id++)
- kfree(dev_priv->dmc.dmc_info[id].payload);
+ kfree(dev_priv->display.dmc.dmc_info[id].payload);
}
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
struct drm_i915_private *i915)
{
- struct intel_dmc *dmc = &i915->dmc;
+ struct intel_dmc *dmc = &i915->display.dmc;
if (!HAS_DMC(i915))
return;
@@ -984,7 +1058,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
if (!HAS_DMC(i915))
return -ENODEV;
- dmc = &i915->dmc;
+ dmc = &i915->display.dmc;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
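
The new disable_event_handler() above builds its register value with REG_FIELD_PREP(), which shifts a field value into position under its mask, with compile-time checks that the value fits. A small worked sketch with hypothetical DEMO_* names, not part of the patch:

#define DEMO_FIELD_MASK		REG_GENMASK(7, 4)	/* bits 7:4 */
#define DEMO_FIELD_DISABLED	3

static u32 demo_event_ctl_value(void)
{
	/* REG_FIELD_PREP(REG_GENMASK(7, 4), 3) == 3 << 4 == 0x30 */
	return REG_FIELD_PREP(DEMO_FIELD_MASK, DEMO_FIELD_DISABLED);
}
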
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 41091aee3b47..67e03315ef99 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -47,6 +47,7 @@ struct intel_dmc {
void intel_dmc_ucode_init(struct drm_i915_private *i915);
void intel_dmc_load_program(struct drm_i915_private *i915);
+void intel_dmc_disable_program(struct drm_i915_private *i915);
void intel_dmc_ucode_fini(struct drm_i915_private *i915);
void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
void intel_dmc_ucode_resume(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 238620b55966..5e5e41644ddf 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -28,6 +28,8 @@
#define _DMC_REG(i915, dmc_id, reg) \
((reg) - __DMC_REG_MMIO_BASE + _DMC_REG_MMIO_BASE(i915, dmc_id))
+#define DMC_EVENT_HANDLER_COUNT_GEN12 8
+
#define _DMC_EVT_HTP_0 0x8f004
#define DMC_EVT_HTP(i915, dmc_id, handler) \
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index a4e113253df3..c9be61d2348e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -286,11 +286,22 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}
+static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
+{
+ int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base);
+ int max_lanes = dig_port->max_lanes;
+
+ if (vbt_max_lanes)
+ max_lanes = min(max_lanes, vbt_max_lanes);
+
+ return max_lanes;
+}
+
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- int source_max = dig_port->max_lanes;
+ int source_max = intel_dp_max_source_lane_count(dig_port);
int sink_max = intel_dp->max_sink_lane_count;
int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps);
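The new helper clamps the port's lane count against the VBT-reported limit, with 0 meaning "no VBT limit", and the common lane count then becomes the minimum across source, sink, FIA and LTTPR caps. A standalone sketch of that clamping with made-up numbers:

#include <stdio.h>

/* Return the smaller of two limits, treating 0 as "no limit",
 * mirroring the vbt_max_lanes handling above (sketch only). */
static int clamp_limit(int max, int limit)
{
	return limit ? (limit < max ? limit : max) : max;
}

int main(void)
{
	int source = clamp_limit(4 /* dig_port->max_lanes */,
				 2 /* hypothetical VBT limit */);
	int sink = 4, fia = 4, lttpr = 4;
	int lanes = source;

	if (sink < lanes)
		lanes = sink;
	if (fia < lanes)
		lanes = fia;
	if (lttpr > 0 && lttpr < lanes)
		lanes = lttpr;

	printf("common lane count: %d\n", lanes);	/* 2 */
	return 0;
}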
@@ -389,23 +400,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}
-static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
-{
- u32 voltage;
-
- voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
-
- return voltage == VOLTAGE_INFO_0_85V;
-}
-
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_phy_is_combo(dev_priv, phy) &&
- (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
+ if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -413,23 +414,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
- if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
- return 540000;
-
- return 810000;
-}
-
-static int dg1_max_source_rate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
- if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
+ if (intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -491,7 +476,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
max_rate = dg2_max_source_rate(intel_dp);
else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
- max_rate = dg1_max_source_rate(intel_dp);
+ max_rate = 810000;
else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
@@ -720,7 +705,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
if (bigjoiner) {
u32 max_bpp_bigjoiner =
- i915->max_cdclk_freq * 48 /
+ i915->display.cdclk.max_cdclk_freq * 48 /
intel_dp_mode_to_fec_clock(mode_clock);
bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
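The factor 48 reads as two joined pipes at up to 24 bpp each, so the expression bounds compressed bpp by what the joined pipes can push per FEC-adjusted pixel. A worked sketch, assuming the nominal ~2.4% DP FEC overhead for the mode-to-FEC-clock conversion:

#include <stdio.h>

int main(void)
{
	int max_cdclk_khz = 652800;	/* hypothetical max CDCLK */
	int mode_clock_khz = 1188000;	/* hypothetical 8K-ish dotclock */

	/* Assumed conversion: DP FEC adds ~2.4% symbol overhead. */
	int fec_clock_khz = mode_clock_khz * 1000 / 976;

	/* 2 pipes * 24 bpp = 48 bits available per clock. */
	int max_bpp_bigjoiner = max_cdclk_khz * 48 / fec_clock_khz;

	printf("bigjoiner bpp ceiling: %d\n", max_bpp_bigjoiner);
	return 0;
}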
@@ -1312,21 +1297,45 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
}
}
+static bool has_seamless_m_n(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ /*
+ * Seamless M/N reprogramming is only implemented
+ * for BDW+ double buffered M/N registers so far.
+ */
+ return HAS_DOUBLE_BUFFERED_M_N(i915) &&
+ intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
+}
+
+static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ /* FIXME a bit of a mess wrt clock vs. crtc_clock */
+ if (has_seamless_m_n(connector))
+ return intel_panel_highest_mode(connector, adjusted_mode)->clock;
+ else
+ return adjusted_mode->crtc_clock;
+}
+
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state,
const struct link_config_limits *limits)
{
- struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int bpp, i, lane_count;
+ int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
int mode_rate, link_rate, link_avail;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- output_bpp);
+ mode_rate = intel_dp_link_required(clock, output_bpp);
for (i = 0; i < intel_dp->num_common_rates; i++) {
link_rate = intel_dp_common_rate(intel_dp, i);
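intel_dp_compute_link_config_wide() iterates bpp from max to min, then link rate from low to high, then lane count from low to high, and takes the first combination whose link bandwidth covers the mode rate; that ordering is what "slow and wide" means in practice. A compact standalone sketch of the same search, using a hypothetical rate table and a crude 8b/10b payload model:

#include <stdio.h>

static long long link_avail_kbps(int rate_khz, int lanes)
{
	/* rate_khz is the link symbol clock; each 8b/10b symbol
	 * carries 8 data bits per lane, so kbps = rate * 8 * lanes. */
	return (long long)rate_khz * 8 * lanes;
}

int main(void)
{
	const int rates[] = { 162000, 270000, 540000 };	/* hypothetical */
	const int clock_khz = 300000;			/* mode dotclock */

	for (int bpp = 30; bpp >= 18; bpp -= 6) {	/* max bpp first */
		long long mode_rate = (long long)clock_khz * bpp;

		for (int r = 0; r < 3; r++)		/* min rate next */
			for (int lanes = 1; lanes <= 4; lanes <<= 1)
				if (link_avail_kbps(rates[r], lanes) >= mode_rate) {
					printf("bpp=%d rate=%d lanes=%d\n",
					       bpp, rates[r], lanes);
					return 0;
				}
	}
	return 1;	/* no config fits */
}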
@@ -1377,7 +1386,18 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
return 0;
}
-#define DSC_SUPPORTED_VERSION_MIN 1
+static int intel_dp_source_dsc_version_minor(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(i915) >= 14 ? 2 : 1;
+}
+
+static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp)
+{
+ return (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >>
+ DP_DSC_MINOR_SHIFT;
+}
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
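The negotiated DSC minor version becomes min(source, sink): the source caps it at 1 before display 14 and at 2 from display 14 on, while the sink's minor sits in the upper nibble of the DP_DSC_REV DPCD byte. A standalone sketch of the extraction and negotiation:

#include <stdio.h>
#include <stdint.h>

#define DP_DSC_MAJOR_MASK	0x0f	/* DP_DSC_REV bits 3:0 */
#define DP_DSC_MINOR_MASK	0xf0	/* DP_DSC_REV bits 7:4 */
#define DP_DSC_MINOR_SHIFT	4

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	uint8_t dp_dsc_rev = 0x21;	/* hypothetical sink: DSC 1.2 */
	int display_ver = 14;		/* hypothetical source */

	int sink_major = dp_dsc_rev & DP_DSC_MAJOR_MASK;
	int sink_minor = (dp_dsc_rev & DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT;
	int source_minor = display_ver >= 14 ? 2 : 1;

	printf("negotiated DSC %d.%d\n",
	       sink_major, min_int(source_minor, sink_minor));
	return 0;
}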
@@ -1395,6 +1415,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
* DP_DSC_RC_BUF_SIZE for this.
*/
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
/*
* Slice Height of 8 works for all currently available panels. So start
@@ -1416,9 +1437,8 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
vdsc_cfg->dsc_version_minor =
- min(DSC_SUPPORTED_VERSION_MIN,
- (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
- DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+ min(intel_dp_source_dsc_version_minor(intel_dp),
+ intel_dp_sink_dsc_version_minor(intel_dp));
vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
DP_DSC_RGB;
@@ -1464,6 +1484,11 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
+ if (intel_dp->force_dsc_bpc) {
+ pipe_bpp = intel_dp->force_dsc_bpc * 3;
+ drm_dbg_kms(&dev_priv->drm, "Input DSC BPP forced to %d\n", pipe_bpp);
+ }
+
/* Min Input BPC for ICL+ is 8 */
if (pipe_bpp < 8 * 3) {
drm_dbg_kms(&dev_priv->drm,
@@ -1515,28 +1540,12 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->dsc.slice_count = dsc_dp_slice_count;
}
- /* As of today we support DSC for only RGB */
- if (intel_dp->force_dsc_bpp) {
- if (intel_dp->force_dsc_bpp >= 8 &&
- intel_dp->force_dsc_bpp < pipe_bpp) {
- drm_dbg_kms(&dev_priv->drm,
- "DSC BPP forced to %d",
- intel_dp->force_dsc_bpp);
- pipe_config->dsc.compressed_bpp =
- intel_dp->force_dsc_bpp;
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "Invalid DSC BPP %d",
- intel_dp->force_dsc_bpp);
- }
- }
-
/*
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
* is greater than the maximum Cdclock and if slice count is even
* then we need to use 2 VDSC instances.
*/
- if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq ||
+ if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq ||
pipe_config->bigjoiner_pipes) {
if (pipe_config->dsc.slice_count < 2) {
drm_dbg_kms(&dev_priv->drm,
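With the open-coded force_dsc_bpp clamping gone, the remaining constraint is structural: one VDSC engine handles one pixel per clock, so a pixel rate above CDCLK (or a bigjoiner configuration) needs two engines, which in turn needs at least two slices. A standalone sketch of that decision:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the 1-vs-2 VDSC engine decision; returns the engine
 * count, or -1 when the slice count can't support the split. */
static int vdsc_engines(int crtc_clock_khz, int max_cdclk_khz,
			bool bigjoiner, int slice_count)
{
	if (crtc_clock_khz <= max_cdclk_khz && !bigjoiner)
		return 1;
	if (slice_count < 2)
		return -1;	/* needs 2 engines, can't slice */
	return 2;
}

int main(void)
{
	printf("%d\n", vdsc_engines(1400000, 652800, false, 4));	/* 2 */
	printf("%d\n", vdsc_engines(300000, 652800, false, 1));	/* 1 */
	return 0;
}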
@@ -1626,7 +1635,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
* Optimize for slow and wide for everything, because some eDP 1.3
* and 1.4 panels don't work well with fast and narrow.
*/
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, conn_state, &limits);
if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) {
drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
@@ -1869,8 +1878,7 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915,
enum transcoder cpu_transcoder)
{
- /* M1/N1 is double buffered */
- if (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915))
+ if (HAS_DOUBLE_BUFFERED_M_N(i915))
return true;
return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder);
@@ -1908,13 +1916,16 @@ static bool can_enable_drrs(struct intel_connector *connector,
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
- int output_bpp, bool constant_n)
+ int output_bpp)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
+ if (has_seamless_m_n(connector))
+ pipe_config->seamless_m_n = true;
+
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
intel_zero_m_n(&pipe_config->dp_m2_n2);
@@ -1932,7 +1943,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
pipe_config->port_clock, &pipe_config->dp_m2_n2,
- constant_n, pipe_config->fec_enable);
+ pipe_config->fec_enable);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2007,7 +2018,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
const struct drm_display_mode *fixed_mode;
struct intel_connector *connector = intel_dp->attached_connector;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
@@ -2086,7 +2096,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
- constant_n, pipe_config->fec_enable);
+ pipe_config->fec_enable);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2097,8 +2107,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
- intel_dp_drrs_compute_config(connector, pipe_config,
- output_bpp, constant_n);
+ intel_dp_drrs_compute_config(connector, pipe_config, output_bpp);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
@@ -5032,9 +5041,9 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->dev);
spin_lock_irq(&i915->irq_lock);
- i915->hotplug.event_bits |= BIT(encoder->hpd_pin);
+ i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
spin_unlock_irq(&i915->irq_lock);
- queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0);
+ queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -5192,7 +5201,7 @@ intel_edp_add_properties(struct intel_dp *intel_dp)
return;
drm_connector_set_panel_orientation_with_quirk(&connector->base,
- i915->vbt.orientation,
+ i915->display.vbt.orientation,
fixed_mode->hdisplay,
fixed_mode->vdisplay);
}
@@ -5302,8 +5311,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_panel_init(intel_connector);
- if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
- intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_backlight_setup(intel_connector, pipe);
intel_edp_add_properties(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 2bc119374555..48c375c65a41 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -42,7 +42,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
bool done;
#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(i915->gmbus_wait_queue, C,
+ done = wait_event_timeout(i915->display.gmbus.wait_queue, C,
msecs_to_jiffies_timeout(timeout_ms));
/* just trace the final value */
@@ -86,7 +86,7 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- freq = dev_priv->cdclk.hw.cdclk;
+ freq = dev_priv->display.cdclk.hw.cdclk;
else
freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
return DIV_ROUND_CLOSEST(freq, 2000);
@@ -150,6 +150,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
u32 unused)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 ret;
/*
@@ -170,6 +171,13 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
if (intel_tc_port_in_tbt_alt_mode(dig_port))
ret |= DP_AUX_CH_CTL_TBT_IO;
+ /*
+ * Power request bit is already set during aux power well enable.
+ * Preserve the bit across aux transactions.
+ */
+ if (DISPLAY_VER(i915) >= 14)
+ ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;
+
return ret;
}
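On display 14+ the AUX control register carries a power-request bit that the power-well code already set; since the send-control word is composed from scratch for every transaction, a plain write would clear it and could drop the PHY power mid-transfer, hence the explicit re-OR. A minimal sketch of the pattern, with a hypothetical bit layout:

#include <stdint.h>
#include <stdio.h>

#define AUX_CTL_SEND_BUSY	(1u << 31)	/* hypothetical bits */
#define AUX_CTL_POWER_REQUEST	(1u << 19)

/* Build the control word for a fresh AUX transaction. Because the
 * word is composed from scratch rather than read-modify-written,
 * sticky bits like POWER_REQUEST must be re-added explicitly. */
static uint32_t aux_send_ctl(int display_ver, unsigned int msg_len)
{
	uint32_t ret = AUX_CTL_SEND_BUSY | (msg_len & 0x1f);

	if (display_ver >= 14)
		ret |= AUX_CTL_POWER_REQUEST;
	return ret;
}

int main(void)
{
	printf("0x%08x\n", (unsigned int)aux_send_ctl(14, 16));
	return 0;
}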
@@ -629,6 +637,46 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
}
}
+static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ return XELPDP_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return XELPDP_DP_AUX_CH_CTL(AUX_CH_A);
+ }
+}
+
+static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_A:
+ case AUX_CH_B:
+ case AUX_CH_USBC1:
+ case AUX_CH_USBC2:
+ case AUX_CH_USBC3:
+ case AUX_CH_USBC4:
+ return XELPDP_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return XELPDP_DP_AUX_CH_DATA(AUX_CH_A, index);
+ }
+}
+
void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
@@ -644,7 +692,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
} else if (DISPLAY_VER(dev_priv) >= 9) {
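Register lookup is installed once at init as function pointers, newest platform checked first, so the AUX hot path never re-tests the platform. A standalone sketch of that dispatch shape, with made-up register offsets:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t (*aux_ctl_reg_fn)(int aux_ch);

static uint32_t xelpdp_ctl(int ch) { return 0x16fa10u + ch * 0x400; }	/* made up */
static uint32_t tgl_ctl(int ch)    { return 0x64010u + ch * 0x100; }	/* made up */

static aux_ctl_reg_fn pick_ctl_reg(int display_ver)
{
	if (display_ver >= 14)	/* newest platform checked first */
		return xelpdp_ctl;
	return tgl_ctl;
}

int main(void)
{
	aux_ctl_reg_fn fn = pick_ctl_reg(14);

	printf("0x%x\n", (unsigned int)fn(0));
	return 0;
}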
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index a7640dbcf00e..88689124c013 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -17,6 +17,7 @@
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9feaf1a589f3..3d3efcf02011 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -37,17 +37,6 @@ static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}
-static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy,
- char *buf, size_t buf_size)
-{
- if (dp_phy == DP_PHY_DPRX)
- snprintf(buf, buf_size, "DPRX");
- else
- snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1);
-
- return buf;
-}
-
static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
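The deleted helper documents exactly what the shared drm_dp_phy_name() must produce: "DPRX" for the sink itself and "LTTPR <n>" for the nth repeater; returning static strings is what lets every caller below drop its phy_name[10] stack buffer. A sketch of that mapping, assuming consecutive LTTPR phy ids:

#include <stdio.h>

enum drm_dp_phy { DP_PHY_DPRX = 0, DP_PHY_LTTPR1, DP_PHY_LTTPR2 };

/* Static strings instead of snprintf into a caller buffer; this is
 * why the per-function phy_name locals disappear in this file. */
static const char *dp_phy_name(enum drm_dp_phy dp_phy)
{
	static const char * const names[] = {
		"DPRX", "LTTPR 1", "LTTPR 2",
	};

	if ((int)dp_phy < 0 ||
	    (int)dp_phy >= (int)(sizeof(names) / sizeof(names[0])))
		return "<invalid>";
	return names[dp_phy];
}

int main(void)
{
	printf("%s / %s\n", dp_phy_name(DP_PHY_DPRX), dp_phy_name(DP_PHY_LTTPR1));
	return 0;
}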
@@ -60,20 +49,19 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
- char phy_name[10];
-
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"[ENCODER:%d:%s][%s] failed to read the PHY caps\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return;
}
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n",
- encoder->base.base.id, encoder->base.name, phy_name,
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy),
(int)sizeof(intel_dp->lttpr_phy_caps[0]),
phy_caps);
}
@@ -423,14 +411,13 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- char phy_name[10];
int lane;
if (intel_dp_is_uhbr(crtc_state)) {
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
"TX FFE request: " TRAIN_REQ_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_REQ_TX_FFE_ARGS(link_status));
} else {
@@ -438,7 +425,7 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
"vswing request: " TRAIN_REQ_FMT ", "
"pre-emphasis request: " TRAIN_REQ_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_REQ_VSWING_ARGS(link_status),
TRAIN_REQ_PREEMPH_ARGS(link_status));
@@ -503,13 +490,12 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
- char phy_name[10];
if (train_pat != DP_TRAINING_PATTERN_DISABLE)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
dp_training_pattern_name(train_pat));
intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
@@ -546,13 +532,12 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- char phy_name[10];
if (intel_dp_is_uhbr(crtc_state)) {
drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
"TX FFE presets: " TRAIN_SET_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
} else {
@@ -560,7 +545,7 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
"vswing levels: " TRAIN_SET_FMT ", "
"pre-emphasis levels: " TRAIN_SET_FMT "\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
crtc_state->lane_count,
TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
@@ -671,6 +656,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
&link_bw, &rate_select);
+ /*
+ * WaEdpLinkRateDataReload
+ *
+ * Parade PS8461E MUX (used on various TGL+ laptops) needs
+ * to snoop the link rates reported by the sink when we
+ * use LINK_RATE_SET in order to operate in jitter cleaning
+ * mode (as opposed to redriver mode). Unfortunately it
+ * loses track of the snooped link rates when powered down,
+ * so we need to make it re-snoop often. Without this, high
+ * link rates are not stable.
+ */
+ if (!link_bw) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
+ connector->base.base.id, connector->base.name);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+ }
+
if (link_bw)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
@@ -732,12 +739,11 @@ intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- char phy_name[10];
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
}
@@ -757,21 +763,19 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
int voltage_tries, cr_tries, max_cr_tries;
u8 link_status[DP_LINK_STATUS_SIZE];
bool max_vswing_reached = false;
- char phy_name[10];
int delay_us;
delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
intel_dp->dpcd, dp_phy,
intel_dp_is_uhbr(crtc_state));
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
-
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -795,14 +799,16 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Clock recovery OK\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return true;
}
@@ -810,7 +816,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Same voltage tried 5 times\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -818,7 +825,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Max Voltage Swing reached\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -828,7 +836,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to update link training\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -846,7 +855,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n",
- encoder->base.base.id, encoder->base.name, phy_name, max_cr_tries);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy), max_cr_tries);
return false;
}
@@ -924,15 +934,12 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
u32 training_pattern;
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
- char phy_name[10];
int delay_us;
delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
intel_dp->dpcd, dp_phy,
intel_dp_is_uhbr(crtc_state));
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
-
training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
if (training_pattern != DP_TRAINING_PATTERN_4)
@@ -944,7 +951,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to start channel equalization\n",
encoder->base.base.id, encoder->base.name,
- phy_name);
+ drm_dp_phy_name(dp_phy));
return false;
}
@@ -955,7 +962,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
link_status) < 0) {
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to get link status\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
@@ -966,7 +974,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Clock recovery check failed, cannot "
"continue channel equalization\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
@@ -975,7 +984,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
channel_eq = true;
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Channel EQ done. DP Training successful\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
@@ -985,7 +995,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
"[ENCODER:%d:%s][%s] Failed to update link training\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
break;
}
}
@@ -995,7 +1006,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n",
- encoder->base.base.id, encoder->base.name, phy_name);
+ encoder->base.base.id, encoder->base.name,
+ drm_dp_phy_name(dp_phy));
}
return channel_eq;
@@ -1070,7 +1082,6 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
{
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- char phy_name[10];
bool ret = false;
if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
@@ -1086,7 +1097,7 @@ out:
"[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n",
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ drm_dp_phy_name(dp_phy),
ret ? "passed" : "failed",
crtc_state->port_clock, crtc_state->lane_count);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 7713c19042f3..03604a37931c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -58,7 +58,6 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
@@ -100,7 +99,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
&crtc_state->dp_m_n,
- constant_n, crtc_state->fec_enable);
+ crtc_state->fec_enable);
crtc_state->dp_m_n.tu = slots;
return 0;
@@ -566,7 +565,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
- if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
+ if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable)
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0,
+ FECSTALL_DIS_DPTSTREAM_DPTTG);
+ else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
FECSTALL_DIS_DPTSTREAM_DPTTG);
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index cc6abe761f5e..8732b8722ed7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -484,7 +484,7 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
bool was_enabled;
- lockdep_assert_held(&dev_priv->power_domains.lock);
+ lockdep_assert_held(&dev_priv->display.power.domains.lock);
was_enabled = true;
if (rcomp_phy != -1)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 5262f16b45ac..b15ba78d64d6 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -938,12 +938,25 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
if (DISPLAY_VER(dev_priv) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- return intel_compute_shared_dplls(state, crtc, encoder);
+ ret = intel_compute_shared_dplls(state, crtc, encoder);
+ if (ret)
+ return ret;
+
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ /* CRT dotclock is determined via other means */
+ if (!crtc_state->has_pch_encoder)
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
}
static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -969,8 +982,15 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
+
+ ret = intel_mpllb_calc_state(crtc_state, encoder);
+ if (ret)
+ return ret;
+
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
- return intel_mpllb_calc_state(crtc_state, encoder);
+ return 0;
}
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
@@ -991,7 +1011,7 @@ static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
factor = 21;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
- dev_priv->vbt.lvds_ssc_freq == 100000) ||
+ dev_priv->display.vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev_priv) &&
intel_is_dual_link_lvds(dev_priv)))
factor = 25;
@@ -1096,6 +1116,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 120000;
+ int ret;
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!crtc_state->has_pch_encoder)
@@ -1105,8 +1126,8 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_panel_use_ssc(dev_priv)) {
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ dev_priv->display.vbt.lvds_ssc_freq);
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
}
if (intel_is_dual_link_lvds(dev_priv)) {
@@ -1132,7 +1153,14 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
- return intel_compute_shared_dplls(state, crtc, NULL);
+ ret = intel_compute_shared_dplls(state, crtc, NULL);
+ if (ret)
+ return ret;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return ret;
}
static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
@@ -1198,6 +1226,13 @@ static int chv_crtc_compute_clock(struct intel_atomic_state *state,
chv_compute_dpll(crtc_state);
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1217,6 +1252,13 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
vlv_compute_dpll(crtc_state);
+ /* FIXME this is a mess */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1231,7 +1273,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1259,6 +1301,11 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ /* FIXME this is a mess */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1273,7 +1320,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1292,6 +1339,9 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1306,7 +1356,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1325,6 +1375,11 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ /* FIXME this is a mess */
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT))
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1339,7 +1394,7 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
+ refclk = dev_priv->display.vbt.lvds_ssc_freq;
drm_dbg_kms(&dev_priv->drm,
"using SSC reference clock of %d kHz\n",
refclk);
@@ -1360,6 +1415,9 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
+ crtc_state->port_clock = crtc_state->dpll.dot;
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
return 0;
}
@@ -1411,16 +1469,13 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
- if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
- return 0;
-
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (!crtc_state->hw.enable)
return 0;
- ret = i915->dpll_funcs->crtc_compute_clock(state, crtc);
+ ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc);
if (ret) {
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n",
crtc->base.base.id, crtc->base.name);
@@ -1439,17 +1494,15 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
int ret;
drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+ drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll);
- if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
- return 0;
-
- if (!crtc_state->hw.enable)
+ if (!crtc_state->hw.enable || crtc_state->shared_dpll)
return 0;
- if (!i915->dpll_funcs->crtc_get_shared_dpll)
+ if (!i915->display.funcs.dpll->crtc_get_shared_dpll)
return 0;
- ret = i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
+ ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc);
if (ret) {
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n",
crtc->base.base.id, crtc->base.name);
@@ -1463,23 +1516,23 @@ void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
if (IS_DG2(dev_priv))
- dev_priv->dpll_funcs = &dg2_dpll_funcs;
+ dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
- dev_priv->dpll_funcs = &hsw_dpll_funcs;
+ dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->dpll_funcs = &ilk_dpll_funcs;
+ dev_priv->display.funcs.dpll = &ilk_dpll_funcs;
else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->dpll_funcs = &chv_dpll_funcs;
+ dev_priv->display.funcs.dpll = &chv_dpll_funcs;
else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->dpll_funcs = &vlv_dpll_funcs;
+ dev_priv->display.funcs.dpll = &vlv_dpll_funcs;
else if (IS_G4X(dev_priv))
- dev_priv->dpll_funcs = &g4x_dpll_funcs;
+ dev_priv->display.funcs.dpll = &g4x_dpll_funcs;
else if (IS_PINEVIEW(dev_priv))
- dev_priv->dpll_funcs = &pnv_dpll_funcs;
+ dev_priv->display.funcs.dpll = &pnv_dpll_funcs;
else if (DISPLAY_VER(dev_priv) != 2)
- dev_priv->dpll_funcs = &i9xx_dpll_funcs;
+ dev_priv->display.funcs.dpll = &i9xx_dpll_funcs;
else
- dev_priv->dpll_funcs = &i8xx_dpll_funcs;
+ dev_priv->display.funcs.dpll = &i8xx_dpll_funcs;
}
static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
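Nearly every compute_clock hunk above gains the same two writebacks: port_clock becomes the frequency the chosen dividers actually produce (dpll.dot), and crtc_clock is rederived from it, so the state checker later compares against what the hardware will really generate rather than what was requested. A toy round-trip with a made-up divider model:

#include <stdio.h>

/* Toy PLL: dot = ref * m / (n * p). Real i915 divider math differs;
 * this only illustrates the request -> dividers -> readback loop. */
struct dpll { int m, n, p, dot; };

static void compute_dpll(int ref_khz, int target_khz, struct dpll *pll)
{
	pll->n = 2;
	pll->p = 4;
	/* Round to the nearest achievable multiplier... */
	pll->m = (target_khz * pll->n * pll->p + ref_khz / 2) / ref_khz;
	/* ...then record what those dividers actually produce. */
	pll->dot = ref_khz * pll->m / (pll->n * pll->p);
}

int main(void)
{
	struct dpll pll;

	compute_dpll(24000, 148500, &pll);	/* ask for 148.5 MHz */
	/* port_clock / crtc_clock get the achievable value back. */
	printf("requested 148500, got %d kHz\n", pll.dot);
	return 0;
}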
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 118598c9a809..e5fb66a5dd02 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -113,8 +113,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
enum intel_dpll_id i;
/* Copy shared dpll state */
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i];
shared_dpll[i] = pll->state;
}
@@ -149,7 +149,7 @@ struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- return &dev_priv->dpll.shared_dplls[id];
+ return &dev_priv->display.dpll.shared_dplls[id];
}
/**
@@ -164,11 +164,11 @@ enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- long pll_idx = pll - dev_priv->dpll.shared_dplls;
+ long pll_idx = pll - dev_priv->display.dpll.shared_dplls;
if (drm_WARN_ON(&dev_priv->drm,
pll_idx < 0 ||
- pll_idx >= dev_priv->dpll.num_shared_dpll))
+ pll_idx >= dev_priv->display.dpll.num_shared_dpll))
return -1;
return pll_idx;
@@ -245,7 +245,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
old_mask = pll->active_mask;
if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) ||
@@ -271,7 +271,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->on = true;
out:
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
/**
@@ -294,7 +294,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (pll == NULL)
return;
- mutex_lock(&dev_priv->dpll.lock);
+ mutex_lock(&dev_priv->display.dpll.lock);
if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask),
"%s not used by [CRTC:%d:%s]\n", pll->info->name,
crtc->base.base.id, crtc->base.name))
@@ -317,7 +317,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
pll->on = false;
out:
- mutex_unlock(&dev_priv->dpll.lock);
+ mutex_unlock(&dev_priv->display.dpll.lock);
}
static struct intel_shared_dpll *
@@ -336,7 +336,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
- pll = &dev_priv->dpll.shared_dplls[i];
+ pll = &dev_priv->display.dpll.shared_dplls[i];
/* Only want to check enabled timings first */
if (shared_dpll[i].pipe_mask == 0) {
@@ -436,9 +436,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
if (!state->dpll_set)
return;
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
struct intel_shared_dpll *pll =
- &dev_priv->dpll.shared_dplls[i];
+ &dev_priv->display.dpll.shared_dplls[i];
swap(pll->state, shared_dpll[i]);
}
@@ -537,7 +537,7 @@ static int ibx_get_dpll(struct intel_atomic_state *state,
if (HAS_PCH_IBX(dev_priv)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
- pll = &dev_priv->dpll.shared_dplls[i];
+ pll = &dev_priv->display.dpll.shared_dplls[i];
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] using pre-allocated %s\n",
@@ -905,37 +905,6 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static int
-hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- unsigned int p, n2, r2;
-
- hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
-
- crtc_state->dpll_hw_state.wrpll =
- WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
-
- return 0;
-}
-
-static struct intel_shared_dpll *
-hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
-
- return intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_WRPLL2) |
- BIT(DPLL_ID_WRPLL1));
-}
-
static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *pll_state)
@@ -948,7 +917,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
case WRPLL_REF_SPECIAL_HSW:
/* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
- refclk = dev_priv->dpll.ref_clks.nssc;
+ refclk = dev_priv->display.dpll.ref_clks.nssc;
break;
}
fallthrough;
@@ -958,7 +927,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
* code only cares about 5% accuracy, and spread is a max of
* 0.5% downspread.
*/
- refclk = dev_priv->dpll.ref_clks.ssc;
+ refclk = dev_priv->display.dpll.ref_clks.ssc;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -977,6 +946,41 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
}
static int
+hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int p, n2, r2;
+
+ hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
+
+ crtc_state->dpll_hw_state.wrpll =
+ WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
+
+ return 0;
+}
+
+static struct intel_shared_dpll *
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ return intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_WRPLL2) |
+ BIT(DPLL_ID_WRPLL1));
+}
+
+static int
hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -1145,12 +1149,12 @@ static int hsw_get_dpll(struct intel_atomic_state *state,
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
{
- i915->dpll.ref_clks.ssc = 135000;
+ i915->display.dpll.ref_clks.ssc = 135000;
/* Non-SSC is only used on non-ULT HSW. */
if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
- i915->dpll.ref_clks.nssc = 24000;
+ i915->display.dpll.ref_clks.nssc = 24000;
else
- i915->dpll.ref_clks.nssc = 135000;
+ i915->display.dpll.ref_clks.nssc = 135000;
}
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1618,48 +1622,11 @@ skip_remaining_dividers:
return 0;
}
-static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- struct skl_wrpll_params wrpll_params = {};
- u32 ctrl1, cfgcr1, cfgcr2;
- int ret;
-
- /*
- * See comment in intel_dpll_hw_state to understand why we always use 0
- * as the DPLL id in this function.
- */
- ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-
- ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
-
- ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
- i915->dpll.ref_clks.nssc, &wrpll_params);
- if (ret)
- return ret;
-
- cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
- DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
- wrpll_params.dco_integer;
-
- cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
- DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
- DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
- DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
- wrpll_params.central_freq;
-
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
- return 0;
-}
-
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *pll_state)
{
- int ref_clock = i915->dpll.ref_clks.nssc;
+ int ref_clock = i915->display.dpll.ref_clks.nssc;
u32 p0, p1, p2, dco_freq;
p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
@@ -1726,6 +1693,46 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
return dco_freq / (p0 * p1 * p2 * 5);
}
+static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wrpll_params wrpll_params = {};
+ u32 ctrl1, cfgcr1, cfgcr2;
+ int ret;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+ ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->display.dpll.ref_clks.nssc, &wrpll_params);
+ if (ret)
+ return ret;
+
+ cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+ wrpll_params.dco_integer;
+
+ cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+
+ crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
+
+ return 0;
+}
+
static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -1858,7 +1865,7 @@ static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
/* No SSC ref */
- i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+ i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -2171,7 +2178,7 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
}
}
- chv_calc_dpll_params(i915->dpll.ref_clks.nssc, clk_div);
+ chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div);
drm_WARN_ON(&i915->drm, clk_div->vco == 0 ||
clk_div->dot != crtc_state->port_clock);
@@ -2245,6 +2252,23 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
return 0;
}
+static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
+{
+ struct dpll clock;
+
+ clock.m1 = 2;
+ clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
+ clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
+ clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
+ clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
+
+ return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock);
+}
+
static int
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -2258,28 +2282,20 @@ bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
static int
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct dpll clk_div = {};
+ int ret;
bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
- return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
-}
-
-static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
- const struct intel_shared_dpll *pll,
- const struct intel_dpll_hw_state *pll_state)
-{
- struct dpll clock;
+ ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
+ if (ret)
+ return ret;
- clock.m1 = 2;
- clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2);
- clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1);
- clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0);
- clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0);
+ crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL,
+ &crtc_state->dpll_hw_state);
- return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
+ return 0;
}
static int bxt_compute_dpll(struct intel_atomic_state *state,
@@ -2324,8 +2340,8 @@ static int bxt_get_dpll(struct intel_atomic_state *state,
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
{
- i915->dpll.ref_clks.ssc = 100000;
- i915->dpll.ref_clks.nssc = 100000;
+ i915->display.dpll.ref_clks.ssc = 100000;
+ i915->display.dpll.ref_clks.nssc = 100000;
/* DSI non-SSC ref 19.2MHz */
}
@@ -2468,7 +2484,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
- i915->dpll.ref_clks.nssc == 38400;
+ i915->display.dpll.ref_clks.nssc == 38400;
}
struct icl_combo_pll_params {
@@ -2562,7 +2578,7 @@ static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
- dev_priv->dpll.ref_clks.nssc == 24000 ?
+ dev_priv->display.dpll.ref_clks.nssc == 24000 ?
icl_dp_combo_pll_24MHz_values :
icl_dp_combo_pll_19_2MHz_values;
int clock = crtc_state->port_clock;
@@ -2585,9 +2601,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (DISPLAY_VER(dev_priv) >= 12) {
- switch (dev_priv->dpll.ref_clks.nssc) {
+ switch (dev_priv->display.dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
+ MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
fallthrough;
case 19200:
case 38400:
@@ -2598,9 +2614,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
break;
}
} else {
- switch (dev_priv->dpll.ref_clks.nssc) {
+ switch (dev_priv->display.dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
+ MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc);
fallthrough;
case 19200:
case 38400:
@@ -2630,7 +2646,7 @@ static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
{
- int ref_clock = i915->dpll.ref_clks.nssc;
+ int ref_clock = i915->display.dpll.ref_clks.nssc;
/*
* For ICL+, the spec states: if reference frequency is 38.4,
@@ -2769,8 +2785,8 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
else
pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
- if (i915->vbt.override_afc_startup)
- pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
+ if (i915->display.vbt.override_afc_startup)
+ pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val);
}
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
@@ -2857,7 +2873,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int refclk_khz = dev_priv->dpll.ref_clks.nssc;
+ int refclk_khz = dev_priv->display.dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -2965,8 +2981,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
DKL_PLL_DIV0_FBPREDIV(m1div) |
DKL_PLL_DIV0_FBDIV_INT(m2div_int);
- if (dev_priv->vbt.override_afc_startup) {
- u8 val = dev_priv->vbt.override_afc_startup_val;
+ if (dev_priv->display.vbt.override_afc_startup) {
+ u8 val = dev_priv->display.vbt.override_afc_startup_val;
pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val);
}
@@ -3063,7 +3079,7 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
- ref_clock = dev_priv->dpll.ref_clks.nssc;
+ ref_clock = dev_priv->display.dpll.ref_clks.nssc;
if (DISPLAY_VER(dev_priv) >= 12) {
m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
@@ -3197,6 +3213,12 @@ static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state,
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
+ /* this is mainly for the fastset check */
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
+
+ crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL,
+ &port_dpll->hw_state);
+
return 0;
}
@@ -3282,6 +3304,12 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
if (ret)
return ret;
+ /* this is mainly for the fastset check */
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
+
+ crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL,
+ &port_dpll->hw_state);
+
return 0;
}
@@ -3440,7 +3468,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias =
intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- if (dev_priv->dpll.ref_clks.nssc == 38400) {
+ if (dev_priv->display.dpll.ref_clks.nssc == 38400) {
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
hw_state->mg_pll_bias_mask = 0;
} else {
@@ -3502,7 +3530,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
val = DKL_PLL_DIV0_MASK;
- if (dev_priv->vbt.override_afc_startup)
+ if (dev_priv->display.vbt.override_afc_startup)
val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
hw_state->mg_pll_div0 &= val;
@@ -3566,7 +3594,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
TGL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = intel_de_read(dev_priv,
TGL_DPLL_CFGCR1(id));
- if (dev_priv->vbt.override_afc_startup) {
+ if (dev_priv->display.vbt.override_afc_startup) {
hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id));
hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK;
}
@@ -3638,9 +3666,9 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
- drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup &&
+ drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup &&
!i915_mmio_reg_valid(div0_reg));
- if (dev_priv->vbt.override_afc_startup &&
+ if (dev_priv->display.vbt.override_afc_startup &&
i915_mmio_reg_valid(div0_reg))
intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK,
hw_state->div0);
@@ -3732,7 +3760,7 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
val = DKL_PLL_DIV0_MASK;
- if (dev_priv->vbt.override_afc_startup)
+ if (dev_priv->display.vbt.override_afc_startup)
val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
hw_state->mg_pll_div0);
@@ -3967,7 +3995,7 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
/* No SSC ref */
- i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+ i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref;
}
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -4192,22 +4220,24 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
- dev_priv->dpll.num_shared_dpll = 0;
+ dev_priv->display.dpll.num_shared_dpll = 0;
return;
}
dpll_info = dpll_mgr->dpll_info;
for (i = 0; dpll_info[i].name; i++) {
+ if (drm_WARN_ON(&dev_priv->drm,
+ i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls)))
+ break;
+
drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
- dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
+ dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i];
}
- dev_priv->dpll.mgr = dpll_mgr;
- dev_priv->dpll.num_shared_dpll = i;
- mutex_init(&dev_priv->dpll.lock);
-
- BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
+ dev_priv->display.dpll.mgr = dpll_mgr;
+ dev_priv->display.dpll.num_shared_dpll = i;
+ mutex_init(&dev_priv->display.dpll.lock);
}
/**
@@ -4229,7 +4259,7 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return -EINVAL;
@@ -4262,7 +4292,7 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return -EINVAL;
@@ -4285,7 +4315,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
/*
* FIXME: this function is called for every platform having a
@@ -4314,7 +4344,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return;
@@ -4385,16 +4415,16 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
void intel_dpll_update_ref_clks(struct drm_i915_private *i915)
{
- if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
- i915->dpll.mgr->update_ref_clks(i915);
+ if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks)
+ i915->display.dpll.mgr->update_ref_clks(i915);
}
void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->dpll.num_shared_dpll; i++)
- readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]);
}
static void sanitize_dpll_state(struct drm_i915_private *i915,
@@ -4420,8 +4450,8 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->dpll.num_shared_dpll; i++)
- sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ sanitize_dpll_state(i915, &i915->display.dpll.shared_dplls[i]);
}
/**
@@ -4434,8 +4464,8 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915)
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- if (dev_priv->dpll.mgr) {
- dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
+ if (dev_priv->display.dpll.mgr) {
+ dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
@@ -4533,7 +4563,7 @@ void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915)
{
int i;
- for (i = 0; i < i915->dpll.num_shared_dpll; i++)
- verify_single_dpll_state(i915, &i915->dpll.shared_dplls[i],
+ for (i = 0; i < i915->display.dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i],
NULL, NULL);
}
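
The intel_shared_dpll_init() loop above now bounds itself against ARRAY_SIZE(shared_dplls) with a warn-and-break instead of the removed post-hoc BUG_ON, so an oversized platform table degrades to registering fewer PLLs rather than panicking. A standalone sketch of that defensive loop shape (table contents invented):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	/* NULL-terminated platform table, deliberately larger than the slots. */
	const char *dpll_info[] = { "DPLL0", "DPLL1", "DPLL2", "DPLL3", NULL };
	const char *shared_dplls[2];
	int i;

	for (i = 0; dpll_info[i]; i++) {
		if (i >= (int)ARRAY_SIZE(shared_dplls)) {
			fprintf(stderr, "WARN: dpll table overflows slots\n");
			break; /* degrade gracefully instead of crashing later */
		}
		shared_dplls[i] = dpll_info[i];
	}
	printf("registered %d shared DPLLs\n", i);
	return 0;
}
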
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index c4affcb216fd..fc9c3e41c333 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -9,6 +9,36 @@
#include "i915_drv.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dsb.h"
+
+struct i915_vma;
+
+enum dsb_id {
+ INVALID_DSB = -1,
+ DSB1,
+ DSB2,
+ DSB3,
+ MAX_DSB_PER_PIPE
+};
+
+struct intel_dsb {
+ enum dsb_id id;
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+
+ /*
+ * free_pos will point to the first free entry position
+ * and help in calculating the tail of the command buffer.
+ */
+ int free_pos;
+
+ /*
+ * ins_start_offset will help to store the start address of the dsb
+ * instruction and help in identifying the batch of auto-increment
+ * registers.
+ */
+ u32 ins_start_offset;
+};
#define DSB_BUF_SIZE (2 * PAGE_SIZE)
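
A rough standalone model of the bookkeeping those two comments describe, with invented names and sizes rather than the driver's API: each emit advances free_pos, from which the buffer tail is derived, while ins_start_offset remembers where the current instruction began so that writes to auto-incrementing registers can be appended to the same instruction.

#include <stdint.h>
#include <stdio.h>

#define DSB_BUF_DWORDS 1024

/* Toy model of the intel_dsb bookkeeping fields. */
struct dsb_model {
	uint32_t cmd_buf[DSB_BUF_DWORDS];
	int free_pos;         /* first free dword entry; defines the tail */
	int ins_start_offset; /* dword offset where the current instruction began */
};

/* Emit one instruction+data pair, remembering where the instruction starts. */
static void dsb_emit(struct dsb_model *dsb, uint32_t instr, uint32_t data)
{
	dsb->ins_start_offset = dsb->free_pos;
	dsb->cmd_buf[dsb->free_pos++] = instr;
	dsb->cmd_buf[dsb->free_pos++] = data;
}

int main(void)
{
	struct dsb_model dsb = { .free_pos = 0 };

	dsb_emit(&dsb, 0x1, 0xdeadbeef); /* opcode value is made up */
	printf("tail at dword %d, instruction started at %d\n",
	       dsb.free_pos, dsb.ins_start_offset);
	return 0;
}
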
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 6cb9c580cdca..74dd2b3343bb 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -11,34 +11,6 @@
#include "i915_reg_defs.h"
struct intel_crtc_state;
-struct i915_vma;
-
-enum dsb_id {
- INVALID_DSB = -1,
- DSB1,
- DSB2,
- DSB3,
- MAX_DSB_PER_PIPE
-};
-
-struct intel_dsb {
- enum dsb_id id;
- u32 *cmd_buf;
- struct i915_vma *vma;
-
- /*
- * free_pos will point the first free entry position
- * and help in calculating tail of command buffer.
- */
- int free_pos;
-
- /*
- * ins_start_offset will help to store start address of the dsb
- * instuction and help in identifying the batch of auto-increment
- * register.
- */
- u32 ins_start_offset;
-};
void intel_dsb_prepare(struct intel_crtc_state *crtc_state);
void intel_dsb_cleanup(struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index 35e121cd226c..5efdd471ac2b 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -106,7 +106,7 @@ intel_dsi_get_panel_orientation(struct intel_connector *connector)
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
- orientation = dev_priv->vbt.orientation;
+ orientation = dev_priv->display.vbt.orientation;
if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return orientation;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index eafef0a87fea..ce80bd8be519 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -89,9 +89,6 @@ struct intel_dsi {
u8 escape_clk_div;
u8 dual_link;
- u16 dcs_backlight_ports;
- u16 dcs_cabc_ports;
-
/* RGB or BGR */
bool bgr_enabled;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 1bc7118c56a2..20e466d843ce 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -53,7 +53,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused
enum port port;
size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
&data, len);
@@ -80,7 +80,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
data[1] = level;
}
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mode_flags = dsi_device->mode_flags;
dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
@@ -93,12 +93,13 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32
static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
dcs_set_backlight(conn_state, 0);
- for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) {
u8 cabc = POWER_SAVE_OFF;
dsi_device = intel_dsi->dsi_hosts[port]->device;
@@ -106,7 +107,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state,
&cabc, sizeof(cabc));
}
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
u8 ctrl = 0;
dsi_device = intel_dsi->dsi_hosts[port]->device;
@@ -127,10 +128,11 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
enum port port;
- for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.bl_ports) {
u8 ctrl = 0;
dsi_device = intel_dsi->dsi_hosts[port]->device;
@@ -146,7 +148,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
&ctrl, sizeof(ctrl));
}
- for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+ for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) {
u8 cabc = POWER_SAVE_MEDIUM;
dsi_device = intel_dsi->dsi_hosts[port]->device;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index d96c3cc46e50..50205f064d93 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -75,8 +75,8 @@ struct intel_dvo_dev_ops {
*
* \return MODE_OK if the mode is valid, or another MODE_* otherwise.
*/
- int (*mode_valid)(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode);
+ enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo,
+ struct drm_display_mode *mode);
/*
* Callback for preparing mode changes on an output
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index c86e5d4ee016..1dddd6abd77b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -26,10 +26,17 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
u32 alignment;
int ret;
+ /*
+ * We are not syncing against the binding (and potential migrations)
+ * below, so this vm must never be async.
+ */
+ GEM_WARN_ON(vm->bind_async_flags);
+
if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
@@ -37,29 +44,48 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
- ret = i915_gem_object_lock_interruptible(obj, NULL);
- if (!ret) {
+ for_i915_gem_ww(&ww, ret, true) {
+ ret = i915_gem_object_lock(obj, &ww);
+ if (ret)
+ continue;
+
+ if (HAS_LMEM(dev_priv)) {
+ unsigned int flags = obj->flags;
+
+ /*
+ * For this type of buffer we need to be able to read from the CPU
+ * the clear color value found in the buffer, hence we need to
+ * ensure it is always in the mappable part of lmem, if this is
+ * a small-bar device.
+ */
+ if (intel_fb_rc_ccs_cc_plane(fb) >= 0)
+ flags &= ~I915_BO_ALLOC_GPU_ONLY;
+ ret = __i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0,
+ flags);
+ if (ret)
+ continue;
+ }
+
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
- i915_gem_object_unlock(obj);
- }
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
+ if (ret)
+ continue;
- vma = i915_vma_instance(obj, vm, view);
- if (IS_ERR(vma))
- goto err;
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ continue;
+ }
- if (i915_vma_misplaced(vma, 0, alignment, 0)) {
- ret = i915_vma_unbind_unlocked(vma);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
+ if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ continue;
}
- }
- ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+ ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
+ if (ret)
+ continue;
+ }
if (ret) {
vma = ERR_PTR(ret);
goto err;
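
The conversion to for_i915_gem_ww() is why every failure path above says continue instead of goto err: the macro reruns its body after backing off on -EDEADLK and leaves the loop on any other result, with ret carrying the outcome. A standalone model of that control flow, the stubbed attempt function standing in for the locking and pinning work:

#include <errno.h>
#include <stdio.h>

/* Stub: pretend the first attempt hits ww contention, the second succeeds. */
static int try_pin_attempt(int attempt)
{
	return attempt == 0 ? -EDEADLK : 0;
}

int main(void)
{
	int ret, attempt = 0;

	/* Model of for_i915_gem_ww(): rerun the body while it reports -EDEADLK. */
	do {
		ret = try_pin_attempt(attempt++);
		if (ret == -EDEADLK)
			fprintf(stderr, "ww contention, backing off and retrying\n");
	} while (ret == -EDEADLK);

	printf("pin finished with %d after %d attempt(s)\n", ret, attempt);
	return 0;
}
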
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 16537830ccf0..f38175304928 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -55,11 +55,11 @@
#define for_each_fbc_id(__dev_priv, __fbc_id) \
for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
- for_each_if(INTEL_INFO(__dev_priv)->display.fbc_mask & BIT(__fbc_id))
+ for_each_if(RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id))
#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \
for_each_fbc_id((__dev_priv), (__fbc_id)) \
- for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)])
+ for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)])
struct intel_fbc_funcs {
void (*activate)(struct intel_fbc *fbc);
@@ -1098,6 +1098,12 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
+ /* Wa_14016291713 */
+ if (IS_DISPLAY_VER(i915, 12, 13) && crtc_state->has_psr) {
+ plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
+ return 0;
+ }
+
if (!pixel_format_is_valid(plane_state)) {
plane_state->no_fbc_reason = "pixel format not supported";
return 0;
@@ -1704,17 +1710,17 @@ void intel_fbc_init(struct drm_i915_private *i915)
enum intel_fbc_id fbc_id;
if (!drm_mm_initialized(&i915->mm.stolen))
- mkwrite_device_info(i915)->display.fbc_mask = 0;
+ RUNTIME_INFO(i915)->fbc_mask = 0;
if (need_fbc_vtd_wa(i915))
- mkwrite_device_info(i915)->display.fbc_mask = 0;
+ RUNTIME_INFO(i915)->fbc_mask = 0;
i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
i915->params.enable_fbc);
for_each_fbc_id(i915, fbc_id)
- i915->fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
+ i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
}
/**
@@ -1834,7 +1840,7 @@ void intel_fbc_debugfs_register(struct drm_i915_private *i915)
struct drm_minor *minor = i915->drm.primary;
struct intel_fbc *fbc;
- fbc = i915->fbc[INTEL_FBC_A];
+ fbc = i915->display.fbc[INTEL_FBC_A];
if (fbc)
intel_fbc_debugfs_add(fbc, minor->debugfs_root);
}
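
With the FBC instance mask moved into runtime info, for_each_fbc_id() walks a fixed id range and filters it through RUNTIME_INFO()->fbc_mask, which also lets intel_fbc_init() disable FBC wholesale by zeroing the mask for the no-stolen-memory and VT-d cases. The same iterate-and-filter pattern in a standalone sketch:

#include <stdio.h>

enum fbc_id { FBC_A, FBC_B, MAX_FBCS };

int main(void)
{
	unsigned int fbc_mask = 1u << FBC_A; /* pretend only instance A exists */

	/* Model of for_each_fbc_id(): iterate all ids, skip masked-out ones. */
	for (int id = FBC_A; id < MAX_FBCS; id++) {
		if (!(fbc_mask & (1u << id)))
			continue;
		printf("FBC %c present\n", 'A' + id);
	}
	return 0;
}
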
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index db60143295ec..4adb98afe6ff 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,6 +19,7 @@ struct intel_plane_state;
enum intel_fbc_id {
INTEL_FBC_A,
+ INTEL_FBC_B,
I915_MAX_FBCS,
};
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 1922f62d04c0..112aa0447a0d 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -210,6 +210,12 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_i915_gem_object *obj;
int ret;
+ mutex_lock(&ifbdev->hpd_lock);
+ ret = ifbdev->hpd_suspended ? -EAGAIN : 0;
+ mutex_unlock(&ifbdev->hpd_lock);
+ if (ret)
+ return ret;
+
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) {
@@ -500,7 +506,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
{
intel_fbdev_set_suspend(&container_of(work,
struct drm_i915_private,
- fbdev_suspend_work)->drm,
+ display.fbdev.suspend_work)->drm,
FBINFO_STATE_RUNNING,
true);
}
@@ -530,8 +536,8 @@ int intel_fbdev_init(struct drm_device *dev)
return ret;
}
- dev_priv->fbdev = ifbdev;
- INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
+ dev_priv->display.fbdev.fbdev = ifbdev;
+ INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
return 0;
}
@@ -548,7 +554,7 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
void intel_fbdev_initial_config_async(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
if (!ifbdev)
return;
@@ -568,12 +574,13 @@ static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
if (!ifbdev)
return;
- cancel_work_sync(&dev_priv->fbdev_suspend_work);
+ intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true);
+
if (!current_is_async())
intel_fbdev_sync(ifbdev);
@@ -582,7 +589,7 @@ void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
void intel_fbdev_fini(struct drm_i915_private *dev_priv)
{
- struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev);
+ struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev);
if (!ifbdev)
return;
@@ -596,7 +603,7 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
*/
static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
{
- struct intel_fbdev *ifbdev = i915->fbdev;
+ struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev;
bool send_hpd = false;
mutex_lock(&ifbdev->hpd_lock);
@@ -614,11 +621,11 @@ static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_fbdev *ifbdev = dev_priv->fbdev;
+ struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
struct fb_info *info;
if (!ifbdev || !ifbdev->vma)
- return;
+ goto set_suspend;
info = ifbdev->helper.fbdev;
@@ -631,7 +638,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* ourselves, so only flush outstanding work upon suspend!
*/
if (state != FBINFO_STATE_RUNNING)
- flush_work(&dev_priv->fbdev_suspend_work);
+ flush_work(&dev_priv->display.fbdev.suspend_work);
console_lock();
} else {
@@ -645,7 +652,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
/* Don't block our own workqueue as this can
* be run in parallel with other i915.ko tasks.
*/
- schedule_work(&dev_priv->fbdev_suspend_work);
+ schedule_work(&dev_priv->display.fbdev.suspend_work);
return;
}
}
@@ -661,12 +668,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
+set_suspend:
intel_fbdev_hpd_set_suspend(dev_priv, state);
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
bool send_hpd;
if (!ifbdev)
@@ -685,7 +693,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
void intel_fbdev_restore_mode(struct drm_device *dev)
{
- struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
if (!ifbdev)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 67d2484afbaa..7f47e5c85c81 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -113,7 +113,7 @@ void intel_fdi_link_train(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state);
+ dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
}
/* units of 100MHz */
@@ -210,14 +210,14 @@ void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
u32 fdi_pll_clk =
intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
- i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
+ i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
- i915->fdi_pll_freq = 270000;
+ i915->display.fdi.pll_freq = 270000;
} else {
return;
}
- drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq);
+ drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
}
int intel_fdi_link_freq(struct drm_i915_private *i915,
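
intel_fdi_pll_freq_update() derives the Ironlake FDI PLL frequency from the FDI_PLL_BIOS_0 feedback-clock field as (fdi_pll_clk + 2) * 10000 kHz, with SNB/IVB shortcutting to a fixed 270000 kHz. A quick standalone check of that scaling, using an assumed register field value:

#include <stdio.h>

int main(void)
{
	unsigned int fdi_pll_clk = 25; /* assumed FDI_PLL_FB_CLOCK field value */

	/* Same scaling as intel_fdi_pll_freq_update(); result is in kHz. */
	unsigned int pll_freq = (fdi_pll_clk + 2) * 10000;

	printf("FDI PLL freq = %u kHz\n", pll_freq); /* 270000 kHz = 270 MHz */
	return 0;
}
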
@@ -226,7 +226,7 @@ int intel_fdi_link_freq(struct drm_i915_private *i915,
if (HAS_DDI(i915))
return pipe_config->port_clock; /* SPLL */
else
- return i915->fdi_pll_freq;
+ return i915->display.fdi.pll_freq;
}
int ilk_fdi_compute_config(struct intel_crtc *crtc,
@@ -256,7 +256,7 @@ retry:
pipe_config->fdi_lanes = lane;
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false, false);
+ link_bw, &pipe_config->fdi_m_n, false);
ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
if (ret == -EDEADLK)
@@ -789,7 +789,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
- rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
+ rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
@@ -1066,11 +1066,11 @@ void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
if (IS_IRONLAKE(dev_priv)) {
- dev_priv->fdi_funcs = &ilk_funcs;
+ dev_priv->display.funcs.fdi = &ilk_funcs;
} else if (IS_SANDYBRIDGE(dev_priv)) {
- dev_priv->fdi_funcs = &gen6_funcs;
+ dev_priv->display.funcs.fdi = &gen6_funcs;
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
- dev_priv->fdi_funcs = &ivb_funcs;
+ dev_priv->display.funcs.fdi = &ivb_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 791248f812aa..d80e3e8a9b01 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -81,9 +81,9 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
enum fb_op_origin origin)
{
/* Delay flushing when rings are still busy.*/
- spin_lock(&i915->fb_tracking.lock);
- frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
+ frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
if (!frontbuffer_bits)
return;
@@ -111,11 +111,11 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->fb_tracking.lock);
- i915->fb_tracking.flip_bits |= frontbuffer_bits;
+ spin_lock(&i915->display.fb_tracking.lock);
+ i915->display.fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
- i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
}
/**
@@ -131,11 +131,11 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
/* Mask any cancelled flips. */
- frontbuffer_bits &= i915->fb_tracking.flip_bits;
- i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ frontbuffer_bits &= i915->display.fb_tracking.flip_bits;
+ i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
if (frontbuffer_bits)
frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
@@ -155,10 +155,10 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
void intel_frontbuffer_flip(struct drm_i915_private *i915,
unsigned frontbuffer_bits)
{
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
/* Remove stale busy bits due to the old buffer. */
- i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
}
@@ -170,10 +170,10 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
if (origin == ORIGIN_CS) {
- spin_lock(&i915->fb_tracking.lock);
- i915->fb_tracking.busy_bits |= frontbuffer_bits;
- i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
+ i915->display.fb_tracking.busy_bits |= frontbuffer_bits;
+ i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
}
trace_intel_frontbuffer_invalidate(frontbuffer_bits, origin);
@@ -191,11 +191,11 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
if (origin == ORIGIN_CS) {
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
/* Filter out new bits since rendering started. */
- frontbuffer_bits &= i915->fb_tracking.busy_bits;
- i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&i915->fb_tracking.lock);
+ frontbuffer_bits &= i915->display.fb_tracking.busy_bits;
+ i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits;
+ spin_unlock(&i915->display.fb_tracking.lock);
}
if (frontbuffer_bits)
@@ -221,7 +221,7 @@ static void frontbuffer_retire(struct i915_active *ref)
}
static void frontbuffer_release(struct kref *ref)
- __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
+ __releases(&to_i915(front->obj->base.dev)->display.fb_tracking.lock)
{
struct intel_frontbuffer *front =
container_of(ref, typeof(*front), ref);
@@ -238,7 +238,7 @@ static void frontbuffer_release(struct kref *ref)
spin_unlock(&obj->vma.lock);
RCU_INIT_POINTER(obj->frontbuffer, NULL);
- spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
+ spin_unlock(&to_i915(obj->base.dev)->display.fb_tracking.lock);
i915_active_fini(&front->write);
@@ -268,7 +268,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
frontbuffer_retire,
I915_ACTIVE_RETIRE_SLEEPS);
- spin_lock(&i915->fb_tracking.lock);
+ spin_lock(&i915->display.fb_tracking.lock);
if (rcu_access_pointer(obj->frontbuffer)) {
kfree(front);
front = rcu_dereference_protected(obj->frontbuffer, true);
@@ -277,7 +277,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
i915_gem_object_get(obj);
rcu_assign_pointer(obj->frontbuffer, front);
}
- spin_unlock(&i915->fb_tracking.lock);
+ spin_unlock(&i915->display.fb_tracking.lock);
return front;
}
@@ -286,7 +286,7 @@ void intel_frontbuffer_put(struct intel_frontbuffer *front)
{
kref_put_lock(&front->ref,
frontbuffer_release,
- &to_i915(front->obj->base.dev)->fb_tracking.lock);
+ &to_i915(front->obj->base.dev)->display.fb_tracking.lock);
}
/**
@@ -311,6 +311,8 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
*/
BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
BITS_PER_TYPE(atomic_t));
+ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32);
+ BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE);
if (old) {
drm_WARN_ON(old->obj->base.dev,
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index ff0c37b079aa..3c474ed937fb 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -25,6 +25,7 @@
#define __INTEL_FRONTBUFFER_H__
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/kref.h>
#include "gem/i915_gem_object_types.h"
@@ -48,6 +49,23 @@ struct intel_frontbuffer {
struct rcu_head rcu;
};
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
+#define INTEL_FRONTBUFFER(pipe, plane_id) \
+ BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+ BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+ GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
+ INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
+
void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index a6ba7fb72339..74443f57f62d 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -37,6 +37,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
+#include "intel_gmbus_regs.h"
struct intel_gmbus {
struct i2c_adapter adapter;
@@ -45,7 +46,7 @@ struct intel_gmbus {
u32 reg0;
i915_reg_t gpio_reg;
struct i2c_algo_bit_data bit_algo;
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
};
struct gmbus_pin {
@@ -116,6 +117,18 @@ static const struct gmbus_pin gmbus_pins_dg2[] = {
[GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
};
+static const struct gmbus_pin gmbus_pins_mtp[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
+ [GMBUS_PIN_4_CNP] = { "dpd", GPIOE },
+ [GMBUS_PIN_5_MTP] = { "dpe", GPIOF },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+ [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+ [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+ [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
+};
+
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
unsigned int pin)
{
@@ -128,6 +141,9 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915,
} else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
pins = gmbus_pins_dg1;
size = ARRAY_SIZE(gmbus_pins_dg1);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) {
+ pins = gmbus_pins_mtp;
+ size = ARRAY_SIZE(gmbus_pins_mtp);
} else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
pins = gmbus_pins_icp;
size = ARRAY_SIZE(gmbus_pins_icp);
@@ -170,55 +186,55 @@ to_intel_gmbus(struct i2c_adapter *i2c)
}
void
-intel_gmbus_reset(struct drm_i915_private *dev_priv)
+intel_gmbus_reset(struct drm_i915_private *i915)
{
- intel_de_write(dev_priv, GMBUS0, 0);
- intel_de_write(dev_priv, GMBUS4, 0);
+ intel_de_write(i915, GMBUS0(i915), 0);
+ intel_de_write(i915, GMBUS4(i915), 0);
}
-static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void pnv_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(i915, DSPCLK_GATE_D(i915));
if (!enable)
val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(i915, DSPCLK_GATE_D(i915), val);
}
-static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void pch_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
u32 val;
- val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D);
if (!enable)
val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val);
}
-static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
bool enable)
{
u32 val;
- val = intel_de_read(dev_priv, GEN9_CLKGATE_DIS_4);
+ val = intel_de_read(i915, GEN9_CLKGATE_DIS_4);
if (!enable)
val |= BXT_GMBUS_GATING_DIS;
else
val &= ~BXT_GMBUS_GATING_DIS;
- intel_de_write(dev_priv, GEN9_CLKGATE_DIS_4, val);
+ intel_de_write(i915, GEN9_CLKGATE_DIS_4, val);
}
static u32 get_reserved(struct intel_gmbus *bus)
{
- struct drm_i915_private *i915 = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
struct intel_uncore *uncore = &i915->uncore;
u32 reserved = 0;
@@ -234,7 +250,7 @@ static u32 get_reserved(struct intel_gmbus *bus)
static int get_clock(void *data)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
intel_uncore_write_notrace(uncore,
@@ -249,7 +265,7 @@ static int get_clock(void *data)
static int get_data(void *data)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
intel_uncore_write_notrace(uncore,
@@ -264,7 +280,7 @@ static int get_data(void *data)
static void set_clock(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
u32 clock_bits;
@@ -283,7 +299,7 @@ static void set_clock(void *data, int state_high)
static void set_data(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->dev_priv->uncore;
+ struct intel_uncore *uncore = &bus->i915->uncore;
u32 reserved = get_reserved(bus);
u32 data_bits;
@@ -301,12 +317,12 @@ static int
intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- intel_gmbus_reset(dev_priv);
+ intel_gmbus_reset(i915);
- if (IS_PINEVIEW(dev_priv))
- pnv_gmbus_clock_gating(dev_priv, false);
+ if (IS_PINEVIEW(i915))
+ pnv_gmbus_clock_gating(i915, false);
set_data(bus, 1);
set_clock(bus, 1);
@@ -318,13 +334,13 @@ static void
intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
set_data(bus, 1);
set_clock(bus, 1);
- if (IS_PINEVIEW(dev_priv))
- pnv_gmbus_clock_gating(dev_priv, true);
+ if (IS_PINEVIEW(i915))
+ pnv_gmbus_clock_gating(i915, true);
}
static void
@@ -356,7 +372,7 @@ static bool has_gmbus_irq(struct drm_i915_private *i915)
return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915);
}
-static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
+static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en)
{
DEFINE_WAIT(wait);
u32 gmbus2;
@@ -366,21 +382,21 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves.
*/
- if (!has_gmbus_irq(dev_priv))
+ if (!has_gmbus_irq(i915))
irq_en = 0;
- add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- intel_de_write_fw(dev_priv, GMBUS4, irq_en);
+ add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), irq_en);
status |= GMBUS_SATOER;
- ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ ret = wait_for_us((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status,
2);
if (ret)
- ret = wait_for((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ ret = wait_for((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status,
50);
- intel_de_write_fw(dev_priv, GMBUS4, 0);
- remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), 0);
+ remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
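
gmbus_wait() is a two-stage wait: a 2 µs busy-poll on GMBUS2 first, then a sleeping wait of up to 50 ms, with GMBUS_SATOER OR-ed into the status mask so a NAK terminates either stage. The fast-path/slow-path shape, modeled standalone (bit values copied from the register definitions below; the stubbed read is invented):

#include <stdio.h>

#define HW_RDY 0x800 /* GMBUS_HW_RDY, bit 11 */
#define SATOER 0x400 /* GMBUS_SATOER, bit 10 */

/* Stub register read: reports ready on the third poll. */
static unsigned int read_status(int *polls)
{
	return ++(*polls) >= 3 ? HW_RDY : 0;
}

int main(void)
{
	const unsigned int status = HW_RDY | SATOER;
	unsigned int gmbus2 = 0;
	int polls = 0;

	/* Stage 1: short busy-poll (the real code bounds this at ~2us). */
	for (int i = 0; i < 2 && !((gmbus2 = read_status(&polls)) & status); i++)
		;
	/* Stage 2: long wait (the real code sleeps, for up to ~50ms). */
	while (!(gmbus2 & status))
		gmbus2 = read_status(&polls);

	puts(gmbus2 & SATOER ? "NAK (-ENXIO)" : "ready");
	return 0;
}
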
@@ -389,7 +405,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
}
static int
-gmbus_wait_idle(struct drm_i915_private *dev_priv)
+gmbus_wait_idle(struct drm_i915_private *i915)
{
DEFINE_WAIT(wait);
u32 irq_enable;
@@ -397,35 +413,35 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
/* Important: The hw handles only the first bit, so set only one! */
irq_enable = 0;
- if (has_gmbus_irq(dev_priv))
+ if (has_gmbus_irq(i915))
irq_enable = GMBUS_IDLE_EN;
- add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- intel_de_write_fw(dev_priv, GMBUS4, irq_enable);
+ add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
- ret = intel_wait_for_register_fw(&dev_priv->uncore,
- GMBUS2, GMBUS_ACTIVE, 0,
+ ret = intel_wait_for_register_fw(&i915->uncore,
+ GMBUS2(i915), GMBUS_ACTIVE, 0,
10);
- intel_de_write_fw(dev_priv, GMBUS4, 0);
- remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+ intel_de_write_fw(i915, GMBUS4(i915), 0);
+ remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
return ret;
}
-static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+static unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915)
{
- return DISPLAY_VER(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+ return DISPLAY_VER(i915) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
GMBUS_BYTE_COUNT_MAX;
}
static int
-gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
+gmbus_xfer_read_chunk(struct drm_i915_private *i915,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus0_reg, u32 gmbus1_index)
{
unsigned int size = len;
- bool burst_read = len > gmbus_max_xfer_size(dev_priv);
+ bool burst_read = len > gmbus_max_xfer_size(i915);
bool extra_byte_added = false;
if (burst_read) {
@@ -438,21 +454,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
len++;
}
size = len % 256 + 256;
- intel_de_write_fw(dev_priv, GMBUS0,
+ intel_de_write_fw(i915, GMBUS0(i915),
gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
}
- intel_de_write_fw(dev_priv, GMBUS1,
+ intel_de_write_fw(i915, GMBUS1(i915),
gmbus1_index |
GMBUS_CYCLE_WAIT |
(size << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
u32 val, loop = 0;
- ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+ ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
return ret;
- val = intel_de_read_fw(dev_priv, GMBUS3);
+ val = intel_de_read_fw(i915, GMBUS3(i915));
do {
if (extra_byte_added && len == 1)
break;
@@ -463,7 +479,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
if (burst_read && len == size - 4)
/* Reset the override bit */
- intel_de_write_fw(dev_priv, GMBUS0, gmbus0_reg);
+ intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg);
}
return 0;
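
The burst-read path programs the byte count as len % 256 + 256, folding any burst length into the 256..511 range the GEN9+ counter can hold (GEN9_GMBUS_BYTE_COUNT_MAX is 511U); the GMBUS_BYTE_CNT_OVERRIDE bit is set for the burst and cleared again near the end of the transfer. A few worked values of that fold, standalone (the extra-byte adjustment seen above is omitted):

#include <stdio.h>

/* Mirrors the chunk-size fold used on the GMBUS burst-read path. */
static unsigned int burst_size(unsigned int len)
{
	return len % 256 + 256;
}

int main(void)
{
	/* 767 is INTEL_GMBUS_BURST_READ_MAX_LEN; it folds to the 511 max. */
	printf("len=300 -> size=%u\n", burst_size(300)); /* 300 */
	printf("len=511 -> size=%u\n", burst_size(511)); /* 511 */
	printf("len=767 -> size=%u\n", burst_size(767)); /* 511 */
	return 0;
}
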
@@ -480,7 +496,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
#define INTEL_GMBUS_BURST_READ_MAX_LEN 767U
static int
-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg,
u32 gmbus0_reg, u32 gmbus1_index)
{
u8 *buf = msg->buf;
@@ -489,12 +505,12 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- if (HAS_GMBUS_BURST_READ(dev_priv))
+ if (HAS_GMBUS_BURST_READ(i915))
len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
else
- len = min(rx_size, gmbus_max_xfer_size(dev_priv));
+ len = min(rx_size, gmbus_max_xfer_size(i915));
- ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len,
+ ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len,
gmbus0_reg, gmbus1_index);
if (ret)
return ret;
@@ -507,7 +523,7 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
}
static int
-gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
+gmbus_xfer_write_chunk(struct drm_i915_private *i915,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus1_index)
{
@@ -520,8 +536,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
len -= 1;
}
- intel_de_write_fw(dev_priv, GMBUS3, val);
- intel_de_write_fw(dev_priv, GMBUS1,
+ intel_de_write_fw(i915, GMBUS3(i915), val);
+ intel_de_write_fw(i915, GMBUS1(i915),
gmbus1_index |
GMBUS_CYCLE_WAIT |
(chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -531,9 +547,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
- intel_de_write_fw(dev_priv, GMBUS3, val);
+ intel_de_write_fw(i915, GMBUS3(i915), val);
- ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
+ ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
return ret;
}
@@ -542,7 +558,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
}
static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg,
u32 gmbus1_index)
{
u8 *buf = msg->buf;
@@ -551,9 +567,9 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
int ret;
do {
- len = min(tx_size, gmbus_max_xfer_size(dev_priv));
+ len = min(tx_size, gmbus_max_xfer_size(i915));
- ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
+ ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len,
gmbus1_index);
if (ret)
return ret;
@@ -580,7 +596,7 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
}
static int
-gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
+gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs,
u32 gmbus0_reg)
{
u32 gmbus1_index = 0;
@@ -596,17 +612,17 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
/* GMBUS5 holds 16-bit index */
if (gmbus5)
- intel_de_write_fw(dev_priv, GMBUS5, gmbus5);
+ intel_de_write_fw(i915, GMBUS5(i915), gmbus5);
if (msgs[1].flags & I2C_M_RD)
- ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
+ ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg,
gmbus1_index);
else
- ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);
+ ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index);
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
- intel_de_write_fw(dev_priv, GMBUS5, 0);
+ intel_de_write_fw(i915, GMBUS5(i915), 0);
return ret;
}
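
An index transfer is a short same-address write (the one- or two-byte register offset) immediately followed by the payload message, which the controller executes as a single GMBUS index cycle; that is why the caller's loop advances by two messages at once (inc = 2). A minimal model of such a pair, mirroring just enough of struct i2c_msg for illustration:

#include <stdint.h>
#include <stdio.h>

#define I2C_M_RD 0x0001

/* Minimal stand-in for struct i2c_msg. */
struct msg {
	uint16_t addr;
	uint16_t flags;
	uint16_t len;
	uint8_t *buf;
};

int main(void)
{
	uint8_t index = 0x00; /* e.g. EDID offset */
	uint8_t edid[128];

	/* Message pair consumed as one GMBUS index cycle (inc = 2). */
	struct msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &index },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
	};

	printf("index xfer: %zu messages, read %u bytes\n",
	       sizeof(msgs) / sizeof(msgs[0]), (unsigned)msgs[1].len);
	return 0;
}
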
@@ -616,34 +632,34 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
u32 gmbus0_source)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
int i = 0, inc, try = 0;
int ret = 0;
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_gmbus_clock_gating(dev_priv, false);
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
- pch_gmbus_clock_gating(dev_priv, false);
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ bxt_gmbus_clock_gating(i915, false);
+ else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ pch_gmbus_clock_gating(i915, false);
retry:
- intel_de_write_fw(dev_priv, GMBUS0, gmbus0_source | bus->reg0);
+ intel_de_write_fw(i915, GMBUS0(i915), gmbus0_source | bus->reg0);
for (; i < num; i += inc) {
inc = 1;
if (gmbus_is_index_xfer(msgs, i, num)) {
- ret = gmbus_index_xfer(dev_priv, &msgs[i],
+ ret = gmbus_index_xfer(i915, &msgs[i],
gmbus0_source | bus->reg0);
inc = 2; /* an index transmission is two msgs */
} else if (msgs[i].flags & I2C_M_RD) {
- ret = gmbus_xfer_read(dev_priv, &msgs[i],
+ ret = gmbus_xfer_read(i915, &msgs[i],
gmbus0_source | bus->reg0, 0);
} else {
- ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
+ ret = gmbus_xfer_write(i915, &msgs[i], 0);
}
if (!ret)
- ret = gmbus_wait(dev_priv,
+ ret = gmbus_wait(i915,
GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN);
if (ret == -ETIMEDOUT)
goto timeout;
@@ -655,19 +671,19 @@ retry:
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
- intel_de_write_fw(dev_priv, GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
- if (gmbus_wait_idle(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (gmbus_wait_idle(i915)) {
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] timed out waiting for idle\n",
adapter->name);
ret = -ETIMEDOUT;
}
- intel_de_write_fw(dev_priv, GMBUS0, 0);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
ret = ret ?: i;
goto out;
@@ -686,8 +702,8 @@ clear_err:
* it's slow responding and only answers on the 2nd retry.
*/
ret = -ENXIO;
- if (gmbus_wait_idle(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (gmbus_wait_idle(i915)) {
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] timed out after NAK\n",
adapter->name);
ret = -ETIMEDOUT;
@@ -697,11 +713,11 @@ clear_err:
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
- intel_de_write_fw(dev_priv, GMBUS1, GMBUS_SW_CLR_INT);
- intel_de_write_fw(dev_priv, GMBUS1, 0);
- intel_de_write_fw(dev_priv, GMBUS0, 0);
+ intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT);
+ intel_de_write_fw(i915, GMBUS1(i915), 0);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
- drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
@@ -712,7 +728,7 @@ clear_err:
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] NAK on first message, retry\n",
adapter->name);
goto retry;
@@ -721,10 +737,10 @@ clear_err:
goto out;
timeout:
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
- intel_de_write_fw(dev_priv, GMBUS0, 0);
+ intel_de_write_fw(i915, GMBUS0(i915), 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -734,10 +750,10 @@ timeout:
out:
/* Display WA #0868: skl,bxt,kbl,cfl,glk */
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_gmbus_clock_gating(dev_priv, true);
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
- pch_gmbus_clock_gating(dev_priv, true);
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ bxt_gmbus_clock_gating(i915, true);
+ else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915))
+ pch_gmbus_clock_gating(i915, true);
return ret;
}
@@ -746,11 +762,11 @@ static int
gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
intel_wakeref_t wakeref;
int ret;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
@@ -762,7 +778,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+ intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
@@ -770,7 +786,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
u8 cmd = DRM_HDCP_DDC_AKSV;
u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
struct i2c_msg msgs[] = {
@@ -790,8 +806,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
intel_wakeref_t wakeref;
int ret;
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- mutex_lock(&dev_priv->gmbus_mutex);
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS);
+ mutex_lock(&i915->display.gmbus.mutex);
/*
* In order to output Aksv to the receiver, use an indexed write to
@@ -800,8 +816,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
*/
ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
- mutex_unlock(&dev_priv->gmbus_mutex);
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
+ mutex_unlock(&i915->display.gmbus.mutex);
+ intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
@@ -824,27 +840,27 @@ static void gmbus_lock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- mutex_lock(&dev_priv->gmbus_mutex);
+ mutex_lock(&i915->display.gmbus.mutex);
}
static int gmbus_trylock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- return mutex_trylock(&dev_priv->gmbus_mutex);
+ return mutex_trylock(&i915->display.gmbus.mutex);
}
static void gmbus_unlock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- mutex_unlock(&dev_priv->gmbus_mutex);
+ mutex_unlock(&i915->display.gmbus.mutex);
}
static const struct i2c_lock_operations gmbus_lock_ops = {
@@ -855,31 +871,31 @@ static const struct i2c_lock_operations gmbus_lock_ops = {
/**
* intel_gmbus_setup - instantiate all Intel i2c GMBuses
- * @dev_priv: i915 device private
+ * @i915: i915 device private
*/
-int intel_gmbus_setup(struct drm_i915_private *dev_priv)
+int intel_gmbus_setup(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
unsigned int pin;
int ret;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
- else if (!HAS_GMCH(dev_priv))
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ i915->display.gmbus.mmio_base = VLV_DISPLAY_BASE;
+ else if (!HAS_GMCH(i915))
/*
* Broxton uses the same PCH offsets for South Display Engine,
* even though it doesn't have a PCH.
*/
- dev_priv->gpio_mmio_base = PCH_DISPLAY_BASE;
+ i915->display.gmbus.mmio_base = PCH_DISPLAY_BASE;
- mutex_init(&dev_priv->gmbus_mutex);
- init_waitqueue_head(&dev_priv->gmbus_wait_queue);
+ mutex_init(&i915->display.gmbus.mutex);
+ init_waitqueue_head(&i915->display.gmbus.wait_queue);
- for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+ for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) {
const struct gmbus_pin *gmbus_pin;
struct intel_gmbus *bus;
- gmbus_pin = get_gmbus_pin(dev_priv, pin);
+ gmbus_pin = get_gmbus_pin(i915, pin);
if (!gmbus_pin)
continue;
@@ -896,7 +912,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
"i915 gmbus %s", gmbus_pin->name);
bus->adapter.dev.parent = &pdev->dev;
- bus->dev_priv = dev_priv;
+ bus->i915 = i915;
bus->adapter.algo = &gmbus_algorithm;
bus->adapter.lock_ops = &gmbus_lock_ops;
@@ -911,10 +927,10 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
- if (IS_I830(dev_priv))
+ if (IS_I830(i915))
bus->force_bit = 1;
- intel_gpio_setup(bus, GPIO(gmbus_pin->gpio));
+ intel_gpio_setup(bus, GPIO(i915, gmbus_pin->gpio));
ret = i2c_add_adapter(&bus->adapter);
if (ret) {
@@ -922,43 +938,43 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv)
goto err;
}
- dev_priv->gmbus[pin] = bus;
+ i915->display.gmbus.bus[pin] = bus;
}
- intel_gmbus_reset(dev_priv);
+ intel_gmbus_reset(i915);
return 0;
err:
- intel_gmbus_teardown(dev_priv);
+ intel_gmbus_teardown(i915);
return ret;
}
-struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
+struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915,
unsigned int pin)
{
- if (drm_WARN_ON(&dev_priv->drm, pin >= ARRAY_SIZE(dev_priv->gmbus) ||
- !dev_priv->gmbus[pin]))
+ if (drm_WARN_ON(&i915->drm, pin >= ARRAY_SIZE(i915->display.gmbus.bus) ||
+ !i915->display.gmbus.bus[pin]))
return NULL;
- return &dev_priv->gmbus[pin]->adapter;
+ return &i915->display.gmbus.bus[pin]->adapter;
}
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
- struct drm_i915_private *dev_priv = bus->dev_priv;
+ struct drm_i915_private *i915 = bus->i915;
- mutex_lock(&dev_priv->gmbus_mutex);
+ mutex_lock(&i915->display.gmbus.mutex);
bus->force_bit += force_bit ? 1 : -1;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"%sabling bit-banging on %s. force bit now %d\n",
force_bit ? "en" : "dis", adapter->name,
bus->force_bit);
- mutex_unlock(&dev_priv->gmbus_mutex);
+ mutex_unlock(&i915->display.gmbus.mutex);
}
bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -968,20 +984,20 @@ bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
return bus->force_bit;
}
-void intel_gmbus_teardown(struct drm_i915_private *dev_priv)
+void intel_gmbus_teardown(struct drm_i915_private *i915)
{
unsigned int pin;
- for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+ for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) {
struct intel_gmbus *bus;
- bus = dev_priv->gmbus[pin];
+ bus = i915->display.gmbus.bus[pin];
if (!bus)
continue;
i2c_del_adapter(&bus->adapter);
kfree(bus);
- dev_priv->gmbus[pin] = NULL;
+ i915->display.gmbus.bus[pin] = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h
index 8edc2e99cf53..20f704bd4e70 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.h
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.h
@@ -24,6 +24,7 @@ struct i2c_adapter;
#define GMBUS_PIN_2_BXT 2
#define GMBUS_PIN_3_BXT 3
#define GMBUS_PIN_4_CNP 4
+#define GMBUS_PIN_5_MTP 5
#define GMBUS_PIN_9_TC1_ICP 9
#define GMBUS_PIN_10_TC2_ICP 10
#define GMBUS_PIN_11_TC3_ICP 11
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
new file mode 100644
index 000000000000..53aacbda983c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GMBUS_REGS_H__
+#define __INTEL_GMBUS_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define GMBUS_MMIO_BASE(__i915) ((__i915)->display.gmbus.mmio_base)
+
+#define GPIO(__i915, gpio) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5010 + 4 * (gpio))
+#define GPIO_CLOCK_DIR_MASK (1 << 0)
+#define GPIO_CLOCK_DIR_IN (0 << 1)
+#define GPIO_CLOCK_DIR_OUT (1 << 1)
+#define GPIO_CLOCK_VAL_MASK (1 << 2)
+#define GPIO_CLOCK_VAL_OUT (1 << 3)
+#define GPIO_CLOCK_VAL_IN (1 << 4)
+#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+#define GPIO_DATA_DIR_MASK (1 << 8)
+#define GPIO_DATA_DIR_IN (0 << 9)
+#define GPIO_DATA_DIR_OUT (1 << 9)
+#define GPIO_DATA_VAL_MASK (1 << 10)
+#define GPIO_DATA_VAL_OUT (1 << 11)
+#define GPIO_DATA_VAL_IN (1 << 12)
+#define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+/* clock/port select */
+#define GMBUS0(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5100)
+#define GMBUS_AKSV_SELECT (1 << 11)
+#define GMBUS_RATE_100KHZ (0 << 8)
+#define GMBUS_RATE_50KHZ (1 << 8)
+#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
+
+/* command/status */
+#define GMBUS1(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5104)
+#define GMBUS_SW_CLR_INT (1 << 31)
+#define GMBUS_SW_RDY (1 << 30)
+#define GMBUS_ENT (1 << 29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0 << 25)
+#define GMBUS_CYCLE_WAIT (1 << 25)
+#define GMBUS_CYCLE_INDEX (2 << 25)
+#define GMBUS_CYCLE_STOP (4 << 25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_BYTE_COUNT_MAX 256U
+#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1 << 0)
+#define GMBUS_SLAVE_WRITE (0 << 0)
+
+/* status */
+#define GMBUS2(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5108)
+#define GMBUS_INUSE (1 << 15)
+#define GMBUS_HW_WAIT_PHASE (1 << 14)
+#define GMBUS_STALL_TIMEOUT (1 << 13)
+#define GMBUS_INT (1 << 12)
+#define GMBUS_HW_RDY (1 << 11)
+#define GMBUS_SATOER (1 << 10)
+#define GMBUS_ACTIVE (1 << 9)
+
+/* data buffer bytes 3-0 */
+#define GMBUS3(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x510c)
+
+/* interrupt mask (Pineview+) */
+#define GMBUS4(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5110)
+#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
+#define GMBUS_NAK_EN (1 << 3)
+#define GMBUS_IDLE_EN (1 << 2)
+#define GMBUS_HW_WAIT_EN (1 << 1)
+#define GMBUS_HW_RDY_EN (1 << 0)
+
+/* byte index */
+#define GMBUS5(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5120)
+#define GMBUS_2BYTE_INDEX_EN (1 << 31)
+
+#endif /* __INTEL_GMBUS_REGS_H__ */
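
The new intel_gmbus_regs.h keys every GMBUS register off the per-device display.gmbus.mmio_base rather than a global constant, which is what lets the same definitions serve both zero-based and PCH-based platforms. A small sketch of how the parameterization resolves to absolute offsets (the 0xc0000 base is assumed to correspond to PCH_DISPLAY_BASE; treat both bases as illustrative):

#include <stdio.h>

/* Offsets copied from the definitions above; the bases are examples. */
static unsigned int gmbus0(unsigned int mmio_base)
{
	return mmio_base + 0x5100;	/* clock/port select */
}

static unsigned int gpio(unsigned int mmio_base, unsigned int idx)
{
	return mmio_base + 0x5010 + 4 * idx;
}

int main(void)
{
	printf("GMBUS0 @ base 0:       0x%x\n", gmbus0(0x0));
	printf("GMBUS0 @ base 0xc0000: 0x%x\n", gmbus0(0xc0000));
	printf("GPIO(3) @ base 0:      0x%x\n", gpio(0x0, 3));
	return 0;
}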
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 7dbc9f0bb24f..6406fd487ee5 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -23,6 +23,7 @@
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
#include "intel_pcode.h"
#define KEY_LOAD_TRIES 5
@@ -209,12 +210,12 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
return false;
/* MEI interface is solid */
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return false;
}
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
/* Sink's capability for HDCP2.2 */
hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
@@ -1131,8 +1132,8 @@ static void intel_hdcp_prop_work(struct work_struct *work)
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
- return INTEL_INFO(dev_priv)->display.has_hdcp &&
- (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
+ return RUNTIME_INFO(dev_priv)->has_hdcp &&
+ (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
}
static int
@@ -1145,11 +1146,11 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1157,7 +1158,7 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
if (ret)
drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1175,11 +1176,11 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1189,7 +1190,7 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1203,18 +1204,18 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1229,11 +1230,11 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1241,7 +1242,7 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1256,11 +1257,11 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1268,7 +1269,7 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1283,11 +1284,11 @@ hdcp2_verify_lprime(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1295,7 +1296,7 @@ hdcp2_verify_lprime(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1309,11 +1310,11 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1321,7 +1322,7 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1338,11 +1339,11 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1352,7 +1353,7 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
if (ret < 0)
drm_dbg_kms(&dev_priv->drm,
"Verify rep topology failed. %d\n", ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1367,18 +1368,18 @@ hdcp2_verify_mprime(struct intel_connector *connector,
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1391,11 +1392,11 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
@@ -1403,7 +1404,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
if (ret < 0)
drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
ret);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -1415,17 +1416,17 @@ static int hdcp2_close_mei_session(struct intel_connector *connector)
struct i915_hdcp_comp_master *comp;
int ret;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- comp = dev_priv->hdcp_master;
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ comp = dev_priv->display.hdcp.master;
if (!comp || !comp->ops) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return -EINVAL;
}
ret = comp->ops->close_hdcp_session(comp->mei_dev,
&dig_port->hdcp_port_data);
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return ret;
}
@@ -2143,10 +2144,10 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
- dev_priv->hdcp_master->mei_dev = mei_kdev;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data;
+ dev_priv->display.hdcp.master->mei_dev = mei_kdev;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return 0;
}
@@ -2157,9 +2158,9 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- dev_priv->hdcp_master = NULL;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.master = NULL;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
}
static const struct component_ops i915_hdcp_component_ops = {
@@ -2250,19 +2251,19 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
if (!is_hdcp2_supported(dev_priv))
return;
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added);
- dev_priv->hdcp_comp_added = true;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->display.hdcp.comp_added = true;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
I915_COMPONENT_HDCP);
if (ret < 0) {
drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
ret);
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- dev_priv->hdcp_comp_added = false;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ dev_priv->display.hdcp.comp_added = false;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return;
}
}
@@ -2475,14 +2476,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->hdcp_comp_mutex);
- if (!dev_priv->hdcp_comp_added) {
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ mutex_lock(&dev_priv->display.hdcp.comp_mutex);
+ if (!dev_priv->display.hdcp.comp_added) {
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
return;
}
- dev_priv->hdcp_comp_added = false;
- mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->display.hdcp.comp_added = false;
+ mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
}
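
Every hdcp2_* helper above repeats the same guard: take display.hdcp.comp_mutex, check that the MEI component master is bound and has ops, and fail with -EINVAL otherwise. A userspace sketch of that pattern, with a pthread mutex standing in for the kernel mutex and stand-in types throughout:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct comp_master { int (*op)(void); };

static pthread_mutex_t comp_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct comp_master *master;	/* set on bind, cleared on unbind */

static int call_mei(void)
{
	int ret;

	pthread_mutex_lock(&comp_mutex);
	if (!master || !master->op) {
		pthread_mutex_unlock(&comp_mutex);
		return -EINVAL;	/* component not bound yet */
	}
	ret = master->op();
	pthread_mutex_unlock(&comp_mutex);
	return ret;
}

static int op_ok(void) { return 0; }

int main(void)
{
	static struct comp_master m = { .op = op_ok };

	printf("unbound: %d\n", call_mei());
	pthread_mutex_lock(&comp_mutex);
	master = &m;	/* the bind step, as in i915_hdcp_component_bind() */
	pthread_mutex_unlock(&comp_mutex);
	printf("bound:   %d\n", call_mei());
	return 0;
}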
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
new file mode 100644
index 000000000000..2a3733e8966c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_REGS_H__
+#define __INTEL_HDCP_REGS_H__
+
+#include "i915_reg_defs.h"
+
+/* HDCP Key Registers */
+#define HDCP_KEY_CONF _MMIO(0x66c00)
+#define HDCP_AKSV_SEND_TRIGGER REG_BIT(31)
+#define HDCP_CLEAR_KEYS_TRIGGER REG_BIT(30)
+#define HDCP_KEY_LOAD_TRIGGER REG_BIT(8)
+#define HDCP_KEY_STATUS _MMIO(0x66c04)
+#define HDCP_FUSE_IN_PROGRESS REG_BIT(7)
+#define HDCP_FUSE_ERROR REG_BIT(6)
+#define HDCP_FUSE_DONE REG_BIT(5)
+#define HDCP_KEY_LOAD_STATUS REG_BIT(1)
+#define HDCP_KEY_LOAD_DONE REG_BIT(0)
+#define HDCP_AKSV_LO _MMIO(0x66c10)
+#define HDCP_AKSV_HI _MMIO(0x66c14)
+
+/* HDCP Repeater Registers */
+#define HDCP_REP_CTL _MMIO(0x66d00)
+#define HDCP_TRANSA_REP_PRESENT REG_BIT(31)
+#define HDCP_TRANSB_REP_PRESENT REG_BIT(30)
+#define HDCP_TRANSC_REP_PRESENT REG_BIT(29)
+#define HDCP_TRANSD_REP_PRESENT REG_BIT(28)
+#define HDCP_DDIB_REP_PRESENT REG_BIT(30)
+#define HDCP_DDIA_REP_PRESENT REG_BIT(29)
+#define HDCP_DDIC_REP_PRESENT REG_BIT(28)
+#define HDCP_DDID_REP_PRESENT REG_BIT(27)
+#define HDCP_DDIF_REP_PRESENT REG_BIT(26)
+#define HDCP_DDIE_REP_PRESENT REG_BIT(25)
+#define HDCP_TRANSA_SHA1_M0 (1 << 20)
+#define HDCP_TRANSB_SHA1_M0 (2 << 20)
+#define HDCP_TRANSC_SHA1_M0 (3 << 20)
+#define HDCP_TRANSD_SHA1_M0 (4 << 20)
+#define HDCP_DDIB_SHA1_M0 (1 << 20)
+#define HDCP_DDIA_SHA1_M0 (2 << 20)
+#define HDCP_DDIC_SHA1_M0 (3 << 20)
+#define HDCP_DDID_SHA1_M0 (4 << 20)
+#define HDCP_DDIF_SHA1_M0 (5 << 20)
+#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */
+#define HDCP_SHA1_BUSY REG_BIT(16)
+#define HDCP_SHA1_READY REG_BIT(17)
+#define HDCP_SHA1_COMPLETE REG_BIT(18)
+#define HDCP_SHA1_V_MATCH REG_BIT(19)
+#define HDCP_SHA1_TEXT_32 (1 << 1)
+#define HDCP_SHA1_COMPLETE_HASH (2 << 1)
+#define HDCP_SHA1_TEXT_24 (4 << 1)
+#define HDCP_SHA1_TEXT_16 (5 << 1)
+#define HDCP_SHA1_TEXT_8 (6 << 1)
+#define HDCP_SHA1_TEXT_0 (7 << 1)
+#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04)
+#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08)
+#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
+#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
+#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
+#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4))
+#define HDCP_SHA_TEXT _MMIO(0x66d18)
+
+/* HDCP Auth Registers */
+#define _PORTA_HDCP_AUTHENC 0x66800
+#define _PORTB_HDCP_AUTHENC 0x66500
+#define _PORTC_HDCP_AUTHENC 0x66600
+#define _PORTD_HDCP_AUTHENC 0x66700
+#define _PORTE_HDCP_AUTHENC 0x66A00
+#define _PORTF_HDCP_AUTHENC 0x66900
+#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \
+ _PORTA_HDCP_AUTHENC, \
+ _PORTB_HDCP_AUTHENC, \
+ _PORTC_HDCP_AUTHENC, \
+ _PORTD_HDCP_AUTHENC, \
+ _PORTE_HDCP_AUTHENC, \
+ _PORTF_HDCP_AUTHENC) + (x))
+#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
+#define _TRANSA_HDCP_CONF 0x66400
+#define _TRANSB_HDCP_CONF 0x66500
+#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
+ _TRANSB_HDCP_CONF)
+#define HDCP_CONF(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_CONF(trans) : \
+ PORT_HDCP_CONF(port))
+
+#define HDCP_CONF_CAPTURE_AN REG_BIT(0)
+#define HDCP_CONF_AUTH_AND_ENC (REG_BIT(1) | REG_BIT(0))
+#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
+#define _TRANSA_HDCP_ANINIT 0x66404
+#define _TRANSB_HDCP_ANINIT 0x66504
+#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_ANINIT, \
+ _TRANSB_HDCP_ANINIT)
+#define HDCP_ANINIT(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANINIT(trans) : \
+ PORT_HDCP_ANINIT(port))
+
+#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
+#define _TRANSA_HDCP_ANLO 0x66408
+#define _TRANSB_HDCP_ANLO 0x66508
+#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
+ _TRANSB_HDCP_ANLO)
+#define HDCP_ANLO(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANLO(trans) : \
+ PORT_HDCP_ANLO(port))
+
+#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
+#define _TRANSA_HDCP_ANHI 0x6640C
+#define _TRANSB_HDCP_ANHI 0x6650C
+#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
+ _TRANSB_HDCP_ANHI)
+#define HDCP_ANHI(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_ANHI(trans) : \
+ PORT_HDCP_ANHI(port))
+
+#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
+#define _TRANSA_HDCP_BKSVLO 0x66410
+#define _TRANSB_HDCP_BKSVLO 0x66510
+#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVLO, \
+ _TRANSB_HDCP_BKSVLO)
+#define HDCP_BKSVLO(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVLO(trans) : \
+ PORT_HDCP_BKSVLO(port))
+
+#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
+#define _TRANSA_HDCP_BKSVHI 0x66414
+#define _TRANSB_HDCP_BKSVHI 0x66514
+#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_BKSVHI, \
+ _TRANSB_HDCP_BKSVHI)
+#define HDCP_BKSVHI(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_BKSVHI(trans) : \
+ PORT_HDCP_BKSVHI(port))
+
+#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
+#define _TRANSA_HDCP_RPRIME 0x66418
+#define _TRANSB_HDCP_RPRIME 0x66518
+#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_RPRIME, \
+ _TRANSB_HDCP_RPRIME)
+#define HDCP_RPRIME(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_RPRIME(trans) : \
+ PORT_HDCP_RPRIME(port))
+
+#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
+#define _TRANSA_HDCP_STATUS 0x6641C
+#define _TRANSB_HDCP_STATUS 0x6651C
+#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP_STATUS, \
+ _TRANSB_HDCP_STATUS)
+#define HDCP_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP_STATUS(trans) : \
+ PORT_HDCP_STATUS(port))
+
+#define HDCP_STATUS_STREAM_A_ENC REG_BIT(31)
+#define HDCP_STATUS_STREAM_B_ENC REG_BIT(30)
+#define HDCP_STATUS_STREAM_C_ENC REG_BIT(29)
+#define HDCP_STATUS_STREAM_D_ENC REG_BIT(28)
+#define HDCP_STATUS_AUTH REG_BIT(21)
+#define HDCP_STATUS_ENC REG_BIT(20)
+#define HDCP_STATUS_RI_MATCH REG_BIT(19)
+#define HDCP_STATUS_R0_READY REG_BIT(18)
+#define HDCP_STATUS_AN_READY REG_BIT(17)
+#define HDCP_STATUS_CIPHER REG_BIT(16)
+#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
+
+/* HDCP2.2 Registers */
+#define _PORTA_HDCP2_BASE 0x66800
+#define _PORTB_HDCP2_BASE 0x66500
+#define _PORTC_HDCP2_BASE 0x66600
+#define _PORTD_HDCP2_BASE 0x66700
+#define _PORTE_HDCP2_BASE 0x66A00
+#define _PORTF_HDCP2_BASE 0x66900
+#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
+ _PORTA_HDCP2_BASE, \
+ _PORTB_HDCP2_BASE, \
+ _PORTC_HDCP2_BASE, \
+ _PORTD_HDCP2_BASE, \
+ _PORTE_HDCP2_BASE, \
+ _PORTF_HDCP2_BASE) + (x))
+
+#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
+#define _TRANSA_HDCP2_AUTH 0x66498
+#define _TRANSB_HDCP2_AUTH 0x66598
+#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
+ _TRANSB_HDCP2_AUTH)
+#define AUTH_LINK_AUTHENTICATED REG_BIT(31)
+#define AUTH_LINK_TYPE REG_BIT(30)
+#define AUTH_FORCE_CLR_INPUTCTR REG_BIT(19)
+#define AUTH_CLR_KEYS REG_BIT(18)
+#define HDCP2_AUTH(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH(trans) : \
+ PORT_HDCP2_AUTH(port))
+
+#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define _TRANSA_HDCP2_CTL 0x664B0
+#define _TRANSB_HDCP2_CTL 0x665B0
+#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
+ _TRANSB_HDCP2_CTL)
+#define CTL_LINK_ENCRYPTION_REQ REG_BIT(31)
+#define HDCP2_CTL(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_CTL(trans) : \
+ PORT_HDCP2_CTL(port))
+
+#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define _TRANSA_HDCP2_STATUS 0x664B4
+#define _TRANSB_HDCP2_STATUS 0x665B4
+#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STATUS, \
+ _TRANSB_HDCP2_STATUS)
+#define LINK_TYPE_STATUS REG_BIT(22)
+#define LINK_AUTH_STATUS REG_BIT(21)
+#define LINK_ENCRYPTION_STATUS REG_BIT(20)
+#define HDCP2_STATUS(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STATUS(trans) : \
+ PORT_HDCP2_STATUS(port))
+
+#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
+#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
+#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
+#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
+#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
+ _PIPEA_HDCP2_STREAM_STATUS, \
+ _PIPEB_HDCP2_STREAM_STATUS, \
+ _PIPEC_HDCP2_STREAM_STATUS, \
+ _PIPED_HDCP2_STREAM_STATUS))
+
+#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
+#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
+#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_STREAM_STATUS, \
+ _TRANSB_HDCP2_STREAM_STATUS)
+#define STREAM_ENCRYPTION_STATUS REG_BIT(31)
+#define STREAM_TYPE_STATUS REG_BIT(30)
+#define HDCP2_STREAM_STATUS(dev_priv, trans, pipe) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_STREAM_STATUS(trans) : \
+ PIPE_HDCP2_STREAM_STATUS(pipe))
+
+#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
+#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
+#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
+ _PORTA_HDCP2_AUTH_STREAM, \
+ _PORTB_HDCP2_AUTH_STREAM)
+#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
+#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
+#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
+ _TRANSA_HDCP2_AUTH_STREAM, \
+ _TRANSB_HDCP2_AUTH_STREAM)
+#define AUTH_STREAM_TYPE REG_BIT(31)
+#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
+ (GRAPHICS_VER(dev_priv) >= 12 ? \
+ TRANS_HDCP2_AUTH_STREAM(trans) : \
+ PORT_HDCP2_AUTH_STREAM(port))
+
+#endif /* __INTEL_HDCP_REGS_H__ */
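
The HDCP_*() and HDCP2_*() selector macros in this new header encode one rule: display version 12+ parts address HDCP state per transcoder, while older parts address it per port. A sketch of that selection, with the transcoder/port bases copied from the definitions above (the array layout itself is illustrative):

#include <stdio.h>

/* Bases from _TRANS[AB]_HDCP_CONF and _PORT[A-D]_HDCP_AUTHENC above. */
static unsigned int hdcp_conf(int ver, unsigned int trans, unsigned int port)
{
	static const unsigned int trans_base[] = { 0x66400, 0x66500 };
	static const unsigned int port_base[]  = { 0x66800, 0x66500,
						   0x66600, 0x66700 };

	return ver >= 12 ? trans_base[trans] : port_base[port];
}

int main(void)
{
	printf("gen12, transcoder B: 0x%x\n", hdcp_conf(12, 1, 0));
	printf("gen9,  port C:       0x%x\n", hdcp_conf(9, 0, 2));
	return 0;
}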
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index ebd91aa69dd2..7816b2a33fee 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -50,6 +50,7 @@
#include "intel_dp.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
+#include "intel_hdcp_regs.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
@@ -1891,7 +1892,7 @@ int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
* 1.5x for 12bpc
* 1.25x for 10bpc
*/
- return clock * bpc / 8;
+ return DIV_ROUND_CLOSEST(clock * bpc, 8);
}
static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc)
@@ -2001,6 +2002,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
clock *= 2;
}
+ /*
+	 * HDMI 2.1 requires higher resolution modes like 8K60, 4K120 to be
+	 * enumerated only if FRL is supported. Current platforms do not
+	 * support FRL, so prune the higher resolution modes that require a
+	 * dotclock above 600MHz.
+ */
+ if (clock > 600000)
+ return MODE_CLOCK_HIGH;
+
ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, ycbcr_420_only);
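
The intel_hdmi_tmds_clock() change above swaps truncating division for round-to-nearest when scaling the clock by bpc/8 (1.25x at 10 bpc, 1.5x at 12 bpc). A worked example of that arithmetic, using 4K60's 594000 kHz as an assumed sample clock; note the 10 and 12 bpc results also land above the new 600 MHz FRL-less prune threshold:

#include <stdio.h>

/* Round-to-nearest clock * bpc / 8, as DIV_ROUND_CLOSEST() computes it
 * for positive operands. */
static int tmds_clock(int clock, int bpc)
{
	return (clock * bpc + 4) / 8;
}

int main(void)
{
	printf("8 bpc:  %d kHz\n", tmds_clock(594000, 8));	/* 594000 */
	printf("10 bpc: %d kHz\n", tmds_clock(594000, 10));	/* 742500 */
	printf("12 bpc: %d kHz\n", tmds_clock(594000, 12));	/* 891000 */
	return 0;
}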
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 5f8b4f481cff..f7a2f485b177 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -119,13 +119,13 @@ intel_connector_hpd_pin(struct intel_connector *connector)
* responsible for further action.
*
* The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
- * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
+ * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
* @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
* short IRQs count as +1. If this threshold is exceeded, it's considered an
* IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
*
* By default, most systems will only count long IRQs towards
- * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
+ * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also
* suffer from short IRQ storms and must also track these. Because short IRQ
* storms are naturally caused by sideband interactions with DP MST devices,
* short IRQ detection is only enabled for systems without DP MST support.
@@ -140,7 +140,7 @@ intel_connector_hpd_pin(struct intel_connector *connector)
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
enum hpd_pin pin, bool long_hpd)
{
- struct i915_hotplug *hpd = &dev_priv->hotplug;
+ struct intel_hotplug *hpd = &dev_priv->display.hotplug;
unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
const int increment = long_hpd ? 10 : 1;
@@ -148,7 +148,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
bool storm = false;
if (!threshold ||
- (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
+ (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
return false;
if (!time_in_range(jiffies, start, end)) {
@@ -191,7 +191,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
+ dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
drm_info(&dev_priv->drm,
@@ -199,7 +199,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
"switching from hotplug detection to polling\n",
connector->base.name);
- dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+ dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
@@ -209,7 +209,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
drm_kms_helper_poll_enable(dev);
- mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
+ mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
}
@@ -218,7 +218,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
- hotplug.reenable_work.work);
+ display.hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
@@ -233,7 +233,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
for_each_intel_connector_iter(connector, &conn_iter) {
pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
- dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
+ dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
continue;
if (connector->base.polled != connector->polled)
@@ -245,8 +245,8 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
for_each_hpd_pin(pin) {
- if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
+ dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
}
intel_hpd_irq_setup(dev_priv);
@@ -297,16 +297,16 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
static void i915_digport_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, hotplug.dig_port_work);
+ container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
u32 long_port_mask, short_port_mask;
struct intel_encoder *encoder;
u32 old_bits = 0;
spin_lock_irq(&dev_priv->irq_lock);
- long_port_mask = dev_priv->hotplug.long_port_mask;
- dev_priv->hotplug.long_port_mask = 0;
- short_port_mask = dev_priv->hotplug.short_port_mask;
- dev_priv->hotplug.short_port_mask = 0;
+ long_port_mask = dev_priv->display.hotplug.long_port_mask;
+ dev_priv->display.hotplug.long_port_mask = 0;
+ short_port_mask = dev_priv->display.hotplug.short_port_mask;
+ dev_priv->display.hotplug.short_port_mask = 0;
spin_unlock_irq(&dev_priv->irq_lock);
for_each_intel_encoder(&dev_priv->drm, encoder) {
@@ -335,9 +335,9 @@ static void i915_digport_work_func(struct work_struct *work)
if (old_bits) {
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.event_bits |= old_bits;
+ dev_priv->display.hotplug.event_bits |= old_bits;
spin_unlock_irq(&dev_priv->irq_lock);
- queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
+ queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
}
}
@@ -353,10 +353,10 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
spin_lock_irq(&i915->irq_lock);
- i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
+ i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
spin_unlock_irq(&i915->irq_lock);
- queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
+ queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
}
/*
@@ -366,7 +366,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
- hotplug.hotplug_work.work);
+ display.hotplug.hotplug_work.work);
struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
@@ -379,10 +379,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
- hpd_event_bits = dev_priv->hotplug.event_bits;
- dev_priv->hotplug.event_bits = 0;
- hpd_retry_bits = dev_priv->hotplug.retry_bits;
- dev_priv->hotplug.retry_bits = 0;
+ hpd_event_bits = dev_priv->display.hotplug.event_bits;
+ dev_priv->display.hotplug.event_bits = 0;
+ hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
+ dev_priv->display.hotplug.retry_bits = 0;
/* Enable polling for connectors which had HPD IRQ storms */
intel_hpd_irq_storm_switch_to_polling(dev_priv);
@@ -435,10 +435,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
retry &= ~changed;
if (retry) {
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.retry_bits |= retry;
+ dev_priv->display.hotplug.retry_bits |= retry;
spin_unlock_irq(&dev_priv->irq_lock);
- mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
+ mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work,
msecs_to_jiffies(HPD_RETRY_DELAY));
}
}
@@ -502,10 +502,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (long_hpd) {
long_hpd_pulse_mask |= BIT(pin);
- dev_priv->hotplug.long_port_mask |= BIT(port);
+ dev_priv->display.hotplug.long_port_mask |= BIT(port);
} else {
short_hpd_pulse_mask |= BIT(pin);
- dev_priv->hotplug.short_port_mask |= BIT(port);
+ dev_priv->display.hotplug.short_port_mask |= BIT(port);
}
}
@@ -516,7 +516,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(pin) & pin_mask))
continue;
- if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
/*
* On GMCH platforms the interrupt mask bits only
* prevent irq generation, not the setting of the
@@ -529,7 +529,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
continue;
}
- if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
+ if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
continue;
/*
@@ -540,13 +540,13 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
long_hpd = long_hpd_pulse_mask & BIT(pin);
} else {
- dev_priv->hotplug.event_bits |= BIT(pin);
+ dev_priv->display.hotplug.event_bits |= BIT(pin);
long_hpd = true;
queue_hp = true;
}
if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
- dev_priv->hotplug.event_bits &= ~BIT(pin);
+ dev_priv->display.hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
queue_hp = true;
}
@@ -567,9 +567,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* deadlock.
*/
if (queue_dig)
- queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
+ queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
if (queue_hp)
- queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
+ queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
}
/**
@@ -594,8 +594,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
return;
for_each_hpd_pin(i) {
- dev_priv->hotplug.stats[i].count = 0;
- dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+ dev_priv->display.hotplug.stats[i].count = 0;
+ dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
}
/*
@@ -611,7 +611,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
- hotplug.poll_init_work);
+ display.hotplug.poll_init_work);
struct drm_device *dev = &dev_priv->drm;
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
@@ -619,7 +619,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
mutex_lock(&dev->mode_config.mutex);
- enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+ enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
@@ -672,7 +672,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
!INTEL_DISPLAY_ENABLED(dev_priv))
return;
- WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+ WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);
/*
* We might already be holding dev->mode_config.mutex, so do this in a
@@ -680,7 +680,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
* As well, there's no issue if we race here since we always reschedule
* this worker anyway
*/
- schedule_work(&dev_priv->hotplug.poll_init_work);
+ schedule_work(&dev_priv->display.hotplug.poll_init_work);
}
/**
@@ -707,17 +707,17 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
- schedule_work(&dev_priv->hotplug.poll_init_work);
+ WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
+ schedule_work(&dev_priv->display.hotplug.poll_init_work);
}
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
- INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
+ INIT_DELAYED_WORK(&dev_priv->display.hotplug.hotplug_work,
i915_hotplug_work_func);
- INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
- INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
- INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
+ INIT_WORK(&dev_priv->display.hotplug.dig_port_work, i915_digport_work_func);
+ INIT_WORK(&dev_priv->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
+ INIT_DELAYED_WORK(&dev_priv->display.hotplug.reenable_work,
intel_hpd_irq_storm_reenable_work);
}
@@ -728,17 +728,17 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.long_port_mask = 0;
- dev_priv->hotplug.short_port_mask = 0;
- dev_priv->hotplug.event_bits = 0;
- dev_priv->hotplug.retry_bits = 0;
+ dev_priv->display.hotplug.long_port_mask = 0;
+ dev_priv->display.hotplug.short_port_mask = 0;
+ dev_priv->display.hotplug.event_bits = 0;
+ dev_priv->display.hotplug.retry_bits = 0;
spin_unlock_irq(&dev_priv->irq_lock);
- cancel_work_sync(&dev_priv->hotplug.dig_port_work);
- cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
- cancel_work_sync(&dev_priv->hotplug.poll_init_work);
- cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
+ cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
+ cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
+ cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
+ cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
}
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
@@ -749,8 +749,8 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
return false;
spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
- dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+ if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
+ dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
ret = true;
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -764,6 +764,6 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
return;
spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
spin_unlock_irq(&dev_priv->irq_lock);
}
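
The storm-detection comment block near the top of intel_hotplug.c describes the accounting these renames carry over: within HPD_STORM_DETECT_PERIOD, long IRQs add 10 and short IRQs add 1 against hpd_storm_threshold. A simplified sketch of that counting (the window is reduced to a plain counter, and the threshold of 50 is an assumption standing in for HPD_STORM_DEFAULT_THRESHOLD):

#include <stdbool.h>
#include <stdio.h>

#define STORM_THRESHOLD 50	/* assumed default threshold */

static int count;

static bool hpd_irq(bool long_hpd)
{
	count += long_hpd ? 10 : 1;	/* long IRQs weigh 10x */
	return count > STORM_THRESHOLD;
}

int main(void)
{
	bool storm = false;
	int i;

	for (i = 0; i < 6 && !storm; i++)
		storm = hpd_irq(true);
	printf("storm after %d long IRQs: %s\n", i, storm ? "yes" : "no");
	return 0;
}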
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 4970bf146c4a..dca6003ccac8 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -73,8 +73,9 @@
#include "i915_drv.h"
#include "intel_de.h"
#include "intel_lpe_audio.h"
+#include "intel_pci_config.h"
-#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL)
+#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->display.audio.lpe.platdev != NULL)
static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
@@ -96,13 +97,13 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
return ERR_PTR(-ENOMEM);
}
- rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq;
+ rsc[0].start = rsc[0].end = dev_priv->display.audio.lpe.irq;
rsc[0].flags = IORESOURCE_IRQ;
rsc[0].name = "hdmi-lpe-audio-irq";
- rsc[1].start = pci_resource_start(pdev, 0) +
+ rsc[1].start = pci_resource_start(pdev, GTTMMADR_BAR) +
I915_HDMI_LPE_AUDIO_BASE;
- rsc[1].end = pci_resource_start(pdev, 0) +
+ rsc[1].end = pci_resource_start(pdev, GTTMMADR_BAR) +
I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1;
rsc[1].flags = IORESOURCE_MEM;
rsc[1].name = "hdmi-lpe-audio-mmio";
@@ -148,7 +149,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
* than us fiddle with its internals.
*/
- platform_device_unregister(dev_priv->audio.lpe.platdev);
+ platform_device_unregister(dev_priv->display.audio.lpe.platdev);
}
static void lpe_audio_irq_unmask(struct irq_data *d)
@@ -167,7 +168,7 @@ static struct irq_chip lpe_audio_irqchip = {
static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
{
- int irq = dev_priv->audio.lpe.irq;
+ int irq = dev_priv->display.audio.lpe.irq;
drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
irq_set_chip_and_handler_name(irq,
@@ -204,15 +205,15 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
{
int ret;
- dev_priv->audio.lpe.irq = irq_alloc_desc(0);
- if (dev_priv->audio.lpe.irq < 0) {
+ dev_priv->display.audio.lpe.irq = irq_alloc_desc(0);
+ if (dev_priv->display.audio.lpe.irq < 0) {
drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
- dev_priv->audio.lpe.irq);
- ret = dev_priv->audio.lpe.irq;
+ dev_priv->display.audio.lpe.irq);
+ ret = dev_priv->display.audio.lpe.irq;
goto err;
}
- drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq);
+ drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->display.audio.lpe.irq);
ret = lpe_audio_irq_init(dev_priv);
@@ -223,10 +224,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
goto err_free_irq;
}
- dev_priv->audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
+ dev_priv->display.audio.lpe.platdev = lpe_audio_platdev_create(dev_priv);
- if (IS_ERR(dev_priv->audio.lpe.platdev)) {
- ret = PTR_ERR(dev_priv->audio.lpe.platdev);
+ if (IS_ERR(dev_priv->display.audio.lpe.platdev)) {
+ ret = PTR_ERR(dev_priv->display.audio.lpe.platdev);
drm_err(&dev_priv->drm,
"Failed to create lpe audio platform device: %d\n",
ret);
@@ -241,10 +242,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
return 0;
err_free_irq:
- irq_free_desc(dev_priv->audio.lpe.irq);
+ irq_free_desc(dev_priv->display.audio.lpe.irq);
err:
- dev_priv->audio.lpe.irq = -1;
- dev_priv->audio.lpe.platdev = NULL;
+ dev_priv->display.audio.lpe.irq = -1;
+ dev_priv->display.audio.lpe.platdev = NULL;
return ret;
}
@@ -262,7 +263,7 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
if (!HAS_LPE_AUDIO(dev_priv))
return;
- ret = generic_handle_irq(dev_priv->audio.lpe.irq);
+ ret = generic_handle_irq(dev_priv->display.audio.lpe.irq);
if (ret)
drm_err_ratelimited(&dev_priv->drm,
"error handling LPE audio irq: %d\n", ret);
@@ -303,10 +304,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
lpe_audio_platdev_destroy(dev_priv);
- irq_free_desc(dev_priv->audio.lpe.irq);
+ irq_free_desc(dev_priv->display.audio.lpe.irq);
- dev_priv->audio.lpe.irq = -1;
- dev_priv->audio.lpe.platdev = NULL;
+ dev_priv->display.audio.lpe.irq = -1;
+ dev_priv->display.audio.lpe.platdev = NULL;
}
/**
@@ -333,7 +334,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
if (!HAS_LPE_AUDIO(dev_priv))
return;
- pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev);
+ pdata = dev_get_platdata(&dev_priv->display.audio.lpe.platdev->dev);
ppdata = &pdata->port[port - PORT_B];
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
@@ -361,7 +362,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
}
if (pdata->notify_audio_lpe)
- pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B);
+ pdata->notify_audio_lpe(dev_priv->display.audio.lpe.platdev, port - PORT_B);
spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags);
}
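
Beyond the display.audio.lpe renames, the functional change here is that the MMIO resource is derived from GTTMMADR_BAR rather than a bare BAR index 0. A sketch of the two-resource layout handed to the LPE audio platform device; the base, size, and BAR address below are placeholders, not the driver's constants:

#include <stdio.h>

#define LPE_AUDIO_BASE 0x65000	/* placeholder for I915_HDMI_LPE_AUDIO_BASE */
#define LPE_AUDIO_SIZE 0x1000	/* placeholder for I915_HDMI_LPE_AUDIO_SIZE */

int main(void)
{
	unsigned long bar = 0xe0000000;	/* example GTTMMADR BAR start */
	unsigned long start = bar + LPE_AUDIO_BASE;
	unsigned long end = start + LPE_AUDIO_SIZE - 1;

	/* resource 0 is the allocated IRQ; resource 1 is this MMIO window */
	printf("hdmi-lpe-audio-mmio: [0x%lx, 0x%lx]\n", start, end);
	return 0;
}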
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 730480ac3300..9aa38e8141b5 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -837,12 +837,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- drm_WARN(dev, !dev_priv->vbt.int_lvds_support,
+ drm_WARN(dev, !dev_priv->display.vbt.int_lvds_support,
"Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
- if (!dev_priv->vbt.int_lvds_support) {
+ if (!dev_priv->display.vbt.int_lvds_support) {
drm_dbg_kms(&dev_priv->drm,
"Internal LVDS support disabled by VBT\n");
return;
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index f0e04d3904c6..cbfabd58b75a 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -23,6 +23,7 @@
#include "intel_modeset_setup.h"
#include "intel_pch_display.h"
#include "intel_pm.h"
+#include "skl_watermark.h"
static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
@@ -30,11 +31,11 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct intel_encoder *encoder;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
- to_intel_bw_state(i915->bw_obj.state);
+ to_intel_bw_state(i915->display.bw.obj.state);
struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->cdclk.obj.state);
+ to_intel_cdclk_state(i915->display.cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
@@ -70,7 +71,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret);
- i915->display->crtc_disable(to_intel_atomic_state(state), crtc);
+ i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
@@ -415,9 +416,9 @@ static void readout_plane_state(struct drm_i915_private *i915)
static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
{
struct intel_cdclk_state *cdclk_state =
- to_intel_cdclk_state(i915->cdclk.obj.state);
+ to_intel_cdclk_state(i915->display.cdclk.obj.state);
struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
@@ -535,7 +536,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
for_each_intel_crtc(&i915->drm, crtc) {
struct intel_bw_state *bw_state =
- to_intel_bw_state(i915->bw_obj.state);
+ to_intel_bw_state(i915->display.bw.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_plane *plane;
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index a91586d77cb6..0fdcf2e6d57f 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -15,8 +15,8 @@
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_modeset_verify.h"
-#include "intel_pm.h"
#include "intel_snps_phy.h"
+#include "skl_watermark.h"
/*
* Cross check the actual hw state with our own modeset state tracking (and its
@@ -94,10 +94,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
/*
* FDI already provided one idea for the dotclock.
- * Yell if the encoder disagrees.
+ * Yell if the encoder disagrees. Allow for slight
+ * rounding differences.
*/
- drm_WARN(&dev_priv->drm,
- !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ drm_WARN(&dev_priv->drm, abs(fdi_dotclock - dotclock) > 1,
"FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
fdi_dotclock, dotclock);
}
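
The verifier change above replaces intel_fuzzy_clock_check() with an explicit absolute tolerance: FDI and encoder dotclocks may now differ by at most 1 kHz before the warning fires. A one-function model of that comparison with sample values:

#include <stdio.h>
#include <stdlib.h>

static int dotclock_mismatch(int fdi, int enc)
{
	return abs(fdi - enc) > 1;	/* 1 kHz of rounding slack */
}

int main(void)
{
	printf("270000 vs 270001: %s\n",
	       dotclock_mismatch(270000, 270001) ? "mismatch" : "ok");
	printf("270000 vs 270100: %s\n",
	       dotclock_mismatch(270000, 270100) ? "mismatch" : "ok");
	return 0;
}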
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 1c0c745c142d..caa07ef34f21 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -252,7 +252,7 @@ struct opregion_asle_ext {
static int check_swsci_function(struct drm_i915_private *i915, u32 function)
{
- struct opregion_swsci *swsci = i915->opregion.swsci;
+ struct opregion_swsci *swsci = i915->display.opregion.swsci;
u32 main_function, sub_function;
if (!swsci)
@@ -265,11 +265,11 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
/* Check if we can call the function. See swsci_setup for details. */
if (main_function == SWSCI_SBCB) {
- if ((i915->opregion.swsci_sbcb_sub_functions &
+ if ((i915->display.opregion.swsci_sbcb_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
} else if (main_function == SWSCI_GBDA) {
- if ((i915->opregion.swsci_gbda_sub_functions &
+ if ((i915->display.opregion.swsci_gbda_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
}
@@ -280,7 +280,7 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function)
static int swsci(struct drm_i915_private *dev_priv,
u32 function, u32 parm, u32 *parm_out)
{
- struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+ struct opregion_swsci *swsci = dev_priv->display.opregion.swsci;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 scic, dslp;
u16 swsci_val;
@@ -462,7 +462,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ struct opregion_asle *asle = dev_priv->display.opregion.asle;
struct drm_device *dev = &dev_priv->drm;
drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
@@ -586,8 +586,8 @@ static void asle_work(struct work_struct *work)
struct intel_opregion *opregion =
container_of(work, struct intel_opregion, asle_work);
struct drm_i915_private *dev_priv =
- container_of(opregion, struct drm_i915_private, opregion);
- struct opregion_asle *asle = dev_priv->opregion.asle;
+ container_of(opregion, struct drm_i915_private, display.opregion);
+ struct opregion_asle *asle = dev_priv->display.opregion.asle;
u32 aslc_stat = 0;
u32 aslc_req;
@@ -635,8 +635,8 @@ static void asle_work(struct work_struct *work)
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
- if (dev_priv->opregion.asle)
- schedule_work(&dev_priv->opregion.asle_work);
+ if (dev_priv->display.opregion.asle)
+ schedule_work(&dev_priv->display.opregion.asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -692,7 +692,7 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
@@ -731,7 +731,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0;
@@ -761,7 +761,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
static void swsci_setup(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
bool requested_callbacks = false;
u32 tmp;
@@ -839,7 +839,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
const struct firmware *fw = NULL;
const char *name = dev_priv->params.vbt_firmware;
int ret;
@@ -879,7 +879,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
- struct intel_opregion *opregion = &dev_priv->opregion;
+ struct intel_opregion *opregion = &dev_priv->display.opregion;
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
@@ -1106,7 +1106,7 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
const void *in_edid;
const struct edid *edid;
struct edid *new_edid;
@@ -1141,7 +1141,7 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
struct opregion_header *header = opregion->header;
if (!header || header->over.major < 2 ||
@@ -1153,7 +1153,7 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915)
void intel_opregion_register(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
@@ -1169,7 +1169,7 @@ void intel_opregion_register(struct drm_i915_private *i915)
void intel_opregion_resume(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
@@ -1200,7 +1200,7 @@ void intel_opregion_resume(struct drm_i915_private *i915)
void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
if (!opregion->header)
return;
@@ -1210,7 +1210,7 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
if (opregion->asle)
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
- cancel_work_sync(&i915->opregion.asle_work);
+ cancel_work_sync(&i915->display.opregion.asle_work);
if (opregion->acpi)
opregion->acpi->drdy = 0;
@@ -1218,7 +1218,7 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
void intel_opregion_unregister(struct drm_i915_private *i915)
{
- struct intel_opregion *opregion = &i915->opregion;
+ struct intel_opregion *opregion = &i915->display.opregion;
intel_opregion_suspend(i915, PCI_D1);
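
The SWSCI callability check shown earlier in this file splits a function code into main and sub-function, then tests the sub-function index against the GBDA/SBCB bitmasks discovered at setup. A sketch of that gating; the main-function codes and masks here are illustrative stand-ins for the driver's values:

#include <errno.h>
#include <stdio.h>

#define MAIN_GBDA 4	/* illustrative main-function codes */
#define MAIN_SBCB 6

static unsigned int gbda_sub_functions = 1u << 3;	/* discovered at setup */
static unsigned int sbcb_sub_functions = 1u << 1;

static int check_swsci(unsigned int main_fn, unsigned int sub_fn)
{
	if (main_fn == MAIN_SBCB && !(sbcb_sub_functions & (1u << sub_fn)))
		return -EINVAL;
	if (main_fn == MAIN_GBDA && !(gbda_sub_functions & (1u << sub_fn)))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("GBDA sub 3: %d\n", check_swsci(MAIN_GBDA, 3));
	printf("SBCB sub 5: %d\n", check_swsci(MAIN_SBCB, 5));
	return 0;
}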
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 79ed8bd04a07..c12bdca8da9b 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -211,9 +211,9 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- intel_de_write(dev_priv, DSPCLK_GATE_D, 0);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), 0);
else
- intel_de_write(dev_priv, DSPCLK_GATE_D,
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv),
OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
@@ -487,7 +487,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
void intel_overlay_reset(struct drm_i915_private *dev_priv)
{
- struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay *overlay = dev_priv->display.overlay;
if (!overlay)
return;
@@ -1113,7 +1113,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *new_bo;
int ret;
- overlay = dev_priv->overlay;
+ overlay = dev_priv->display.overlay;
if (!overlay) {
drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
@@ -1273,7 +1273,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct intel_overlay *overlay;
int ret;
- overlay = dev_priv->overlay;
+ overlay = dev_priv->display.overlay;
if (!overlay) {
drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
@@ -1416,7 +1416,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
- dev_priv->overlay = overlay;
+ dev_priv->display.overlay = overlay;
drm_info(&dev_priv->drm, "Initialized overlay support.\n");
return;
@@ -1428,7 +1428,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
- overlay = fetch_and_zero(&dev_priv->overlay);
+ overlay = fetch_and_zero(&dev_priv->display.overlay);
if (!overlay)
return;
@@ -1457,7 +1457,7 @@ struct intel_overlay_error_state {
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
- struct intel_overlay *overlay = dev_priv->overlay;
+ struct intel_overlay *overlay = dev_priv->display.overlay;
struct intel_overlay_error_state *error;
if (!overlay || !overlay->active)
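intel_overlay_cleanup() above relies on i915's fetch_and_zero() helper: read the pointer's current value and clear the slot in one expression, so a teardown path cannot see the same pointer twice. A self-contained sketch of the idea (GNU C statement expression; the macro body is assumed to match i915_utils.h):

#include <stdio.h>

#define fetch_and_zero(ptr) ({ \
	typeof(*(ptr)) __T = *(ptr); \
	*(ptr) = (typeof(*(ptr)))0; \
	__T; \
})

struct intel_overlay { int active; };

int main(void)
{
	struct intel_overlay ov = { .active = 1 };
	struct intel_overlay *slot = &ov;
	struct intel_overlay *overlay = fetch_and_zero(&slot);

	/* overlay now holds the old pointer, slot is NULL */
	printf("got %p, slot %p\n", (void *)overlay, (void *)slot);
	return 0;
}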
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 237a40623dd7..a3a3f9fe4342 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -37,13 +37,14 @@
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_panel.h"
+#include "intel_quirks.h"
bool intel_panel_use_ssc(struct drm_i915_private *i915)
{
if (i915->params.panel_use_ssc >= 0)
return i915->params.panel_use_ssc != 0;
- return i915->vbt.lvds_use_ssc
- && !(i915->quirks & QUIRK_LVDS_SSC_DISABLE);
+ return i915->display.vbt.lvds_use_ssc &&
+ !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
}
const struct drm_display_mode *
@@ -81,15 +82,14 @@ static bool is_alt_drrs_mode(const struct drm_display_mode *mode,
mode->clock != preferred_mode->clock;
}
-static bool is_alt_vrr_mode(const struct drm_display_mode *mode,
- const struct drm_display_mode *preferred_mode)
+static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
+ const struct drm_display_mode *preferred_mode)
{
return drm_mode_match(mode, preferred_mode,
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS) &&
mode->hdisplay == preferred_mode->hdisplay &&
- mode->vdisplay == preferred_mode->vdisplay &&
- mode->clock != preferred_mode->clock;
+ mode->vdisplay == preferred_mode->vdisplay;
}
const struct drm_display_mode *
@@ -114,6 +114,21 @@ intel_panel_downclock_mode(struct intel_connector *connector,
return best_mode;
}
+const struct drm_display_mode *
+intel_panel_highest_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_mode *fixed_mode, *best_mode = adjusted_mode;
+
+ /* pick the fixed_mode that has the highest clock */
+ list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
+ if (fixed_mode->clock > best_mode->clock)
+ best_mode = fixed_mode;
+ }
+
+ return best_mode;
+}
+
int intel_panel_get_modes(struct intel_connector *connector)
{
const struct drm_display_mode *fixed_mode;
@@ -172,19 +187,7 @@ int intel_panel_compute_config(struct intel_connector *connector,
return 0;
}
-static bool is_alt_fixed_mode(const struct drm_display_mode *mode,
- const struct drm_display_mode *preferred_mode,
- bool has_vrr)
-{
- /* is_alt_drrs_mode() is a subset of is_alt_vrr_mode() */
- if (has_vrr)
- return is_alt_vrr_mode(mode, preferred_mode);
- else
- return is_alt_drrs_mode(mode, preferred_mode);
-}
-
-static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector,
- bool has_vrr)
+static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct drm_display_mode *preferred_mode =
@@ -192,7 +195,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect
struct drm_display_mode *mode, *next;
list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) {
- if (!is_alt_fixed_mode(mode, preferred_mode, has_vrr))
+ if (!is_alt_fixed_mode(mode, preferred_mode))
continue;
drm_dbg_kms(&dev_priv->drm,
@@ -255,7 +258,7 @@ void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
{
intel_panel_add_edid_preferred_mode(connector);
if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr))
- intel_panel_add_edid_alt_fixed_modes(connector, has_vrr);
+ intel_panel_add_edid_alt_fixed_modes(connector);
intel_panel_destroy_probed_modes(connector);
}
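The new intel_panel_highest_mode() above is a plain "max by clock" scan over the panel's fixed-mode list, seeded with the adjusted mode. The same selection logic as a self-contained sketch (an array in place of the kernel list, illustrative types):

struct mode {
	int clock;	/* kHz */
};

static const struct mode *
highest_mode(const struct mode *modes, int n, const struct mode *adjusted)
{
	const struct mode *best = adjusted;
	int i;

	/* pick whichever mode has the highest pixel clock */
	for (i = 0; i < n; i++)
		if (modes[i].clock > best->clock)
			best = &modes[i];

	return best;
}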
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index b087c0c3cc6d..eff3ffd3d082 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -31,6 +31,9 @@ intel_panel_fixed_mode(struct intel_connector *connector,
const struct drm_display_mode *
intel_panel_downclock_mode(struct intel_connector *connector,
const struct drm_display_mode *adjusted_mode);
+const struct drm_display_mode *
+intel_panel_highest_mode(struct intel_connector *connector,
+ const struct drm_display_mode *adjusted_mode);
int intel_panel_get_modes(struct intel_connector *connector);
enum drrs_type intel_panel_drrs_type(struct intel_connector *connector);
enum drm_mode_status
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 9934c8a9e240..a66097cdc1e0 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -167,6 +167,15 @@ static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
}
}
+int lpt_iclkip(const struct intel_crtc_state *crtc_state)
+{
+ struct iclkip_params p;
+
+ lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock);
+
+ return lpt_iclkip_freq(&p);
+}
+
/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
@@ -179,6 +188,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
lpt_disable_iclkip(dev_priv);
lpt_compute_iclkip(&p, clock);
+ drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);
/* This should not happen with any sane values */
drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
@@ -514,7 +524,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
if (HAS_PCH_IBX(dev_priv)) {
- has_ck505 = dev_priv->vbt.display_clock_mode;
+ has_ck505 = dev_priv->display.vbt.display_clock_mode;
can_ssc = has_ck505;
} else {
has_ck505 = false;
@@ -522,7 +532,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Check if any DPLLs are using the SSC source */
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
if (!(temp & DPLL_VCO_ENABLE))
@@ -654,7 +664,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- BUG_ON(val != final);
+ drm_WARN_ON(&dev_priv->drm, val != final);
}
/*
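The new lpt_iclkip() and the added drm_WARN_ON() encode a round-trip invariant: decoding the divisors that were just computed must reproduce the requested clock. A self-contained sketch of that self-check pattern, where freq_to_div()/div_to_freq() are illustrative stand-ins for lpt_compute_iclkip()/lpt_iclkip_freq() and the numbers are chosen so the division is exact:

#include <assert.h>

static unsigned int freq_to_div(unsigned int root, unsigned int clock)
{
	return (root + clock / 2) / clock;	/* DIV_ROUND_CLOSEST() */
}

static unsigned int div_to_freq(unsigned int root, unsigned int div)
{
	return (root + div / 2) / div;
}

int main(void)
{
	unsigned int root = 172800000, clock = 270000;	/* Hz, illustrative */
	unsigned int div = freq_to_div(root, clock);

	/* mirrors: drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock) */
	assert(div_to_freq(root, div) == clock);
	return 0;
}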
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
index 12ab2c75a800..9bcf56629f24 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h
@@ -14,6 +14,7 @@ struct intel_crtc_state;
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+int lpt_iclkip(const struct intel_crtc_state *crtc_state);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index d10f27d0b7b0..76be796df255 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -311,7 +311,7 @@ void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- dev_priv->display->get_initial_plane_config(crtc, &plane_config);
+ dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config);
/*
* If the fb is shared between multiple heads, we'll
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 1b21a341962f..21944f5bf3a8 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -12,6 +12,7 @@
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_pps.h"
+#include "intel_quirks.h"
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -28,7 +29,7 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
* See intel_pps_reset_all() why we need a power domain reference here.
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
- mutex_lock(&dev_priv->pps_mutex);
+ mutex_lock(&dev_priv->display.pps.mutex);
return wakeref;
}
@@ -38,7 +39,7 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- mutex_unlock(&dev_priv->pps_mutex);
+ mutex_unlock(&dev_priv->display.pps.mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return 0;
@@ -163,7 +164,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* We should never land here with regular DP ports */
drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
@@ -212,7 +213,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
struct intel_connector *connector = intel_dp->attached_connector;
int backlight_controller = connector->panel.vbt.backlight.controller;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* We should never land here with regular DP ports */
drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
@@ -282,7 +283,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum port port = dig_port->base.port;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* try to find a pipe with this port selected */
/* first pick one where the panel is on */
@@ -407,7 +408,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
@@ -420,7 +421,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
@@ -463,7 +464,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
intel_pps_verify_state(intel_dp);
@@ -556,7 +557,7 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 control;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
@@ -580,7 +581,7 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return false;
@@ -657,7 +658,7 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
@@ -748,7 +749,7 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -771,7 +772,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -832,7 +833,7 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -991,7 +992,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1021,7 +1022,7 @@ void vlv_pps_init(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
@@ -1064,7 +1065,7 @@ static void pps_vdd_init(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!edp_have_panel_vdd(intel_dp))
return;
@@ -1176,7 +1177,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
@@ -1202,7 +1203,7 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
* just fails to power back on. Increasing the delay to 800ms
* seems sufficient to avoid this problem.
*/
- if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+ if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) {
vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10);
drm_dbg_kms(&dev_priv->drm,
"Increasing T12 panel delay as per the quirk to %d\n",
@@ -1223,7 +1224,7 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
@@ -1246,7 +1247,7 @@ static void pps_init_delays(struct intel_dp *intel_dp)
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
/* already initialized? */
if (pps_delays_valid(final))
@@ -1312,7 +1313,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->pps_mutex);
+ lockdep_assert_held(&dev_priv->display.pps.mutex);
intel_pps_get_registers(intel_dp, &regs);
@@ -1487,11 +1488,11 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
void intel_pps_setup(struct drm_i915_private *i915)
{
if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->pps_mmio_base = PCH_PPS_BASE;
+ i915->display.pps.mmio_base = PCH_PPS_BASE;
else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->pps_mmio_base = VLV_PPS_BASE;
+ i915->display.pps.mmio_base = VLV_PPS_BASE;
else
- i915->pps_mmio_base = PPS_BASE;
+ i915->display.pps.mmio_base = PPS_BASE;
}
void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
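All of the pps_mutex hunks above are mechanical renames to i915->display.pps.mutex, but they preserve a convention worth noting: the *_unlocked() helpers only assert the lock (lockdep_assert_held()), and the lock is only taken through a wrapper that pairs it with a display power reference. A self-contained sketch of that convention, with pthread standing in for the kernel mutex and stub power hooks:

#include <pthread.h>

struct pps { pthread_mutex_t mutex; };
struct intel_display { struct pps pps; };
struct i915 { struct intel_display display; };

static void power_get(struct i915 *i915) { (void)i915; /* display power ref */ }
static void power_put(struct i915 *i915) { (void)i915; }

static void pps_lock(struct i915 *i915)
{
	power_get(i915);				/* power first... */
	pthread_mutex_lock(&i915->display.pps.mutex);	/* ...then lock */
}

static void pps_unlock(struct i915 *i915)
{
	pthread_mutex_unlock(&i915->display.pps.mutex);
	power_put(i915);
}

int main(void)
{
	struct i915 dev = { .display.pps.mutex = PTHREAD_MUTEX_INITIALIZER };

	pps_lock(&dev);
	/* *_unlocked() helpers run here; the kernel code would call
	 * lockdep_assert_held(&i915->display.pps.mutex) in each one */
	pps_unlock(&dev);
	return 0;
}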
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index e6a870641cd2..d4cce627d7a8 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -116,34 +116,56 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
}
}
+static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+ EDP_PSR_ERROR(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+ EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+ EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+ EDP_PSR_MASK(intel_dp->psr.transcoder);
+}
+
static void psr_irq_control(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum transcoder trans_shift;
i915_reg_t imr_reg;
u32 mask, val;
- /*
- * gen12+ has registers relative to transcoder and one per transcoder
- * using the same bit definition: handle it as TRANSCODER_EDP to force
- * 0 shift in bit definition
- */
- if (DISPLAY_VER(dev_priv) >= 12) {
- trans_shift = 0;
+ if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
- } else {
- trans_shift = intel_dp->psr.transcoder;
+ else
imr_reg = EDP_PSR_IMR;
- }
- mask = EDP_PSR_ERROR(trans_shift);
+ mask = psr_irq_psr_error_bit_get(intel_dp);
if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
- mask |= EDP_PSR_POST_EXIT(trans_shift) |
- EDP_PSR_PRE_ENTRY(trans_shift);
+ mask |= psr_irq_post_exit_bit_get(intel_dp) |
+ psr_irq_pre_entry_bit_get(intel_dp);
- /* Warning: it is masking/setting reserved bits too */
val = intel_de_read(dev_priv, imr_reg);
- val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+ val &= ~psr_irq_mask_get(intel_dp);
val |= ~mask;
intel_de_write(dev_priv, imr_reg, val);
}
@@ -191,25 +213,21 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ktime_t time_ns = ktime_get();
- enum transcoder trans_shift;
i915_reg_t imr_reg;
- if (DISPLAY_VER(dev_priv) >= 12) {
- trans_shift = 0;
+ if (DISPLAY_VER(dev_priv) >= 12)
imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
- } else {
- trans_shift = intel_dp->psr.transcoder;
+ else
imr_reg = EDP_PSR_IMR;
- }
- if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+ if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
intel_dp->psr.last_entry_attempt = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
- if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+ if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
intel_dp->psr.last_exit = time_ns;
drm_dbg_kms(&dev_priv->drm,
"[transcoder %s] PSR exit completed\n",
@@ -226,7 +244,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
}
}
- if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+ if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
u32 val;
drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
@@ -243,7 +261,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* or unset irq_aux_error.
*/
val = intel_de_read(dev_priv, imr_reg);
- val |= EDP_PSR_ERROR(trans_shift);
+ val |= psr_irq_psr_error_bit_get(intel_dp);
intel_de_write(dev_priv, imr_reg, val);
schedule_work(&intel_dp->psr.work);
@@ -706,7 +724,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
if (crtc_state->enable_psr2_sel_fetch)
return;
- if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO))
return;
if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
@@ -805,13 +823,14 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);
- /* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */
- req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000);
+ /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
+ req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);
if ((hblank_ns - req_ns) > 100)
return true;
- if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ /* Not supported <13 / Wa_22012279113:adl-p */
+ if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
return false;
crtc_state->req_psr2_sdp_prior_scanline = true;
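To put numbers on the updated check above, a worked example with illustrative values (4 lanes at a 540000 kHz port clock, and a mode with 160 pixels of horizontal blanking at a 533250 kHz pixel clock):

#include <stdio.h>

int main(void)
{
	unsigned int lanes = 4, port_clock = 540000;		/* kHz */
	unsigned int hblank_total = 160, crtc_clock = 533250;	/* px, kHz */
	unsigned long long hblank_ns =
		1000000ULL * hblank_total / crtc_clock;		/* ~300 ns */
	unsigned int req_ns =
		((60 / lanes) + 11) * 1000 / (port_clock / 1000); /* ~48 ns */

	printf("%s\n", hblank_ns - req_ns > 100 ?
	       "enough hblank" : "needs SDP prior scanline handling");
	return 0;
}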
@@ -1193,14 +1212,12 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
* first time that PSR HW tries to activate so let's keep PSR disabled
* to avoid any rendering problems.
*/
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(dev_priv) >= 12)
val = intel_de_read(dev_priv,
TRANS_PSR_IIR(intel_dp->psr.transcoder));
- val &= EDP_PSR_ERROR(0);
- } else {
+ else
val = intel_de_read(dev_priv, EDP_PSR_IIR);
- val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
- }
+ val &= psr_irq_psr_error_bit_get(intel_dp);
if (val) {
intel_dp->psr.sink_not_reliable = true;
drm_dbg_kms(&dev_priv->drm,
@@ -1721,8 +1738,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
new_plane_state, i) {
struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
.x2 = INT_MAX };
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
@@ -1767,22 +1782,18 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
continue;
}
- drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
+ src = drm_plane_state_src(&new_plane_state->uapi);
+ drm_rect_fp_to_int(&src, &src);
- drm_atomic_helper_damage_iter_init(&iter,
- &old_plane_state->uapi,
- &new_plane_state->uapi);
- drm_atomic_for_each_plane_damage(&iter, &clip) {
- if (drm_rect_intersect(&clip, &src))
- clip_area_update(&damaged_area, &clip,
- &crtc_state->pipe_src);
- }
-
- if (damaged_area.y1 == -1)
+ if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
+ &new_plane_state->uapi, &damaged_area))
continue;
damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
+ damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
+ damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
+
clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
}
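The hunk above drops the open-coded damage-clip iteration in favour of drm_atomic_helper_damage_merged(), which folds every damage clip into a single bounding rectangle and returns false when there is no damage. The merge itself, as a self-contained sketch with a minimal rect type:

#include <stdbool.h>

struct rect { int x1, y1, x2, y2; };

static bool damage_merged(const struct rect *clips, int n, struct rect *out)
{
	bool any = false;
	int i;

	for (i = 0; i < n; i++) {
		if (!any) {
			*out = clips[i];	/* first clip seeds the bbox */
			any = true;
			continue;
		}
		if (clips[i].x1 < out->x1) out->x1 = clips[i].x1;
		if (clips[i].y1 < out->y1) out->y1 = clips[i].y1;
		if (clips[i].x2 > out->x2) out->x2 = clips[i].x2;
		if (clips[i].y2 > out->y2) out->y2 = clips[i].y2;
	}

	return any;	/* false: no damage, caller skips the plane */
}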
@@ -1863,7 +1874,9 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_crtc_state *crtc_state =
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
@@ -1871,7 +1884,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
- crtc_state->uapi.encoder_mask) {
+ old_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_psr *psr = &intel_dp->psr;
bool needs_to_disable = false;
@@ -1884,10 +1897,10 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
* - All planes will go inactive
* - Changing between PSR versions
*/
- needs_to_disable |= intel_crtc_needs_modeset(crtc_state);
- needs_to_disable |= !crtc_state->has_psr;
- needs_to_disable |= !crtc_state->active_planes;
- needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled;
+ needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
+ needs_to_disable |= !new_crtc_state->has_psr;
+ needs_to_disable |= !new_crtc_state->active_planes;
+ needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
if (psr->enabled && needs_to_disable)
intel_psr_disable_locked(intel_dp);
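The psr_irq_*_bit_get() helpers introduced above replace the trans_shift juggling with a single decision: on display version 12+ every transcoder has its own PSR IIR/IMR, so the bit definitions sit at shift 0, while older hardware packs all transcoders into one register and the bit position depends on the transcoder. Sketch of that selection (the bit layout is illustrative, not the real register map):

#include <stdint.h>

#define PSR_ERROR(shift)	(UINT32_C(4) << (shift))

static uint32_t psr_error_bit(int display_ver, int transcoder_shift)
{
	/* ver >= 12: per-transcoder register, bits always at shift 0 */
	return display_ver >= 12 ? PSR_ERROR(0)
				 : PSR_ERROR(transcoder_shift);
}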
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index c8488f5ebd04..6e48d3bcdfec 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -9,12 +9,17 @@
#include "intel_display_types.h"
#include "intel_quirks.h"
+static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+{
+ i915->display.quirks.mask |= BIT(quirk);
+}
+
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
static void quirk_ssc_force_disable(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
}
@@ -24,14 +29,14 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915)
*/
static void quirk_invert_brightness(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS);
drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
}
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT);
drm_info(&i915->drm, "applying backlight present quirk\n");
}
@@ -40,7 +45,7 @@ static void quirk_backlight_present(struct drm_i915_private *i915)
*/
static void quirk_increase_t12_delay(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_INCREASE_T12_DELAY;
+ intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY);
drm_info(&i915->drm, "Applying T12 delay quirk\n");
}
@@ -50,13 +55,13 @@ static void quirk_increase_t12_delay(struct drm_i915_private *i915)
*/
static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+ intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME);
drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
}
static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915)
{
- i915->quirks |= QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK;
+ intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
drm_info(&i915->drm, "Applying no pps backlight power quirk\n");
}
@@ -191,6 +196,9 @@ static struct intel_quirk intel_quirks[] = {
/* ASRock ITX */
{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ /* ECS Liva Q2 */
+ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
};
void intel_init_quirks(struct drm_i915_private *i915)
@@ -213,3 +221,8 @@ void intel_init_quirks(struct drm_i915_private *i915)
intel_dmi_quirks[i].hook(i915);
}
}
+
+bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk)
+{
+ return i915->display.quirks.mask & BIT(quirk);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index b0fcff142a56..10a4d163149f 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -6,8 +6,20 @@
#ifndef __INTEL_QUIRKS_H__
#define __INTEL_QUIRKS_H__
+#include <linux/types.h>
+
struct drm_i915_private;
-void intel_init_quirks(struct drm_i915_private *dev_priv);
+enum intel_quirk_id {
+ QUIRK_BACKLIGHT_PRESENT,
+ QUIRK_INCREASE_DDI_DISABLED_TIME,
+ QUIRK_INCREASE_T12_DELAY,
+ QUIRK_INVERT_BRIGHTNESS,
+ QUIRK_LVDS_SSC_DISABLE,
+ QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
+};
+
+void intel_init_quirks(struct drm_i915_private *i915);
+bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk);
#endif /* __INTEL_QUIRKS_H__ */
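With intel_quirk_id now an enum, the quirk state becomes an ordinary bitmask: intel_set_quirk() ORs in BIT(quirk) and intel_has_quirk() tests it, replacing the open-coded i915->quirks flag word. The same mechanism as a self-contained sketch:

#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (UINT32_C(1) << (n))

enum quirk_id {
	QUIRK_LVDS_SSC_DISABLE,
	QUIRK_INCREASE_T12_DELAY,
};

struct quirks {
	uint32_t mask;
};

static void set_quirk(struct quirks *q, enum quirk_id id)
{
	q->mask |= BIT(id);
}

static bool has_quirk(const struct quirks *q, enum quirk_id id)
{
	return q->mask & BIT(id);
}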
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 19122bc6d2ab..f5b744bef18f 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2016,7 +2016,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
return drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
- dev_priv->vbt.crt_ddc_pin));
+ dev_priv->display.vbt.crt_ddc_pin));
}
static enum drm_connector_status
@@ -2581,9 +2581,9 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;
if (sdvo->port == PORT_B)
- mapping = &dev_priv->vbt.sdvo_mappings[0];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[0];
else
- mapping = &dev_priv->vbt.sdvo_mappings[1];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[1];
if (mapping->initialized)
sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
@@ -2599,9 +2599,9 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
u8 pin;
if (sdvo->port == PORT_B)
- mapping = &dev_priv->vbt.sdvo_mappings[0];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[0];
else
- mapping = &dev_priv->vbt.sdvo_mappings[1];
+ mapping = &dev_priv->display.vbt.sdvo_mappings[1];
if (mapping->initialized &&
intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
@@ -2639,11 +2639,11 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->port == PORT_B) {
- my_mapping = &dev_priv->vbt.sdvo_mappings[0];
- other_mapping = &dev_priv->vbt.sdvo_mappings[1];
+ my_mapping = &dev_priv->display.vbt.sdvo_mappings[0];
+ other_mapping = &dev_priv->display.vbt.sdvo_mappings[1];
} else {
- my_mapping = &dev_priv->vbt.sdvo_mappings[1];
- other_mapping = &dev_priv->vbt.sdvo_mappings[0];
+ my_mapping = &dev_priv->display.vbt.sdvo_mappings[1];
+ other_mapping = &dev_priv->display.vbt.sdvo_mappings[0];
}
/* If the BIOS described our SDVO device, take advantage of it. */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 0bdbedc67d7d..937cefd6f78f 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -518,6 +518,1086 @@ static const struct intel_mpllb_state dg2_hdmi_148_5 = {
};
/* values in the below tables are calculated using the algorithm */
+static const struct intel_mpllb_state dg2_hdmi_25200 = {
+ .clock = 25200,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 41943) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2621),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
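Each added entry such as dg2_hdmi_25200 above is keyed by .clock in kHz; at modeset time the driver walks a NULL-terminated array of these states looking for one that matches the requested port clock (the in-tree lookup lives in intel_mpllb_calc_state(); an exact match is assumed in this sketch):

#include <stddef.h>

struct mpllb_state {
	int clock;	/* kHz; register fields omitted */
};

static const struct mpllb_state *
mpllb_find(const struct mpllb_state * const *tables, int clock)
{
	size_t i;

	for (i = 0; tables[i]; i++)
		if (tables[i]->clock == clock)
			return tables[i];

	return NULL;	/* clock not representable by the PHY tables */
}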
+
+static const struct intel_mpllb_state dg2_hdmi_27027 = {
+ .clock = 27027,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 31876) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 46555),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_28320 = {
+ .clock = 28320,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 148) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40894) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 30408),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_30240 = {
+ .clock = 30240,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 50331) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 42466),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_31500 = {
+ .clock = 31500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 68) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_36000 = {
+ .clock = 36000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 82) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_40000 = {
+ .clock = 40000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_49500 = {
+ .clock = 49500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 126) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_50000 = {
+ .clock = 50000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_57284 = {
+ .clock = 57284,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 150) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 42886) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 49701),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_58000 = {
+ .clock = 58000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_65000 = {
+ .clock = 65000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_71000 = {
+ .clock = 71000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 80) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_74176 = {
+ .clock = 74176,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_75000 = {
+ .clock = 75000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 88) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_78750 = {
+ .clock = 78750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_85500 = {
+ .clock = 85500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 104) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_88750 = {
+ .clock = 88750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 110) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_106500 = {
+ .clock = 106500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 138) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_108000 = {
+ .clock = 108000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_115500 = {
+ .clock = 115500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_119000 = {
+ .clock = 119000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 158) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_135000 = {
+ .clock = 135000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 76) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_138500 = {
+ .clock = 138500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 78) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_147160 = {
+ .clock = 147160,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 84) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 56623) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 6815),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_148352 = {
+ .clock = 148352,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_154000 = {
+ .clock = 154000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 13) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 90) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_162000 = {
+ .clock = 162000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_209800 = {
+ .clock = 209800,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 134) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 60293) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7864),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_262750 = {
+ .clock = 262750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_268500 = {
+ .clock = 268500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 45875) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_296703 = {
+ .clock = 296703,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36804),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_241500 = {
+ .clock = 241500,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_497750 = {
+ .clock = 497750,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 166) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_592000 = {
+ .clock = 592000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_593407 = {
+ .clock = 593407,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22328) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7549),
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
static const struct intel_mpllb_state dg2_hdmi_297 = {
.clock = 297000,
.ref_control =
@@ -584,6 +1664,42 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_148_5,
&dg2_hdmi_297,
&dg2_hdmi_594,
+ &dg2_hdmi_25200,
+ &dg2_hdmi_27027,
+ &dg2_hdmi_28320,
+ &dg2_hdmi_30240,
+ &dg2_hdmi_31500,
+ &dg2_hdmi_36000,
+ &dg2_hdmi_40000,
+ &dg2_hdmi_49500,
+ &dg2_hdmi_50000,
+ &dg2_hdmi_57284,
+ &dg2_hdmi_58000,
+ &dg2_hdmi_65000,
+ &dg2_hdmi_71000,
+ &dg2_hdmi_74176,
+ &dg2_hdmi_75000,
+ &dg2_hdmi_78750,
+ &dg2_hdmi_85500,
+ &dg2_hdmi_88750,
+ &dg2_hdmi_106500,
+ &dg2_hdmi_108000,
+ &dg2_hdmi_115500,
+ &dg2_hdmi_119000,
+ &dg2_hdmi_135000,
+ &dg2_hdmi_138500,
+ &dg2_hdmi_147160,
+ &dg2_hdmi_148352,
+ &dg2_hdmi_154000,
+ &dg2_hdmi_162000,
+ &dg2_hdmi_209800,
+ &dg2_hdmi_241500,
+ &dg2_hdmi_262750,
+ &dg2_hdmi_268500,
+ &dg2_hdmi_296703,
+ &dg2_hdmi_497750,
+ &dg2_hdmi_592000,
+ &dg2_hdmi_593407,
NULL,
};
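/*
 * For reference, the port clock encoded by each table entry above can be
 * recovered from the MULTIPLIER, TX_CLK_DIV and FRACN fields. A minimal
 * sketch, assuming the 100 MHz DG2 SNPS PHY reference clock and using
 * REG_FIELD_GET() (the kernel's inverse of REG_FIELD_PREP()); this is
 * illustrative, not the driver's actual readout path:
 */
static int mpllb_state_to_port_clock(const struct intel_mpllb_state *s)
{
	u32 frac_quot = 0, frac_rem = 0, frac_den = 1;
	u32 multiplier, tx_clk_div, refclk = 100000; /* kHz, assumed */

	if (REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_EN, s->mpllb_fracn1)) {
		frac_quot = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_QUOT, s->mpllb_fracn2);
		frac_rem = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_REM, s->mpllb_fracn2);
		frac_den = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_DEN, s->mpllb_fracn1);
	}

	/* The MULTIPLIER field is stored pre-scaled: effective value = field / 2 + 16 */
	multiplier = REG_FIELD_GET(SNPS_PHY_MPLLB_MULTIPLIER, s->mpllb_div2) / 2 + 16;
	tx_clk_div = REG_FIELD_GET(SNPS_PHY_MPLLB_TX_CLK_DIV, s->mpllb_div);

	/* 64-bit intermediates to avoid overflowing u32 products */
	return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
				     DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, frac_rem), frac_den),
				     10 << (tx_clk_div + 16));
}
/*
 * Cross-check against dg2_hdmi_592000: multiplier field 86 -> 59,
 * frac 13107/65535, tx_clk_div 0, giving roughly
 * 100000 * (59 + 13107.2/65536) / 10 = 592000 kHz, matching .clock.
 */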
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 6773840f6cc7..e5af955b5600 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -246,7 +246,7 @@ static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
- u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
u32 mask = 0;
u32 val;
@@ -279,7 +279,7 @@ static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
- u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin];
+ u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
struct intel_uncore *uncore = &i915->uncore;
u32 val, mask = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 9379f3463344..dcf89d701f0f 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -39,6 +39,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_tv.h"
@@ -982,10 +983,10 @@ intel_tv_mode_vdisplay(const struct tv_mode *tv_mode)
static void
intel_tv_mode_to_mode(struct drm_display_mode *mode,
- const struct tv_mode *tv_mode)
+ const struct tv_mode *tv_mode,
+ int clock)
{
- mode->clock = tv_mode->clock /
- (tv_mode->oversample >> !tv_mode->progressive);
+ mode->clock = clock / (tv_mode->oversample >> !tv_mode->progressive);
/*
* tv_mode horizontal timings:
@@ -1143,7 +1144,7 @@ intel_tv_get_config(struct intel_encoder *encoder,
xsize = tmp >> 16;
ysize = tmp & 0xffff;
- intel_tv_mode_to_mode(&mode, &tv_mode);
+ intel_tv_mode_to_mode(&mode, &tv_mode, pipe_config->port_clock);
drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&mode));
@@ -1184,6 +1185,9 @@ intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(pipe_config->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_tv_connector_state *tv_conn_state =
to_intel_tv_connector_state(conn_state);
@@ -1192,6 +1196,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
&pipe_config->hw.adjusted_mode;
int hdisplay = adjusted_mode->crtc_hdisplay;
int vdisplay = adjusted_mode->crtc_vdisplay;
+ int ret;
if (!tv_mode)
return -EINVAL;
@@ -1206,7 +1211,13 @@ intel_tv_compute_config(struct intel_encoder *encoder,
pipe_config->port_clock = tv_mode->clock;
- intel_tv_mode_to_mode(adjusted_mode, tv_mode);
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
+ if (ret)
+ return ret;
+
+ pipe_config->clock_set = true;
+
+ intel_tv_mode_to_mode(adjusted_mode, tv_mode, pipe_config->port_clock);
drm_mode_set_crtcinfo(adjusted_mode, 0);
if (intel_tv_source_too_wide(dev_priv, hdisplay) ||
@@ -1804,7 +1815,7 @@ intel_tv_get_modes(struct drm_connector *connector)
* about the actual timings of the mode. We
* do ignore the margins though.
*/
- intel_tv_mode_to_mode(mode, tv_mode);
+ intel_tv_mode_to_mode(mode, tv_mode, tv_mode->clock);
if (count == 0) {
drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(mode));
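/*
 * The net effect of the intel_tv_mode_to_mode() change above: the dot
 * clock is now derived from whichever clock the caller passes (either the
 * computed port clock or the nominal tv_mode->clock) instead of always
 * from the latter. A worked example with hypothetical values:
 */
int port_clock = 108000;	/* kHz, e.g. from pipe_config->port_clock */
bool progressive = false;	/* interlaced mode */
int oversample = 8;

/* Interlaced halves the effective oversampling divider: 8 >> 1 = 4 */
int dotclock = port_clock / (oversample >> !progressive);	/* 27000 kHz */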
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 509b0a419c20..a9f44abfc9fc 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -76,6 +76,20 @@ struct bdb_header {
} __packed;
/*
+ * BDB version number dependencies are documented as:
+ *
+ * <start>+
+ * indicates the field was introduced in version <start>
+ * and is still valid
+ *
+ * <start>-<end>
+ * indicates the field was introduced in version <start>
+ * and obsoleted in version <end>+1.
+ *
+ * ??? indicates the specific version number is unknown
+ */
+
+/*
* There are several types of BIOS data blocks (BDBs), each block has
* an ID and size in the first 3 bytes (ID in first, size in next 2).
* Known types are listed below.
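/*
 * A minimal sketch of how such a version annotation is typically honored
 * when parsing; the helper is hypothetical, but afc_startup_config is the
 * "249+" field annotated below:
 */
static u8 vbt_afc_startup_config(const struct bdb_general_features *general,
				 u16 bdb_version)
{
	/* The field only exists from BDB version 249 onwards; treat older VBTs as 0 */
	if (bdb_version < 249)
		return 0;

	return general->afc_startup_config;
}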
@@ -144,12 +158,12 @@ struct bdb_general_features {
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rotate_180:1; /* 181 */
+ u8 rotate_180:1; /* 181+ */
u8 fdi_rx_polarity_inverted:1;
- u8 vbios_extended_mode:1; /* 160 */
- u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160 */
- u8 panel_best_fit_timing:1; /* 160 */
- u8 ignore_strap_state:1; /* 160 */
+ u8 vbios_extended_mode:1; /* 160+ */
+ u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160+ */
+ u8 panel_best_fit_timing:1; /* 160+ */
+ u8 ignore_strap_state:1; /* 160+ */
/* bits 4 */
u8 legacy_monitor_detect;
@@ -164,11 +178,11 @@ struct bdb_general_features {
u8 rsvd11:2; /* finish byte */
/* bits 6 */
- u8 tc_hpd_retry_timeout:7; /* 242 */
+ u8 tc_hpd_retry_timeout:7; /* 242+ */
u8 rsvd12:1;
/* bits 7 */
- u8 afc_startup_config:2;/* 249 */
+ u8 afc_startup_config:2; /* 249+ */
u8 rsvd13:6;
} __packed;
@@ -183,6 +197,15 @@ struct bdb_general_features {
#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
/* Device handle */
+#define DEVICE_HANDLE_CRT 0x0001
+#define DEVICE_HANDLE_EFP1 0x0004
+#define DEVICE_HANDLE_EFP2 0x0040
+#define DEVICE_HANDLE_EFP3 0x0020
+#define DEVICE_HANDLE_EFP4 0x0010 /* 194+ */
+#define DEVICE_HANDLE_EFP5 0x0002 /* 215+ */
+#define DEVICE_HANDLE_EFP6 0x0001 /* 217+ */
+#define DEVICE_HANDLE_EFP7 0x0100 /* 217+ */
+#define DEVICE_HANDLE_EFP8 0x0200 /* 217+ */
#define DEVICE_HANDLE_LFP1 0x0008
#define DEVICE_HANDLE_LFP2 0x0080
@@ -275,27 +298,27 @@ struct bdb_general_features {
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
-#define DVO_PORT_DPE 11 /* 193 */
-#define DVO_PORT_HDMIE 12 /* 193 */
+#define DVO_PORT_DPE 11 /* 193+ */
+#define DVO_PORT_HDMIE 12 /* 193+ */
#define DVO_PORT_DPF 13 /* N/A */
#define DVO_PORT_HDMIF 14 /* N/A */
-#define DVO_PORT_DPG 15 /* 217 */
-#define DVO_PORT_HDMIG 16 /* 217 */
-#define DVO_PORT_DPH 17 /* 217 */
-#define DVO_PORT_HDMIH 18 /* 217 */
-#define DVO_PORT_DPI 19 /* 217 */
-#define DVO_PORT_HDMII 20 /* 217 */
-#define DVO_PORT_MIPIA 21 /* 171 */
-#define DVO_PORT_MIPIB 22 /* 171 */
-#define DVO_PORT_MIPIC 23 /* 171 */
-#define DVO_PORT_MIPID 24 /* 171 */
-
-#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204 */
-#define HDMI_MAX_DATA_RATE_297 1 /* 204 */
-#define HDMI_MAX_DATA_RATE_165 2 /* 204 */
-#define HDMI_MAX_DATA_RATE_594 3 /* 249 */
-#define HDMI_MAX_DATA_RATE_340 4 /* 249 */
-#define HDMI_MAX_DATA_RATE_300 5 /* 249 */
+#define DVO_PORT_DPG 15 /* 217+ */
+#define DVO_PORT_HDMIG 16 /* 217+ */
+#define DVO_PORT_DPH 17 /* 217+ */
+#define DVO_PORT_HDMIH 18 /* 217+ */
+#define DVO_PORT_DPI 19 /* 217+ */
+#define DVO_PORT_HDMII 20 /* 217+ */
+#define DVO_PORT_MIPIA 21 /* 171+ */
+#define DVO_PORT_MIPIB 22 /* 171+ */
+#define DVO_PORT_MIPIC 23 /* 171+ */
+#define DVO_PORT_MIPID 24 /* 171+ */
+
+#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204+ */
+#define HDMI_MAX_DATA_RATE_297 1 /* 204+ */
+#define HDMI_MAX_DATA_RATE_165 2 /* 204+ */
+#define HDMI_MAX_DATA_RATE_594 3 /* 249+ */
+#define HDMI_MAX_DATA_RATE_340 4 /* 249+ */
+#define HDMI_MAX_DATA_RATE_300 5 /* 249+ */
#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
@@ -362,10 +385,10 @@ enum vbt_gmbus_ddi {
* basically any of the fields to ensure the correct interpretation for the BDB
* version in question.
*
- * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve
- * space for the full structure below, and initialize the tail not actually
- * present in VBT to zeros. Accessing those fields is fine, as long as the
- * default zero is taken into account, again according to the BDB version.
+ * When we copy the child device configs to dev_priv->display.vbt.child_dev, we
+ * reserve space for the full structure below, and initialize the tail not
+ * actually present in VBT to zeros. Accessing those fields is fine, as long as
+ * the default zero is taken into account, again according to the BDB version.
*
* BDB versions 155 and below are considered legacy, and version 155 seems to be
* a baseline for some of the VBT documentation. When adding new fields, please
@@ -379,20 +402,30 @@ struct child_device_config {
u8 device_id[10]; /* ascii string */
struct {
u8 i2c_speed;
- u8 dp_onboard_redriver; /* 158 */
- u8 dp_ondock_redriver; /* 158 */
- u8 hdmi_level_shifter_value:5; /* 169 */
- u8 hdmi_max_data_rate:3; /* 204 */
- u16 dtd_buf_ptr; /* 161 */
- u8 edidless_efp:1; /* 161 */
- u8 compression_enable:1; /* 198 */
- u8 compression_method_cps:1; /* 198 */
- u8 ganged_edp:1; /* 202 */
- u8 reserved0:4;
- u8 compression_structure_index:4; /* 198 */
- u8 reserved1:4;
- u8 slave_port; /* 202 */
- u8 reserved2;
+ u8 dp_onboard_redriver_preemph:3; /* 158+ */
+ u8 dp_onboard_redriver_vswing:3; /* 158+ */
+ u8 dp_onboard_redriver_present:1; /* 158+ */
+ u8 reserved0:1;
+ u8 dp_ondock_redriver_preemph:3; /* 158+ */
+ u8 dp_ondock_redriver_vswing:3; /* 158+ */
+ u8 dp_ondock_redriver_present:1; /* 158+ */
+ u8 reserved1:1;
+ u8 hdmi_level_shifter_value:5; /* 158+ */
+ u8 hdmi_max_data_rate:3; /* 204+ */
+ u16 dtd_buf_ptr; /* 161+ */
+ u8 edidless_efp:1; /* 161+ */
+ u8 compression_enable:1; /* 198+ */
+ u8 compression_method_cps:1; /* 198+ */
+ u8 ganged_edp:1; /* 202+ */
+ u8 lttpr_non_transparent:1; /* 235+ */
+ u8 disable_compression_for_ext_disp:1; /* 251+ */
+ u8 reserved2:2;
+ u8 compression_structure_index:4; /* 198+ */
+ u8 reserved3:4;
+ u8 hdmi_max_frl_rate:4; /* 237+ */
+ u8 hdmi_max_frl_rate_valid:1; /* 237+ */
+ u8 reserved4:3; /* 237+ */
+ u8 reserved5;
} __packed;
} __packed;
@@ -412,16 +445,16 @@ struct child_device_config {
u8 ddc2_pin;
} __packed;
struct {
- u8 efp_routed:1; /* 158 */
- u8 lane_reversal:1; /* 184 */
- u8 lspcon:1; /* 192 */
- u8 iboost:1; /* 196 */
- u8 hpd_invert:1; /* 196 */
- u8 use_vbt_vswing:1; /* 218 */
- u8 flag_reserved:2;
- u8 hdmi_support:1; /* 158 */
- u8 dp_support:1; /* 158 */
- u8 tmds_support:1; /* 158 */
+ u8 efp_routed:1; /* 158+ */
+ u8 lane_reversal:1; /* 184+ */
+ u8 lspcon:1; /* 192+ */
+ u8 iboost:1; /* 196+ */
+ u8 hpd_invert:1; /* 196+ */
+ u8 use_vbt_vswing:1; /* 218+ */
+ u8 dp_max_lane_count:2; /* 244+ */
+ u8 hdmi_support:1; /* 158+ */
+ u8 dp_support:1; /* 158+ */
+ u8 tmds_support:1; /* 158+ */
u8 support_reserved:5;
u8 aux_channel;
u8 dongle_detect;
@@ -429,7 +462,7 @@ struct child_device_config {
} __packed;
u8 pipe_cap:2;
- u8 sdvo_stall:1; /* 158 */
+ u8 sdvo_stall:1; /* 158+ */
u8 hpd_status:2;
u8 integrated_encoder:1;
u8 capabilities_reserved:2;
@@ -437,21 +470,21 @@ struct child_device_config {
union {
u8 dvo2_wiring;
- u8 mipi_bridge_type; /* 171 */
+ u8 mipi_bridge_type; /* 171+ */
} __packed;
u16 extended_type;
u8 dvo_function;
- u8 dp_usb_type_c:1; /* 195 */
- u8 tbt:1; /* 209 */
- u8 flags2_reserved:2; /* 195 */
- u8 dp_port_trace_length:4; /* 209 */
- u8 dp_gpio_index; /* 195 */
- u16 dp_gpio_pin_num; /* 195 */
- u8 dp_iboost_level:4; /* 196 */
- u8 hdmi_iboost_level:4; /* 196 */
- u8 dp_max_link_rate:3; /* 216/230 GLK+ */
- u8 dp_max_link_rate_reserved:5; /* 216/230 */
+ u8 dp_usb_type_c:1; /* 195+ */
+ u8 tbt:1; /* 209+ */
+ u8 flags2_reserved:2; /* 195+ */
+ u8 dp_port_trace_length:4; /* 209+ */
+ u8 dp_gpio_index; /* 195+ */
+ u16 dp_gpio_pin_num; /* 195+ */
+ u8 dp_iboost_level:4; /* 196+ */
+ u8 hdmi_iboost_level:4; /* 196+ */
+ u8 dp_max_link_rate:3; /* 216+ */
+ u8 dp_max_link_rate_reserved:5; /* 216+ */
} __packed;
struct bdb_general_definitions {
@@ -459,7 +492,7 @@ struct bdb_general_definitions {
u8 crt_ddc_gmbus_pin;
/* DPMS bits */
- u8 dpms_acpi:1;
+ u8 dpms_non_acpi:1;
u8 skip_boot_crt_detect:1;
u8 dpms_aim:1;
u8 rsvd1:5; /* finish byte */
@@ -488,25 +521,25 @@ struct bdb_general_definitions {
struct psr_table {
/* Feature bits */
- u8 full_link:1;
- u8 require_aux_to_wakeup:1;
+ u8 full_link:1; /* 165+ */
+ u8 require_aux_to_wakeup:1; /* 165+ */
u8 feature_bits_rsvd:6;
/* Wait times */
- u8 idle_frames:4;
- u8 lines_to_wait:3;
+ u8 idle_frames:4; /* 165+ */
+ u8 lines_to_wait:3; /* 165+ */
u8 wait_times_rsvd:1;
/* TP wake up time in multiple of 100 */
- u16 tp1_wakeup_time;
- u16 tp2_tp3_wakeup_time;
+ u16 tp1_wakeup_time; /* 165+ */
+ u16 tp2_tp3_wakeup_time; /* 165+ */
} __packed;
struct bdb_psr {
struct psr_table psr_table[16];
/* PSR2 TP2/TP3 wakeup time for 16 panels */
- u32 psr2_tp2_tp3_wakeup_time;
+ u32 psr2_tp2_tp3_wakeup_time; /* 226+ */
} __packed;
/*
@@ -519,9 +552,10 @@ struct bdb_psr {
#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3
struct bdb_driver_features {
+ /* Driver bits */
u8 boot_dev_algorithm:1;
- u8 block_display_switch:1;
- u8 allow_display_switch:1;
+ u8 allow_display_switch_dvd:1;
+ u8 allow_display_switch_dos:1;
u8 hotplug_dvo:1;
u8 dual_view_zoom:1;
u8 int15h_hook:1;
@@ -533,6 +567,7 @@ struct bdb_driver_features {
u8 boot_mode_bpp;
u8 boot_mode_refresh;
+ /* Extended Driver Bits 1 */
u16 enable_lfp_primary:1;
u16 selective_mode_pruning:1;
u16 dual_frequency:1;
@@ -548,29 +583,40 @@ struct bdb_driver_features {
u16 tv_hotplug:1;
u16 hdmi_config:2;
- u8 static_display:1;
- u8 reserved2:7;
+ /* Driver Flags 1 */
+ u8 static_display:1; /* 163+ */
+ u8 embedded_platform:1; /* 163+ */
+ u8 display_subsystem_enable:1; /* 163+ */
+ u8 reserved0:5;
+
u16 legacy_crt_max_x;
u16 legacy_crt_max_y;
u8 legacy_crt_max_refresh;
- u8 hdmi_termination;
- u8 custom_vbt_version;
- /* Driver features data block */
- u16 rmpm_enabled:1;
- u16 s2ddt_enabled:1;
- u16 dpst_enabled:1;
- u16 bltclt_enabled:1;
- u16 adb_enabled:1;
- u16 drrs_enabled:1;
- u16 grs_enabled:1;
- u16 gpmt_enabled:1;
- u16 tbt_enabled:1;
- u16 psr_enabled:1;
- u16 ips_enabled:1;
- u16 reserved3:1;
- u16 dmrrs_enabled:1;
- u16 reserved4:2;
+ /* Extended Driver Bits 2 */
+ u8 hdmi_termination:1;
+ u8 cea861d_hdmi_support:1;
+ u8 self_refresh_enable:1;
+ u8 reserved1:5;
+
+ u8 custom_vbt_version; /* 155+ */
+
+ /* Driver Feature Flags */
+ u16 rmpm_enabled:1; /* 165+ */
+ u16 s2ddt_enabled:1; /* 165+ */
+ u16 dpst_enabled:1; /* 165-227 */
+ u16 bltclt_enabled:1; /* 165+ */
+ u16 adb_enabled:1; /* 165-227 */
+ u16 drrs_enabled:1; /* 165-227 */
+ u16 grs_enabled:1; /* 165+ */
+ u16 gpmt_enabled:1; /* 165+ */
+ u16 tbt_enabled:1; /* 165+ */
+ u16 psr_enabled:1; /* 165-227 */
+ u16 ips_enabled:1; /* 165+ */
+ u16 dpfs_enabled:1; /* 165+ */
+ u16 dmrrs_enabled:1; /* 174-227 */
+ u16 adt_enabled:1; /* ???-228 */
+ u16 hpd_wake:1; /* 201-240 */
u16 pc_feature_valid:1;
} __packed;
@@ -657,7 +703,7 @@ struct bdb_sdvo_panel_dtds {
struct edp_fast_link_params {
- u8 rate:4;
+ u8 rate:4; /* ???-223 */
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
@@ -690,18 +736,18 @@ struct bdb_edp {
u32 sdrrs_msa_timing_delay;
/* ith bit indicates enabled/disabled for (i+1)th panel */
- u16 edp_s3d_feature; /* 162 */
- u16 edp_t3_optimization; /* 165 */
- u64 edp_vswing_preemph; /* 173 */
- u16 fast_link_training; /* 182 */
- u16 dpcd_600h_write_required; /* 185 */
- struct edp_pwm_delays pwm_delays[16]; /* 186 */
- u16 full_link_params_provided; /* 199 */
- struct edp_full_link_params full_link_params[16]; /* 199 */
- u16 apical_enable; /* 203 */
- struct edp_apical_params apical_params[16]; /* 203 */
- u16 edp_fast_link_training_rate[16]; /* 224 */
- u16 edp_max_port_link_rate[16]; /* 244 */
+ u16 edp_s3d_feature; /* 162+ */
+ u16 edp_t3_optimization; /* 165+ */
+ u64 edp_vswing_preemph; /* 173+ */
+ u16 fast_link_training; /* 182+ */
+ u16 dpcd_600h_write_required; /* 185+ */
+ struct edp_pwm_delays pwm_delays[16]; /* 186+ */
+ u16 full_link_params_provided; /* 199+ */
+ struct edp_full_link_params full_link_params[16]; /* 199+ */
+ u16 apical_enable; /* 203+ */
+ struct edp_apical_params apical_params[16]; /* 203+ */
+ u16 edp_fast_link_training_rate[16]; /* 224+ */
+ u16 edp_max_port_link_rate[16]; /* 244+ */
} __packed;
/*
@@ -710,14 +756,14 @@ struct bdb_edp {
struct bdb_lvds_options {
u8 panel_type;
- u8 panel_type2; /* 212 */
+ u8 panel_type2; /* 212+ */
/* LVDS capabilities, stored in a dword */
u8 pfit_mode:2;
u8 pfit_text_mode_enhanced:1;
u8 pfit_gfx_mode_enhanced:1;
u8 pfit_ratio_auto:1;
u8 pixel_dither:1;
- u8 lvds_edid:1;
+ u8 lvds_edid:1; /* ???-240 */
u8 rsvd2:1;
u8 rsvd4;
/* LVDS Panel channel bits stored here */
@@ -731,11 +777,11 @@ struct bdb_lvds_options {
/* LVDS panel type bits stored here */
u32 dps_panel_type_bits;
/* LVDS backlight control type bits stored here */
- u32 blt_control_type_bits;
+ u32 blt_control_type_bits; /* ???-240 */
- u16 lcdvcc_s0_enable; /* 200 */
- u32 rotation; /* 228 */
- u32 position; /* 240 */
+ u16 lcdvcc_s0_enable; /* 200+ */
+ u32 rotation; /* 228+ */
+ u32 position; /* 240+ */
} __packed;
/*
@@ -756,7 +802,7 @@ struct lvds_lfp_data_ptr {
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries;
struct lvds_lfp_data_ptr ptr[16];
- struct lvds_lfp_data_ptr_table panel_name; /* 156-163? */
+ struct lvds_lfp_data_ptr_table panel_name; /* (156-163?)+ */
} __packed;
/*
@@ -808,20 +854,20 @@ struct lvds_lfp_panel_name {
} __packed;
struct lvds_lfp_black_border {
- u8 top; /* 227 */
- u8 bottom; /* 227 */
- u8 left; /* 238 */
- u8 right; /* 238 */
+ u8 top; /* 227+ */
+ u8 bottom; /* 227+ */
+ u8 left; /* 238+ */
+ u8 right; /* 238+ */
} __packed;
struct bdb_lvds_lfp_data_tail {
- struct lvds_lfp_panel_name panel_name[16]; /* 156-163? */
- u16 scaling_enable; /* 187 */
- u8 seamless_drrs_min_refresh_rate[16]; /* 188 */
- u8 pixel_overlap_count[16]; /* 208 */
- struct lvds_lfp_black_border black_border[16]; /* 227 */
- u16 dual_lfp_port_sync_enable; /* 231 */
- u16 gpu_dithering_for_banding_artifacts; /* 245 */
+ struct lvds_lfp_panel_name panel_name[16]; /* (156-163?)+ */
+ u16 scaling_enable; /* 187+ */
+ u8 seamless_drrs_min_refresh_rate[16]; /* 188+ */
+ u8 pixel_overlap_count[16]; /* 208+ */
+ struct lvds_lfp_black_border black_border[16]; /* 227+ */
+ u16 dual_lfp_port_sync_enable; /* 231+ */
+ u16 gpu_dithering_for_banding_artifacts; /* 245+ */
} __packed;
/*
@@ -836,7 +882,7 @@ struct lfp_backlight_data_entry {
u8 active_low_pwm:1;
u8 obsolete1:5;
u16 pwm_freq_hz;
- u8 min_brightness; /* Obsolete from 234+ */
+ u8 min_brightness; /* ???-233 */
u8 obsolete2;
u8 obsolete3;
} __packed;
@@ -859,7 +905,7 @@ struct lfp_brightness_level {
struct bdb_lfp_backlight_data {
u8 entry_size;
struct lfp_backlight_data_entry data[16];
- u8 level[16]; /* Obsolete from 234+ */
+ u8 level[16]; /* ???-233 */
struct lfp_backlight_control_method backlight_control[16];
struct lfp_brightness_level brightness_level[16]; /* 234+ */
struct lfp_brightness_level brightness_min_level[16]; /* 234+ */
@@ -874,8 +920,8 @@ struct lfp_power_features {
u8 reserved1:1;
u8 power_conservation_pref:3;
u8 reserved2:1;
- u8 lace_enabled_status:1;
- u8 lace_support:1;
+ u8 lace_enabled_status:1; /* 210+ */
+ u8 lace_support:1; /* 210+ */
u8 als_enable:1;
} __packed;
@@ -895,24 +941,24 @@ struct aggressiveness_profile2_entry {
} __packed;
struct bdb_lfp_power {
- struct lfp_power_features features;
+ struct lfp_power_features features; /* ???-227 */
struct als_data_entry als[5];
- u8 lace_aggressiveness_profile:3;
+ u8 lace_aggressiveness_profile:3; /* 210-227 */
u8 reserved1:5;
- u16 dpst;
- u16 psr;
- u16 drrs;
- u16 lace_support;
- u16 adt;
- u16 dmrrs;
- u16 adb;
- u16 lace_enabled_status;
- struct aggressiveness_profile_entry aggressiveness[16];
- u16 hobl; /* 232+ */
- u16 vrr_feature_enabled; /* 233+ */
- u16 elp; /* 247+ */
- u16 opst; /* 247+ */
- struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
+ u16 dpst; /* 228+ */
+ u16 psr; /* 228+ */
+ u16 drrs; /* 228+ */
+ u16 lace_support; /* 228+ */
+ u16 adt; /* 228+ */
+ u16 dmrrs; /* 228+ */
+ u16 adb; /* 228+ */
+ u16 lace_enabled_status; /* 228+ */
+ struct aggressiveness_profile_entry aggressiveness[16]; /* 228+ */
+ u16 hobl; /* 232+ */
+ u16 vrr_feature_enabled; /* 233+ */
+ u16 elp; /* 247+ */
+ u16 opst; /* 247+ */
+ struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
} __packed;
/*
@@ -922,10 +968,10 @@ struct bdb_lfp_power {
#define MAX_MIPI_CONFIGURATIONS 6
struct bdb_mipi_config {
- struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175 */
- struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177 */
- struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186 */
- u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190 */
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175+ */
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177+ */
+ struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186+ */
+ u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190+ */
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 43e1bbc1e303..269f9792390d 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -344,7 +344,7 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!INTEL_INFO(i915)->display.has_dsc)
+ if (!RUNTIME_INFO(i915)->has_dsc)
return false;
if (DISPLAY_VER(i915) >= 12)
@@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
u8 i = 0;
vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
- vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
pipe_config->dsc.slice_count);
@@ -597,6 +596,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_VER_MIN_SHIFT |
vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+ if (vdsc_cfg->dsc_version_minor == 2)
+ pps_val |= DSC_ALT_ICH_SEL;
if (vdsc_cfg->block_pred_enable)
pps_val |= DSC_BLOCK_PREDICTION;
if (vdsc_cfg->convert_rgb)
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 04250a0fec3c..5eac99021875 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -142,11 +142,16 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
* For XE_LPD+, we use guardband and pipeline override
* is deprecated.
*/
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(i915) >= 13) {
+ /*
+ * FIXME: Subtract Window2 delay from below value.
+ *
+ * Window2 specifies the time required to program the DSB (Window2),
+ * in number of scan lines. Assume 0 when no DSB is used.
+ */
crtc_state->vrr.guardband =
- crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay -
- i915->window2_delay;
- else
+ crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay;
+ } else {
/*
* FIXME: s/4/framestart_delay/ to get consistent
* earliest/latest points for register latching regardless
@@ -159,6 +164,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
*/
crtc_state->vrr.pipeline_full =
min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+ }
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
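/*
 * Worked example of the guardband math above, with hypothetical timings
 * of vmin = 1125 total scan lines and crtc_vdisplay = 1080:
 */
int vmin = 1125, vdisplay = 1080;

int guardband = vmin - vdisplay;			/* 45 lines, DISPLAY_VER >= 13 */
int pipeline_full = min(255, vmin - vdisplay - 4 - 1);	/* 40 lines, older platforms */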
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 4d6a27757065..7cb713043408 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -14,11 +14,11 @@
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fbc.h"
-#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "skl_watermark.h"
#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
@@ -1928,7 +1928,7 @@ static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
enum intel_fbc_id fbc_id, enum plane_id plane_id)
{
- if ((INTEL_INFO(dev_priv)->display.fbc_mask & BIT(fbc_id)) == 0)
+ if ((RUNTIME_INFO(dev_priv)->fbc_mask & BIT(fbc_id)) == 0)
return false;
return plane_id == PLANE_PRIMARY;
@@ -1940,7 +1940,7 @@ static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv,
enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe);
if (skl_plane_has_fbc(dev_priv, fbc_id, plane_id))
- return dev_priv->fbc[fbc_id];
+ return dev_priv->display.fbc[fbc_id];
else
return NULL;
}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
new file mode 100644
index 000000000000..18178b01375e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -0,0 +1,3574 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_blend.h>
+
+#include "intel_atomic.h"
+#include "intel_atomic_plane.h"
+#include "intel_bw.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "skl_watermark.h"
+
+#include "i915_drv.h"
+#include "i915_fixed.h"
+#include "i915_reg.h"
+#include "intel_pcode.h"
+#include "intel_pm.h"
+
+static void skl_sagv_disable(struct drm_i915_private *i915);
+
+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ bool is_planar;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ u32 linetime_us;
+ u32 dbuf_block_size;
+};
+
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
+{
+ u8 enabled_slices = 0;
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(i915, slice) {
+ if (intel_uncore_read(&i915->uncore,
+ DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
+ enabled_slices |= BIT(slice);
+ }
+
+ return enabled_slices;
+}
+
+/*
+ * FIXME: We still don't have the proper code to detect if we need to apply the WA,
+ * so assume we'll always need it in order to avoid underruns.
+ */
+static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) == 9;
+}
+
+static bool
+intel_has_sagv(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) &&
+ i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED;
+}
+
+static u32
+intel_sagv_block_time(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 14) {
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore, MTL_LATENCY_SAGV);
+
+ return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ u32 val = 0;
+ int ret;
+
+ ret = snb_pcode_read(&i915->uncore,
+ GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
+ if (ret) {
+ drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n");
+ return 0;
+ }
+
+ return val;
+ } else if (DISPLAY_VER(i915) == 11) {
+ return 10;
+ } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) {
+ return 30;
+ } else {
+ return 0;
+ }
+}
+
+static void intel_sagv_init(struct drm_i915_private *i915)
+{
+ if (!intel_has_sagv(i915))
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+
+ /*
+ * Probe to see if we have working SAGV control.
+ * For icl+ this was already determined by intel_bw_init_hw().
+ */
+ if (DISPLAY_VER(i915) < 11)
+ skl_sagv_disable(i915);
+
+ drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN);
+
+ i915->display.sagv.block_time_us = intel_sagv_block_time(i915);
+
+ drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
+ str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us);
+
+ /* avoid overflow when adding with wm0 latency/etc. */
+ if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX,
+ "Excessive SAGV block time %u, ignoring\n",
+ i915->display.sagv.block_time_us))
+ i915->display.sagv.block_time_us = 0;
+
+ if (!intel_has_sagv(i915))
+ i915->display.sagv.block_time_us = 0;
+}
+
+/*
+ * SAGV dynamically adjusts the system agent voltage and clock frequencies
+ * depending on power and performance requirements. The display engine access
+ * to system memory is blocked during the adjustment time. Because of the
+ * blocking time, having this enabled can cause full system hangs and/or pipe
+ * underruns if we don't meet all of the following requirements:
+ *
+ * - <= 1 pipe enabled
+ * - All planes can enable watermarks for latencies >= SAGV engine block time
+ * - We're not using an interlaced display configuration
+ */
+static void skl_sagv_enable(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (i915->display.sagv.status == I915_SAGV_ENABLED)
+ return;
+
+ drm_dbg_kms(&i915->drm, "Enabling SAGV\n");
+ ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
+
+ /* We don't need to wait for SAGV when enabling */
+
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have SAGV.
+ */
+ if (IS_SKYLAKE(i915) && ret == -ENXIO) {
+ drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ return;
+ } else if (ret < 0) {
+ drm_err(&i915->drm, "Failed to enable SAGV\n");
+ return;
+ }
+
+ i915->display.sagv.status = I915_SAGV_ENABLED;
+}
+
+static void skl_sagv_disable(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (i915->display.sagv.status == I915_SAGV_DISABLED)
+ return;
+
+ drm_dbg_kms(&i915->drm, "Disabling SAGV\n");
+ /* bspec says to keep retrying for at least 1 ms */
+ ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+ 1);
+ /*
+ * Some skl systems, pre-release machines in particular,
+ * don't actually have SAGV.
+ */
+ if (IS_SKYLAKE(i915) && ret == -ENXIO) {
+ drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n");
+ i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
+ return;
+ } else if (ret < 0) {
+ drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret);
+ return;
+ }
+
+ i915->display.sagv.status = I915_SAGV_DISABLED;
+}
+
+static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+
+ if (!new_bw_state)
+ return;
+
+ if (!intel_can_enable_sagv(i915, new_bw_state))
+ skl_sagv_disable(i915);
+}
+
+static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+
+ if (!new_bw_state)
+ return;
+
+ if (intel_can_enable_sagv(i915, new_bw_state))
+ skl_sagv_enable(i915);
+}
+
+static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask;
+ new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Restrict required qgv points before updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(i915, new_mask);
+}
+
+static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_bw_state *old_bw_state =
+ intel_atomic_get_old_bw_state(state);
+ const struct intel_bw_state *new_bw_state =
+ intel_atomic_get_new_bw_state(state);
+ u16 old_mask, new_mask;
+
+ if (!new_bw_state)
+ return;
+
+ old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+ new_mask = new_bw_state->qgv_points_mask;
+
+ if (old_mask == new_mask)
+ return;
+
+ WARN_ON(!new_bw_state->base.changed);
+
+ drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+ old_mask, new_mask);
+
+ /*
+ * Allow required qgv points after updating the configuration.
+ * According to BSpec we can't mask and unmask qgv points at the same
+ * time. Also masking should be done before updating the configuration
+ * and unmasking afterwards.
+ */
+ icl_pcode_restrict_qgv_points(i915, new_mask);
+}
+
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+ * This is different from the situation where we have SAGV but can't
+ * afford it due to DBuf limitations: if SAGV is completely disabled
+ * in the BIOS, we are not even allowed to send a PCode request, as
+ * it would throw an error. So we have to check for that here.
+ */
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ icl_sagv_pre_plane_update(state);
+ else
+ skl_sagv_pre_plane_update(state);
+}
+
+void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ /*
+ * Just return if we can't control SAGV or don't have it.
+ * This is different from the situation where we have SAGV but can't
+ * afford it due to DBuf limitations: if SAGV is completely disabled
+ * in the BIOS, we are not even allowed to send a PCode request, as
+ * it would throw an error. So we have to check for that here.
+ */
+ if (!intel_has_sagv(i915))
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ icl_sagv_post_plane_update(state);
+ else
+ skl_sagv_post_plane_update(state);
+}
+
+static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum plane_id plane_id;
+ int max_level = INT_MAX;
+
+ if (!intel_has_sagv(i915))
+ return false;
+
+ if (!crtc_state->hw.active)
+ return true;
+
+ if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ int level;
+
+ /* Skip this plane if it's not enabled */
+ if (!wm->wm[0].enable)
+ continue;
+
+ /* Find the highest enabled wm level for this plane */
+ for (level = ilk_wm_max_level(i915);
+ !wm->wm[level].enable; --level)
+ { }
+
+ /* Highest common enabled wm level for all planes */
+ max_level = min(level, max_level);
+ }
+
+ /* No enabled planes? */
+ if (max_level == INT_MAX)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ /*
+ * All enabled planes must have a common enabled wm level that
+ * can tolerate memory latencies higher than sagv_block_time_us.
+ */
+ if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
+ return false;
+ }
+
+ return true;
+}
+
+static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+
+ if (!crtc_state->hw.active)
+ return true;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (wm->wm[0].enable && !wm->sagv.wm0.enable)
+ return false;
+ }
+
+ return true;
+}
+
+static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) >= 12)
+ return tgl_crtc_can_enable_sagv(crtc_state);
+ else
+ return skl_crtc_can_enable_sagv(crtc_state);
+}
+
+bool intel_can_enable_sagv(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state)
+{
+ if (DISPLAY_VER(i915) < 11 &&
+ bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
+ return false;
+
+ return bw_state->pipe_sagv_reject == 0;
+}
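/*
 * Note: active_pipes is a bitmask, so the is_power_of_2() check above
 * implements the "<= 1 pipe enabled" pre-icl SAGV requirement from the
 * comment earlier in this file (more than one bit set means more than
 * one pipe active).
 */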
+
+static int intel_compute_sagv_mask(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ int ret;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_bw_state *new_bw_state = NULL;
+ const struct intel_bw_state *old_bw_state = NULL;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ new_bw_state = intel_atomic_get_bw_state(state);
+ if (IS_ERR(new_bw_state))
+ return PTR_ERR(new_bw_state);
+
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+
+ if (intel_crtc_can_enable_sagv(new_crtc_state))
+ new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
+ else
+ new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
+ }
+
+ if (!new_bw_state)
+ return 0;
+
+ new_bw_state->active_pipes =
+ intel_calc_active_pipes(state, old_bw_state->active_pipes);
+
+ if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ if (intel_can_enable_sagv(i915, new_bw_state) !=
+ intel_can_enable_sagv(i915, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc,
+ new_crtc_state, i) {
+ struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ /*
+ * We store use_sagv_wm in the crtc state rather than relying on
+ * that bw state since we have no convenient way to get at the
+ * latter from the plane commit hooks (especially in the legacy
+ * cursor case)
+ */
+ pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
+ DISPLAY_VER(i915) >= 12 &&
+ intel_can_enable_sagv(i915, new_bw_state);
+ }
+
+ return 0;
+}
+
+static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
+ u16 start, u16 end)
+{
+ entry->start = start;
+ entry->end = end;
+
+ return end;
+}
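/*
 * Returning 'end' lets callers carve consecutive entries out of one DDB
 * range by chaining calls; a sketch with hypothetical sizes and names:
 */
u16 start = 0;

start = skl_ddb_entry_init(&ddb[PLANE_PRIMARY], start, start + 256);
start = skl_ddb_entry_init(&ddb[PLANE_CURSOR], start, start + 32);
/* ddb[PLANE_PRIMARY] = [0, 256), ddb[PLANE_CURSOR] = [256, 288) */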
+
+static int intel_dbuf_slice_size(struct drm_i915_private *i915)
+{
+ return INTEL_INFO(i915)->display.dbuf.size /
+ hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask);
+}
+
+static void
+skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
+ struct skl_ddb_entry *ddb)
+{
+ int slice_size = intel_dbuf_slice_size(i915);
+
+ if (!slice_mask) {
+ ddb->start = 0;
+ ddb->end = 0;
+ return;
+ }
+
+ ddb->start = (ffs(slice_mask) - 1) * slice_size;
+ ddb->end = fls(slice_mask) * slice_size;
+
+ WARN_ON(ddb->start >= ddb->end);
+ WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size);
+}
+
+static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+{
+ struct skl_ddb_entry ddb;
+
+ if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
+ slice_mask = BIT(DBUF_S1);
+ else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
+ slice_mask = BIT(DBUF_S3);
+
+ skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+
+ return ddb.start;
+}
+
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+ const struct skl_ddb_entry *entry)
+{
+ int slice_size = intel_dbuf_slice_size(i915);
+ enum dbuf_slice start_slice, end_slice;
+ u8 slice_mask = 0;
+
+ if (!skl_ddb_entry_size(entry))
+ return 0;
+
+ start_slice = entry->start / slice_size;
+ end_slice = (entry->end - 1) / slice_size;
+
+ /*
+ * A per-plane DDB entry can, in the worst case, span multiple slices,
+ * but a single entry is always contiguous.
+ */
+ while (start_slice <= end_slice) {
+ slice_mask |= BIT(start_slice);
+ start_slice++;
+ }
+
+ return slice_mask;
+}
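/*
 * Worked example, assuming a 2048-block DBuf split into 4 slices
 * (slice_size = 512): an entry covering blocks [600, 1100) gives
 * start_slice = 600 / 512 = 1 and end_slice = 1099 / 512 = 2, so the
 * returned mask is BIT(1) | BIT(2) = 0x6.
 */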
+
+static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int hdisplay, vdisplay;
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ /*
+	 * The watermark/ddb requirement depends heavily on the width of
+	 * the framebuffer, so instead of allocating DDB equally among
+	 * pipes, distribute it based on the width of each display.
+ */
+ drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
+
+ return hdisplay;
+}
+
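+/*
+ * Sum the weights of all pipes sharing @for_pipe's slice set:
+ * weight_start/weight_end bracket @for_pipe within weight_total, so the
+ * caller can carve out a proportional ddb range. For example
+ * (hypothetical numbers), pipes A and B with weights 1920 and 1080 on
+ * the same slices give pipe B weight_start = 1920, weight_end = 3000
+ * and weight_total = 3000.
+ */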
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+ enum pipe for_pipe,
+ unsigned int *weight_start,
+ unsigned int *weight_end,
+ unsigned int *weight_total)
+{
+ struct drm_i915_private *i915 =
+ to_i915(dbuf_state->base.state->base.dev);
+ enum pipe pipe;
+
+ *weight_start = 0;
+ *weight_end = 0;
+ *weight_total = 0;
+
+ for_each_pipe(i915, pipe) {
+ int weight = dbuf_state->weight[pipe];
+
+ /*
+		 * Do not account for pipes using other slice sets. As of
+		 * the current BSpec, slice sets never partially intersect
+		 * (pipes either share the exact same slice set or none of
+		 * it), so a simple equality check is sufficient for now.
+ */
+ if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
+ continue;
+
+ *weight_total += weight;
+ if (pipe < for_pipe) {
+ *weight_start += weight;
+ *weight_end += weight;
+ } else if (pipe == for_pipe) {
+ *weight_end += weight;
+ }
+ }
+}
+
+static int
+skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ unsigned int weight_total, weight_start, weight_end;
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+ struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ struct intel_crtc_state *crtc_state;
+ struct skl_ddb_entry ddb_slices;
+ enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset = 0;
+ u32 ddb_range_size;
+ u32 dbuf_slice_mask;
+ u32 start, end;
+ int ret;
+
+ if (new_dbuf_state->weight[pipe] == 0) {
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
+ goto out;
+ }
+
+ dbuf_slice_mask = new_dbuf_state->slices[pipe];
+
+ skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices);
+ mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask);
+ ddb_range_size = skl_ddb_entry_size(&ddb_slices);
+
+ intel_crtc_dbuf_weights(new_dbuf_state, pipe,
+ &weight_start, &weight_end, &weight_total);
+
+ start = ddb_range_size * weight_start / weight_total;
+ end = ddb_range_size * weight_end / weight_total;
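+	/*
+	 * Proportional carve-out, e.g. (hypothetical numbers): with
+	 * ddb_range_size = 2048 and weights A/B = 1920/1080, pipe B gets
+	 * start = 2048 * 1920 / 3000 = 1310 and
+	 * end = 2048 * 3000 / 3000 = 2048.
+	 */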
+
+ skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
+ ddb_slices.start - mbus_offset + start,
+ ddb_slices.start - mbus_offset + end);
+
+out:
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
+ skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
+ &new_dbuf_state->ddb[pipe]))
+ return 0;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
+ crtc->base.base.id, crtc->base.name,
+ old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
+ old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
+ new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
+ old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
+
+ return 0;
+}
+
+static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane);
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ int level,
+ unsigned int latency,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */);
+
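+/*
+ * Size the fixed cursor ddb allocation: compute watermarks for a
+ * worst-case 256 pixel wide linear ARGB8888 cursor and take the highest
+ * usable level's min_ddb_alloc, with a floor of 32 blocks when a single
+ * pipe is active and 8 blocks otherwise.
+ */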
+static unsigned int
+skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
+ int num_active)
+{
+ struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ struct skl_wm_level wm = {};
+ int ret, min_ddb_alloc = 0;
+ struct skl_wm_params wp;
+
+ ret = skl_compute_wm_params(crtc_state, 256,
+ drm_format_info(DRM_FORMAT_ARGB8888),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_MODE_ROTATE_0,
+ crtc_state->pixel_rate, &wp, 0);
+ drm_WARN_ON(&i915->drm, ret);
+
+ for (level = 0; level <= max_level; level++) {
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
+ if (wm.min_ddb_alloc == U16_MAX)
+ break;
+
+ min_ddb_alloc = wm.min_ddb_alloc;
+ }
+
+ return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
+}
+
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+{
+ skl_ddb_entry_init(entry,
+ REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
+ REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
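+	/* the hw end is inclusive, while the software one is exclusive */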
+ if (entry->end)
+ entry->end++;
+}
+
+static void
+skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
+ const enum pipe pipe,
+ const enum plane_id plane_id,
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
+{
+ u32 val;
+
+ /* Cursor doesn't support NV12/planar, so no extra calculation needed */
+ if (plane_id == PLANE_CURSOR) {
+ val = intel_uncore_read(&i915->uncore, CUR_BUF_CFG(pipe));
+ skl_ddb_entry_init_from_hw(ddb, val);
+ return;
+ }
+
+ val = intel_uncore_read(&i915->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb, val);
+
+ if (DISPLAY_VER(i915) >= 11)
+ return;
+
+ val = intel_uncore_read(&i915->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(ddb_y, val);
+}
+
+static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb,
+ struct skl_ddb_entry *ddb_y)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+ enum plane_id plane_id;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ wakeref = intel_display_power_get_if_enabled(i915, power_domain);
+ if (!wakeref)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(i915, pipe,
+ plane_id,
+ &ddb[plane_id],
+ &ddb_y[plane_id]);
+
+ intel_display_power_put(i915, power_domain, wakeref);
+}
+
+struct dbuf_slice_conf_entry {
+ u8 active_pipes;
+ u8 dbuf_mask[I915_MAX_PIPES];
+ bool join_mbus;
+};
+
+/*
+ * Table taken from Bspec 12716.
+ * Pipes have a preferred DBuf slice affinity, and there are some
+ * hardcoded requirements on how the slices should be distributed in
+ * multi-pipe scenarios. With more DBuf slices an algorithmic approach
+ * would only get messier and less readable, so use the table almost
+ * verbatim from the BSpec itself - that way it is at least easy to
+ * compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+/*
+ * Table taken from Bspec 49255.
+ * Pipes have a preferred DBuf slice affinity, and there are some
+ * hardcoded requirements on how the slices should be distributed in
+ * multi-pipe scenarios. With more DBuf slices an algorithmic approach
+ * would only get messier and less readable, so use the table almost
+ * verbatim from the BSpec itself - that way it is at least easy to
+ * compare, change and check.
+ */
+static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3),
+ [PIPE_D] = BIT(DBUF_S4),
+ },
+ },
+ {}
+};
+
+static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+ /*
+ * Keep the join_mbus cases first so check_mbus_joined()
+ * will prefer them over the !join_mbus cases.
+ */
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = true,
+ },
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+	{}
+};
+
+static bool check_mbus_joined(u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes)
+ return dbuf_slices[i].join_mbus;
+ }
+ return false;
+}
+
+static bool adlp_check_mbus_joined(u8 active_pipes)
+{
+ return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
+}
+
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes &&
+ dbuf_slices[i].join_mbus == join_mbus)
+ return dbuf_slices[i].dbuf_mask[pipe];
+ }
+ return 0;
+}
+
+/*
+ * Find the entry matching the enabled pipe configuration and return the
+ * corresponding DBuf slice mask as stated in the BSpec for this platform.
+ */
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ /*
+	 * FIXME: For ICL this is still a bit unclear as a previous BSpec
+	 * revision required calculating "pipe ratio" to determine whether
+	 * one or two slices can be used in single pipe configurations, as
+	 * an additional constraint on top of the existing table. However,
+	 * based on more recent info it should not be "pipe ratio" but
+	 * rather the ratio between pixel_rate and cdclk with some
+	 * additional constants, so for now use only the table until this
+	 * is clarified.
+ */
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ icl_allowed_dbufs);
+}
+
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ tgl_allowed_dbufs);
+}
+
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ adlp_allowed_dbufs);
+}
+
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ dg2_allowed_dbufs);
+}
+
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (IS_DG2(i915))
+ return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) >= 13)
+ return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) == 12)
+ return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ else if (DISPLAY_VER(i915) == 11)
+ return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+ /*
+	 * For anything else just return one slice for now.
+ * Should be extended for other platforms.
+ */
+ return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
+}
+
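+/*
+ * On display 13+ an async flip restricts the plane to its minimal wm0;
+ * skl_compute_plane_wm() rejects all higher levels in that case.
+ */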
+static bool
+use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ return DISPLAY_VER(i915) >= 13 &&
+ crtc_state->uapi.async_flip &&
+ plane->async_flip;
+}
+
+static u64
+skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum plane_id plane_id;
+ u64 data_rate = 0;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->rel_data_rate[plane_id];
+
+ if (DISPLAY_VER(i915) < 11)
+ data_rate += crtc_state->rel_data_rate_y[plane_id];
+ }
+
+ return data_rate;
+}
+
+static const struct skl_wm_level *
+skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id,
+ int level)
+{
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (level == 0 && pipe_wm->use_sagv_wm)
+ return &wm->sagv.wm0;
+
+ return &wm->wm[level];
+}
+
+static const struct skl_wm_level *
+skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
+ enum plane_id plane_id)
+{
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ if (pipe_wm->use_sagv_wm)
+ return &wm->sagv.trans_wm;
+
+ return &wm->trans_wm;
+}
+
+/*
+ * We only disable the watermarks for each plane if
+ * they exceed the ddb allocation of said plane. This
+ * is done so that we don't end up touching cursor
+ * watermarks needlessly when some other plane reduces
+ * our max possible watermark level.
+ *
+ * Bspec has this to say about the PLANE_WM enable bit:
+ * "All the watermarks at this level for all enabled
+ * planes must be enabled before the level will be used."
+ * So this is actually safe to do.
+ */
+static void
+skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
+{
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
+ memset(wm, 0, sizeof(*wm));
+}
+
+static void
+skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
+ const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
+{
+ if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
+ uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
+ memset(wm, 0, sizeof(*wm));
+ memset(uv_wm, 0, sizeof(*uv_wm));
+ }
+}
+
+static bool icl_need_wm1_wa(struct drm_i915_private *i915,
+ enum plane_id plane_id)
+{
+ /*
+ * Wa_1408961008:icl, ehl
+ * Wa_14012656716:tgl, adl
+ * Underruns with WM1+ disabled
+ */
+ return DISPLAY_VER(i915) == 11 ||
+ (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
+}
+
+struct skl_plane_ddb_iter {
+ u64 data_rate;
+ u16 start, size;
+};
+
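+/*
+ * Hand the plane its minimum ddb allocation plus a share of the
+ * leftover blocks proportional to its relative data rate, e.g.
+ * (hypothetical numbers): with iter->size = 100 leftover blocks,
+ * iter->data_rate = 4000 and a plane data_rate of 1000 the plane
+ * receives extra = 100 * 1000 / 4000 = 25 blocks on top of its
+ * wm->min_ddb_alloc.
+ */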
+static void
+skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
+ struct skl_ddb_entry *ddb,
+ const struct skl_wm_level *wm,
+ u64 data_rate)
+{
+ u16 size, extra = 0;
+
+ if (data_rate) {
+ extra = min_t(u16, iter->size,
+ DIV64_U64_ROUND_UP(iter->size * data_rate,
+ iter->data_rate));
+ iter->size -= extra;
+ iter->data_rate -= data_rate;
+ }
+
+ /*
+	 * Keep the ddb entries of all disabled planes explicitly zeroed
+ * to avoid skl_ddb_add_affected_planes() adding them to
+ * the state when other planes change their allocations.
+ */
+ size = wm->min_ddb_alloc + extra;
+ if (size)
+ iter->start = skl_ddb_entry_init(ddb, iter->start,
+ iter->start + size);
+}
+
+static int
+skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
+ int num_active = hweight8(dbuf_state->active_pipes);
+ struct skl_plane_ddb_iter iter;
+ enum plane_id plane_id;
+ u16 cursor_size;
+ u32 blocks;
+ int level;
+
+ /* Clear the partitioning for disabled planes. */
+ memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
+ memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+
+ if (!crtc_state->hw.active)
+ return 0;
+
+ iter.start = alloc->start;
+ iter.size = skl_ddb_entry_size(alloc);
+ if (iter.size == 0)
+ return 0;
+
+ /* Allocate fixed number of blocks for cursor. */
+ cursor_size = skl_cursor_allocation(crtc_state, num_active);
+ iter.size -= cursor_size;
+ skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
+ alloc->end - cursor_size, alloc->end);
+
+ iter.data_rate = skl_total_relative_data_rate(crtc_state);
+
+ /*
+ * Find the highest watermark level for which we can satisfy the block
+ * requirement of active planes.
+ */
+ for (level = ilk_wm_max_level(i915); level >= 0; level--) {
+ blocks = 0;
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+
+ if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
+ drm_WARN_ON(&i915->drm,
+ wm->wm[level].min_ddb_alloc != U16_MAX);
+ blocks = U32_MAX;
+ break;
+ }
+ continue;
+ }
+
+ blocks += wm->wm[level].min_ddb_alloc;
+ blocks += wm->uv_wm[level].min_ddb_alloc;
+ }
+
+ if (blocks <= iter.size) {
+ iter.size -= blocks;
+ break;
+ }
+ }
+
+ if (level < 0) {
+ drm_dbg_kms(&i915->drm,
+			    "Requested display configuration exceeds system DDB limitations\n");
+ drm_dbg_kms(&i915->drm, "minimum required %d/%d\n",
+ blocks, iter.size);
+ return -EINVAL;
+ }
+
+ /* avoid the WARN later when we don't allocate any extra DDB */
+ if (iter.data_rate == 0)
+ iter.size = 0;
+
+ /*
+ * Grant each plane the blocks it requires at the highest achievable
+ * watermark level, plus an extra share of the leftover blocks
+ * proportional to its relative data rate.
+ */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
+ crtc_state->rel_data_rate_y[plane_id]);
+ skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ } else {
+ skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
+ crtc_state->rel_data_rate[plane_id]);
+ }
+ }
+ drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0);
+
+ /*
+ * When we calculated watermark values we didn't know how high
+ * of a level we'd actually be able to hit, so we just marked
+ * all levels as "enabled." Go back now and disable the ones
+ * that aren't actually possible.
+ */
+ for (level++; level <= ilk_wm_max_level(i915); level++) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id))
+ skl_check_nv12_wm_level(&wm->wm[level],
+ &wm->uv_wm[level],
+ ddb_y, ddb);
+ else
+ skl_check_wm_level(&wm->wm[level], ddb);
+
+ if (icl_need_wm1_wa(i915, plane_id) &&
+ level == 1 && wm->wm[0].enable) {
+ wm->wm[level].blocks = wm->wm[0].blocks;
+ wm->wm[level].lines = wm->wm[0].lines;
+ wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ }
+ }
+ }
+
+ /*
+ * Go back and disable the transition and SAGV watermarks
+ * if it turns out we don't have enough DDB blocks for them.
+ */
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+
+ if (DISPLAY_VER(i915) < 11 &&
+ crtc_state->nv12_planes & BIT(plane_id)) {
+ skl_check_wm_level(&wm->trans_wm, ddb_y);
+ } else {
+ WARN_ON(skl_ddb_entry_size(ddb_y));
+
+ skl_check_wm_level(&wm->trans_wm, ddb);
+ }
+
+ skl_check_wm_level(&wm->sagv.wm0, ddb);
+ skl_check_wm_level(&wm->sagv.trans_wm, ddb);
+ }
+
+ return 0;
+}
+
+/*
+ * The max latency should be 257 (max the punit can code is 255 and we add 2us
+ * for the read latency) and cpp should always be <= 8, so that
+ * should allow pixel_rate up to ~2 GHz which seems sufficient since max
+ * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
+ */
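+/*
+ * Method 1 in bspec terms: blocks needed to cover the memory latency at
+ * the plane's pixel rate, roughly
+ * latency_us * pixel_rate_khz * cpp / (1000 * dbuf_block_size).
+ */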
+static uint_fixed_16_16_t
+skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
+ u8 cpp, u32 latency, u32 dbuf_block_size)
+{
+ u32 wm_intermediate_val;
+ uint_fixed_16_16_t ret;
+
+ if (latency == 0)
+ return FP_16_16_MAX;
+
+ wm_intermediate_val = latency * pixel_rate * cpp;
+ ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
+
+ if (DISPLAY_VER(i915) >= 10)
+ ret = add_fixed16_u32(ret, 1);
+
+ return ret;
+}
+
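+/*
+ * Method 2 in bspec terms: the number of lines the latency spans,
+ * ceil(latency_us * pixel_rate_khz / (htotal * 1000)), converted to
+ * blocks via plane_blocks_per_line.
+ */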
+static uint_fixed_16_16_t
+skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
+ uint_fixed_16_16_t plane_blocks_per_line)
+{
+ u32 wm_intermediate_val;
+ uint_fixed_16_16_t ret;
+
+ if (latency == 0)
+ return FP_16_16_MAX;
+
+ wm_intermediate_val = latency * pixel_rate;
+ wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
+ pipe_htotal * 1000);
+ ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
+ return ret;
+}
+
+static uint_fixed_16_16_t
+intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ u32 pixel_rate;
+ u32 crtc_htotal;
+ uint_fixed_16_16_t linetime_us;
+
+ if (!crtc_state->hw.active)
+ return u32_to_fixed16(0);
+
+ pixel_rate = crtc_state->pixel_rate;
+
+ if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
+ return u32_to_fixed16(0);
+
+ crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
+ linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
+
+ return linetime_us;
+}
+
+static int
+skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u32 interm_pbpl;
+
+	/* only planar formats have two planes */
+ if (color_plane == 1 &&
+ !intel_format_info_is_yuv_semiplanar(format, modifier)) {
+ drm_dbg_kms(&i915->drm,
+			    "Non-planar formats have a single plane\n");
+ return -EINVAL;
+ }
+
+ wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+ modifier == I915_FORMAT_MOD_4_TILED ||
+ modifier == I915_FORMAT_MOD_Yf_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
+ wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
+ wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+ modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
+ wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
+
+ wp->width = width;
+ if (color_plane == 1 && wp->is_planar)
+ wp->width /= 2;
+
+ wp->cpp = format->cpp[color_plane];
+ wp->plane_pixel_rate = plane_pixel_rate;
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
+ wp->dbuf_block_size = 256;
+ else
+ wp->dbuf_block_size = 512;
+
+ if (drm_rotation_90_or_270(rotation)) {
+ switch (wp->cpp) {
+ case 1:
+ wp->y_min_scanlines = 16;
+ break;
+ case 2:
+ wp->y_min_scanlines = 8;
+ break;
+ case 4:
+ wp->y_min_scanlines = 4;
+ break;
+ default:
+ MISSING_CASE(wp->cpp);
+ return -EINVAL;
+ }
+ } else {
+ wp->y_min_scanlines = 4;
+ }
+
+ if (skl_needs_memory_bw_wa(i915))
+ wp->y_min_scanlines *= 2;
+
+ wp->plane_bytes_per_line = wp->width * wp->cpp;
+ if (wp->y_tiled) {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
+ wp->y_min_scanlines,
+ wp->dbuf_block_size);
+
+ if (DISPLAY_VER(i915) >= 10)
+ interm_pbpl++;
+
+ wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
+ wp->y_min_scanlines);
+ } else {
+ interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
+ wp->dbuf_block_size);
+
+ if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
+ interm_pbpl++;
+
+ wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
+ }
+
+ wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
+ wp->plane_blocks_per_line);
+
+ wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
+
+ return 0;
+}
+
+static int
+skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct skl_wm_params *wp, int color_plane)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int width;
+
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+ return skl_compute_wm_params(crtc_state, width,
+ fb->format, fb->modifier,
+ plane_state->hw.rotation,
+ intel_plane_pixel_rate(crtc_state, plane_state),
+ wp, color_plane);
+}
+
+static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
+{
+ if (DISPLAY_VER(i915) >= 10)
+ return true;
+
+	/* The number of lines is ignored for the level 0 watermark. */
+ return level > 0;
+}
+
+static int skl_wm_max_lines(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 13)
+ return 255;
+ else
+ return 31;
+}
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ int level,
+ unsigned int latency,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ uint_fixed_16_16_t method1, method2;
+ uint_fixed_16_16_t selected_result;
+ u32 blocks, lines, min_ddb_alloc = 0;
+
+ if (latency == 0 ||
+ (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
+ }
+
+ /*
+ * WaIncreaseLatencyIPCEnabled: kbl,cfl
+ * Display WA #1141: kbl,cfl
+ */
+ if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
+ skl_watermark_ipc_enabled(i915))
+ latency += 4;
+
+ if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
+ latency += 15;
+
+ method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
+ wp->cpp, latency, wp->dbuf_block_size);
+ method2 = skl_wm_method2(wp->plane_pixel_rate,
+ crtc_state->hw.pipe_mode.crtc_htotal,
+ latency,
+ wp->plane_blocks_per_line);
+
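+	/*
+	 * Result selection, roughly following the bspec: y-tiled surfaces
+	 * use method 2 (bounded below by the y-tile minimum); otherwise
+	 * method 2 is used when the whole line fits in a single dbuf block
+	 * or the latency spans at least one line time (gen9 takes the
+	 * smaller of the two methods there), else method 1.
+	 */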
+ if (wp->y_tiled) {
+ selected_result = max_fixed16(method2, wp->y_tile_minimum);
+ } else {
+ if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
+ wp->dbuf_block_size < 1) &&
+ (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
+ selected_result = method2;
+ } else if (latency >= wp->linetime_us) {
+ if (DISPLAY_VER(i915) == 9)
+ selected_result = min_fixed16(method1, method2);
+ else
+ selected_result = method2;
+ } else {
+ selected_result = method1;
+ }
+ }
+
+ blocks = fixed16_to_u32_round_up(selected_result) + 1;
+ /*
+	 * Make blocks at least equivalent to plane_blocks_per_line, since
+	 * the lines configuration will always need at least one line. This
+	 * is a workaround for FIFO underruns observed with resolutions
+	 * like 4k 60 Hz in single channel DRAM configurations.
+	 *
+	 * As per Bspec 49325, if the ddb allocation can hold at least one
+	 * plane_blocks_per_line, we should have selected method2 in the
+	 * logic above. Assuming that modern platforms have enough dbuf and
+	 * that method2 guarantees blocks equivalent to at least one line,
+	 * raise blocks to plane_blocks_per_line.
+	 *
+	 * TODO: Revisit this once we better understand the DRAM channels'
+	 * impact on the level 0 memory latency and the relevant wm
+	 * calculations.
+ */
+ if (skl_wm_has_lines(i915, level))
+ blocks = max(blocks,
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line));
+ lines = div_round_up_fixed16(selected_result,
+ wp->plane_blocks_per_line);
+
+ if (DISPLAY_VER(i915) == 9) {
+ /* Display WA #1125: skl,bxt,kbl */
+ if (level == 0 && wp->rc_surface)
+ blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+
+ /* Display WA #1126: skl,bxt,kbl */
+ if (level >= 1 && level <= 7) {
+ if (wp->y_tiled) {
+ blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+ lines += wp->y_min_scanlines;
+ } else {
+ blocks++;
+ }
+
+ /*
+ * Make sure result blocks for higher latency levels are
+ * at least as high as level below the current level.
+ * Assumption in DDB algorithm optimization for special
+ * cases. Also covers Display WA #1125 for RC.
+ */
+ if (result_prev->blocks > blocks)
+ blocks = result_prev->blocks;
+ }
+ }
+
+ if (DISPLAY_VER(i915) >= 11) {
+ if (wp->y_tiled) {
+ int extra_lines;
+
+ if (lines % wp->y_min_scanlines == 0)
+ extra_lines = wp->y_min_scanlines;
+ else
+ extra_lines = wp->y_min_scanlines * 2 -
+ lines % wp->y_min_scanlines;
+
+ min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
+ wp->plane_blocks_per_line);
+ } else {
+ min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
+ }
+ }
+
+ if (!skl_wm_has_lines(i915, level))
+ lines = 0;
+
+ if (lines > skl_wm_max_lines(i915)) {
+ /* reject it */
+ result->min_ddb_alloc = U16_MAX;
+ return;
+ }
+
+ /*
+ * If lines is valid, assume we can use this watermark level
+ * for now. We'll come back and disable it after we calculate the
+ * DDB allocation if it turns out we don't actually have enough
+ * blocks to satisfy it.
+ */
+ result->blocks = blocks;
+ result->lines = lines;
+ /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
+ result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
+ result->enable = true;
+
+ if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
+ result->can_sagv = latency >= i915->display.sagv.block_time_us;
+}
+
+static void
+skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ const struct skl_wm_params *wm_params,
+ struct skl_wm_level *levels)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ struct skl_wm_level *result_prev = &levels[0];
+
+ for (level = 0; level <= max_level; level++) {
+ struct skl_wm_level *result = &levels[level];
+ unsigned int latency = i915->display.wm.skl_latency[level];
+
+ skl_compute_plane_wm(crtc_state, plane, level, latency,
+ wm_params, result_prev, result);
+
+ result_prev = result;
+ }
+}
+
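+/*
+ * The SAGV wm0 is computed like the regular level 0 watermark, but with
+ * the SAGV block time added on top of the level 0 latency.
+ */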
+static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
+ struct intel_plane *plane,
+ const struct skl_wm_params *wm_params,
+ struct skl_plane_wm *plane_wm)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
+ struct skl_wm_level *levels = plane_wm->wm;
+ unsigned int latency = 0;
+
+ if (i915->display.sagv.block_time_us)
+ latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
+
+ skl_compute_plane_wm(crtc_state, plane, 0, latency,
+ wm_params, &levels[0],
+ sagv_wm);
+}
+
+static void skl_compute_transition_wm(struct drm_i915_private *i915,
+ struct skl_wm_level *trans_wm,
+ const struct skl_wm_level *wm0,
+ const struct skl_wm_params *wp)
+{
+ u16 trans_min, trans_amount, trans_y_tile_min;
+ u16 wm0_blocks, trans_offset, blocks;
+
+	/* Transition WMs don't make any sense if IPC is disabled */
+ if (!skl_watermark_ipc_enabled(i915))
+ return;
+
+ /*
+ * WaDisableTWM:skl,kbl,cfl,bxt
+	 * Transition WMs are not recommended by the HW team for GEN9
+ */
+ if (DISPLAY_VER(i915) == 9)
+ return;
+
+ if (DISPLAY_VER(i915) >= 11)
+ trans_min = 4;
+ else
+ trans_min = 14;
+
+ /* Display WA #1140: glk,cnl */
+ if (DISPLAY_VER(i915) == 10)
+ trans_amount = 0;
+ else
+		trans_amount = 10; /* This is a configurable amount */
+
+ trans_offset = trans_min + trans_amount;
+
+ /*
+ * The spec asks for Selected Result Blocks for wm0 (the real value),
+ * not Result Blocks (the integer value). Pay attention to the capital
+	 * letters. The value wm0->blocks is actually Result Blocks, but
+ * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
+ * and since we later will have to get the ceiling of the sum in the
+ * transition watermarks calculation, we can just pretend Selected
+ * Result Blocks is Result Blocks minus 1 and it should work for the
+ * current platforms.
+ */
+ wm0_blocks = wm0->blocks - 1;
+
+ if (wp->y_tiled) {
+ trans_y_tile_min =
+ (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
+ blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
+ } else {
+ blocks = wm0_blocks + trans_offset;
+ }
+ blocks++;
+
+ /*
+ * Just assume we can enable the transition watermark. After
+ * computing the DDB we'll come back and disable it if that
+ * assumption turns out to be false.
+ */
+ trans_wm->blocks = blocks;
+ trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
+ trans_wm->enable = true;
+}
+
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct intel_plane *plane, int color_plane)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
+ struct skl_wm_params wm_params;
+ int ret;
+
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, color_plane);
+ if (ret)
+ return ret;
+
+ skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
+
+ skl_compute_transition_wm(i915, &wm->trans_wm,
+ &wm->wm[0], &wm_params);
+
+ if (DISPLAY_VER(i915) >= 12) {
+ tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
+
+ skl_compute_transition_wm(i915, &wm->sagv.trans_wm,
+ &wm->sagv.wm0, &wm_params);
+ }
+
+ return 0;
+}
+
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct intel_plane *plane)
+{
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
+ struct skl_wm_params wm_params;
+ int ret;
+
+ wm->is_planar = true;
+
+ /* uv plane watermarks must also be validated for NV12/Planar */
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, 1);
+ if (ret)
+ return ret;
+
+ skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
+
+ return 0;
+}
+
+static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ enum plane_id plane_id = plane->id;
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int ret;
+
+ memset(wm, 0, sizeof(*wm));
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 0);
+ if (ret)
+ return ret;
+
+ if (fb->format->is_yuv && fb->format->num_planes > 1) {
+ ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+ plane);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
+ int ret;
+
+ /* Watermarks calculated in master */
+ if (plane_state->planar_slave)
+ return 0;
+
+ memset(wm, 0, sizeof(*wm));
+
+ if (plane_state->planar_linked_plane) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ drm_WARN_ON(&i915->drm,
+ !intel_wm_plane_visible(crtc_state, plane_state));
+ drm_WARN_ON(&i915->drm, !fb->format->is_yuv ||
+ fb->format->num_planes == 1);
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_state->planar_linked_plane, 0);
+ if (ret)
+ return ret;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 1);
+ if (ret)
+ return ret;
+ } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int skl_build_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int ret, i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ /*
+		 * FIXME: we should perhaps check {old,new}_plane_crtc->hw.crtc
+		 * instead, but we don't populate that correctly for NV12 Y
+		 * planes, so hack around it here for now.
+ */
+ if (plane->pipe != crtc->pipe)
+ continue;
+
+ if (DISPLAY_VER(i915) >= 11)
+ ret = icl_build_plane_wm(crtc_state, plane_state);
+ else
+ ret = skl_build_plane_wm(crtc_state, plane_state);
+ if (ret)
+ return ret;
+ }
+
+ crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
+
+ return 0;
+}
+
+static void skl_ddb_entry_write(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct skl_ddb_entry *entry)
+{
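+	/*
+	 * Convert back to the inclusive end the hw expects;
+	 * a zero-size entry is disabled by writing 0.
+	 */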
+ if (entry->end)
+ intel_de_write_fw(i915, reg,
+ PLANE_BUF_END(entry->end - 1) |
+ PLANE_BUF_START(entry->start));
+ else
+ intel_de_write_fw(i915, reg, 0);
+}
+
+static void skl_write_wm_level(struct drm_i915_private *i915,
+ i915_reg_t reg,
+ const struct skl_wm_level *level)
+{
+ u32 val = 0;
+
+ if (level->enable)
+ val |= PLANE_WM_EN;
+ if (level->ignore_lines)
+ val |= PLANE_WM_IGNORE_LINES;
+ val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
+ val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
+
+ intel_de_write_fw(i915, reg, val);
+}
+
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+
+ for (level = 0; level <= max_level; level++)
+ skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level),
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
+ skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id),
+ skl_plane_trans_wm(pipe_wm, plane_id));
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id),
+ &wm->sagv.wm0);
+ skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id),
+ &wm->sagv.trans_wm);
+ }
+
+ skl_ddb_entry_write(i915,
+ PLANE_BUF_CFG(pipe, plane_id), ddb);
+
+ if (DISPLAY_VER(i915) < 11)
+ skl_ddb_entry_write(i915,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
+}
+
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+
+ for (level = 0; level <= max_level; level++)
+ skl_write_wm_level(i915, CUR_WM(pipe, level),
+ skl_plane_wm_level(pipe_wm, plane_id, level));
+
+ skl_write_wm_level(i915, CUR_WM_TRANS(pipe),
+ skl_plane_trans_wm(pipe_wm, plane_id));
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+
+ skl_write_wm_level(i915, CUR_WM_SAGV(pipe),
+ &wm->sagv.wm0);
+ skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe),
+ &wm->sagv.trans_wm);
+ }
+
+ skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb);
+}
+
+static bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2)
+{
+ return l1->enable == l2->enable &&
+ l1->ignore_lines == l2->ignore_lines &&
+ l1->lines == l2->lines &&
+ l1->blocks == l2->blocks;
+}
+
+static bool skl_plane_wm_equals(struct drm_i915_private *i915,
+ const struct skl_plane_wm *wm1,
+ const struct skl_plane_wm *wm2)
+{
+ int level, max_level = ilk_wm_max_level(i915);
+
+ for (level = 0; level <= max_level; level++) {
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
+ return false;
+ }
+
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
+ skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
+ skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
+}
+
+static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ return a->start < b->end && b->start < a->end;
+}
+
+static void skl_ddb_entry_union(struct skl_ddb_entry *a,
+ const struct skl_ddb_entry *b)
+{
+ if (a->end && b->end) {
+ a->start = min(a->start, b->start);
+ a->end = max(a->end, b->end);
+ } else if (b->end) {
+ a->start = b->start;
+ a->end = b->end;
+ }
+}
+
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx)
+{
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ if (i != ignore_idx &&
+ skl_ddb_entries_overlap(ddb, &entries[i]))
+ return true;
+ }
+
+ return false;
+}
+
+static int
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
+static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
+{
+ struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev);
+ u8 enabled_slices;
+ enum pipe pipe;
+
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ enabled_slices = BIT(DBUF_S1);
+
+ for_each_pipe(i915, pipe)
+ enabled_slices |= dbuf_state->slices[pipe];
+
+ return enabled_slices;
+}
+
+static int
+skl_compute_ddb(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *old_dbuf_state;
+ struct intel_dbuf_state *new_dbuf_state = NULL;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ new_dbuf_state = intel_atomic_get_dbuf_state(state);
+ if (IS_ERR(new_dbuf_state))
+ return PTR_ERR(new_dbuf_state);
+
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ break;
+ }
+
+ if (!new_dbuf_state)
+ return 0;
+
+ new_dbuf_state->active_pipes =
+ intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
+
+ if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ if (HAS_MBUS_JOINING(i915))
+ new_dbuf_state->joined_mbus =
+ adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->slices[pipe] =
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+ new_dbuf_state->joined_mbus);
+
+ if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
+
+ if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
+ old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+
+ if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
+ /* TODO: Implement vblank synchronized MBUS joining changes */
+ ret = intel_modeset_all_pipes(state);
+ if (ret)
+ return ret;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
+ old_dbuf_state->enabled_slices,
+ new_dbuf_state->enabled_slices,
+ INTEL_INFO(i915)->display.dbuf.slice_mask,
+ str_yes_no(old_dbuf_state->joined_mbus),
+ str_yes_no(new_dbuf_state->joined_mbus));
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
+
+ if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
+ continue;
+
+ ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
+ if (ret)
+ return ret;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ ret = skl_crtc_allocate_ddb(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_crtc_allocate_plane_ddb(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static char enast(bool enable)
+{
+ return enable ? '*' : ' ';
+}
+
+static void
+skl_print_wm_changes(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ int i;
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
+
+ old_pipe_wm = &old_crtc_state->wm.skl.optimal;
+ new_pipe_wm = &new_crtc_state->wm.skl.optimal;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_ddb_entry *old, *new;
+
+ old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
+
+ if (skl_ddb_entry_equal(old, new))
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end, new->start, new->end,
+ skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+ }
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_plane_wm *old_wm, *new_wm;
+
+ old_wm = &old_pipe_wm->planes[plane_id];
+ new_wm = &new_pipe_wm->planes[plane_id];
+
+ if (skl_plane_wm_equals(i915, old_wm, new_wm))
+ continue;
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
+ enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
+ enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
+ enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
+ enast(old_wm->trans_wm.enable),
+ enast(old_wm->sagv.wm0.enable),
+ enast(old_wm->sagv.trans_wm.enable),
+ enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
+ enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
+ enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
+ enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
+ enast(new_wm->trans_wm.enable),
+ enast(new_wm->sagv.wm0.enable),
+ enast(new_wm->sagv.trans_wm.enable));
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
+ enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
+ enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
+ enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
+ enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].blocks, old_wm->wm[1].blocks,
+ old_wm->wm[2].blocks, old_wm->wm[3].blocks,
+ old_wm->wm[4].blocks, old_wm->wm[5].blocks,
+ old_wm->wm[6].blocks, old_wm->wm[7].blocks,
+ old_wm->trans_wm.blocks,
+ old_wm->sagv.wm0.blocks,
+ old_wm->sagv.trans_wm.blocks,
+ new_wm->wm[0].blocks, new_wm->wm[1].blocks,
+ new_wm->wm[2].blocks, new_wm->wm[3].blocks,
+ new_wm->wm[4].blocks, new_wm->wm[5].blocks,
+ new_wm->wm[6].blocks, new_wm->wm[7].blocks,
+ new_wm->trans_wm.blocks,
+ new_wm->sagv.wm0.blocks,
+ new_wm->sagv.trans_wm.blocks);
+
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ old_wm->sagv.wm0.min_ddb_alloc,
+ old_wm->sagv.trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc,
+ new_wm->sagv.wm0.min_ddb_alloc,
+ new_wm->sagv.trans_wm.min_ddb_alloc);
+ }
+ }
+}
+
+static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
+ const struct skl_pipe_wm *old_pipe_wm,
+ const struct skl_pipe_wm *new_pipe_wm)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ int level, max_level = ilk_wm_max_level(i915);
+
+ for (level = 0; level <= max_level; level++) {
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
+ skl_plane_wm_level(new_pipe_wm, plane->id, level)))
+ return false;
+ }
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
+ const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
+
+ if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
+ !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
+ return false;
+ }
+
+ return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
+ skl_plane_trans_wm(new_pipe_wm, plane->id));
+}
+
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state, the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout, as the
+ * default value of the watermark registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ /*
+ * Force a full wm update for every plane on modeset.
+ * Required because the reset value of the wm registers
+ * is non-zero, whereas we want all disabled planes to
+ * have zero watermarks. So if we turn off the relevant
+ * power well the hardware state will go out of sync
+ * with the software state.
+ */
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
+ skl_plane_selected_wm_equals(plane,
+ &old_crtc_state->wm.skl.optimal,
+ &new_crtc_state->wm.skl.optimal))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
+static int
+skl_compute_wm(struct intel_atomic_state *state)
+{
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *new_crtc_state;
+ int ret, i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = skl_build_pipe_wm(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ ret = skl_compute_ddb(state);
+ if (ret)
+ return ret;
+
+ ret = intel_compute_sagv_mask(state);
+ if (ret)
+ return ret;
+
+ /*
+ * skl_compute_ddb() will have adjusted the final watermarks
+ * based on how much ddb is available. Now we can actually
+ * check if the final watermarks changed.
+ */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = skl_wm_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ skl_print_wm_changes(state);
+
+ return 0;
+}
+
+static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
+{
+ level->enable = val & PLANE_WM_EN;
+ level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
+ level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
+ level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
+}
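+
+/*
+ * Illustrative decode (a sketch, assuming the usual layout behind these
+ * masks: enable in bit 31, ignore-lines in bit 30, lines in bits 26:14,
+ * blocks in bits 11:0): val == 0x80008020 would yield enable == true,
+ * ignore_lines == false, lines == 2 and blocks == 32.
+ */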
+
+static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
+ struct skl_pipe_wm *out)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ int level, max_level;
+ enum plane_id plane_id;
+ u32 val;
+
+ max_level = ilk_wm_max_level(i915);
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_plane_wm *wm = &out->planes[plane_id];
+
+ for (level = 0; level <= max_level; level++) {
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore, PLANE_WM(pipe, plane_id, level));
+ else
+ val = intel_uncore_read(&i915->uncore, CUR_WM(pipe, level));
+
+ skl_wm_level_from_reg_val(val, &wm->wm[level]);
+ }
+
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore, PLANE_WM_TRANS(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore, CUR_WM_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->trans_wm);
+
+ if (HAS_HW_SAGV_WM(i915)) {
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore,
+ PLANE_WM_SAGV(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore,
+ CUR_WM_SAGV(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
+
+ if (plane_id != PLANE_CURSOR)
+ val = intel_uncore_read(&i915->uncore,
+ PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ else
+ val = intel_uncore_read(&i915->uncore,
+ CUR_WM_SAGV_TRANS(pipe));
+
+ skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ wm->sagv.wm0 = wm->wm[0];
+ wm->sagv.trans_wm = wm->trans_wm;
+ }
+ }
+}
+
+void skl_wm_get_hw_state(struct drm_i915_private *i915)
+{
+ struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct intel_crtc *crtc;
+
+ if (HAS_MBUS_JOINING(i915))
+ dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ unsigned int mbus_offset;
+ enum plane_id plane_id;
+ u8 slices;
+
+ memset(&crtc_state->wm.skl.optimal, 0,
+ sizeof(crtc_state->wm.skl.optimal));
+ if (crtc_state->hw.active)
+ skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
+ crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
+
+ memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb[plane_id];
+ struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ skl_ddb_get_hw_plane_state(i915, crtc->pipe,
+ plane_id, ddb, ddb_y);
+
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
+ skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
+ }
+
+ dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
+
+ /*
+ * Used for checking overlaps, so we need absolute
+ * offsets instead of MBUS relative offsets.
+ */
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ mbus_offset = mbus_ddb_offset(i915, slices);
+ crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
+ crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
+
+ /* The slices actually used by the planes on the pipe */
+ dbuf_state->slices[pipe] =
+ skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb);
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
+ crtc->base.base.id, crtc->base.name,
+ dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
+ dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
+ str_yes_no(dbuf_state->joined_mbus));
+ }
+
+ dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices;
+}
+
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->display.dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+	 * transition from the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ struct skl_pipe_wm wm;
+ } *hw;
+ const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
+ int level, max_level = ilk_wm_max_level(i915);
+ struct intel_plane *plane;
+ u8 hw_enabled_slices;
+
+ if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active)
+ return;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
+
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915);
+
+ if (DISPLAY_VER(i915) >= 11 &&
+ hw_enabled_slices != i915->display.dbuf.enabled_slices)
+ drm_err(&i915->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ i915->display.dbuf.enabled_slices,
+ hw_enabled_slices);
+
+ for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+ const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ const struct skl_wm_level *hw_wm_level, *sw_wm_level;
+
+ /* Watermarks */
+ for (level = 0; level <= max_level; level++) {
+ hw_wm_level = &hw->wm.planes[plane->id].wm[level];
+ sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
+
+ if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
+ continue;
+
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name, level,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
+ sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
+
+ if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
+
+ if (HAS_HW_SAGV_WM(i915) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
+ sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
+
+ if (HAS_HW_SAGV_WM(i915) &&
+ !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ plane->base.base.id, plane->base.name,
+ sw_wm_level->enable,
+ sw_wm_level->blocks,
+ sw_wm_level->lines,
+ hw_wm_level->enable,
+ hw_wm_level->blocks,
+ hw_wm_level->lines);
+ }
+
+ /* DDB */
+ hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
+
+ if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
+ drm_err(&i915->drm,
+ "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
+ plane->base.base.id, plane->base.name,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
+ }
+ }
+
+ kfree(hw);
+}
+
+bool skl_watermark_ipc_enabled(struct drm_i915_private *i915)
+{
+ return i915->display.wm.ipc_enabled;
+}
+
+void skl_watermark_ipc_update(struct drm_i915_private *i915)
+{
+ if (!HAS_IPC(i915))
+ return;
+
+ intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+}
+
+static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
+{
+ /* Display WA #0477 WaDisableIPC: skl */
+ if (IS_SKYLAKE(i915))
+ return false;
+
+ /* Display WA #1141: SKL:all KBL:all CFL */
+ if (IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915))
+ return i915->dram_info.symmetric_memory;
+
+ return true;
+}
+
+void skl_watermark_ipc_init(struct drm_i915_private *i915)
+{
+ if (!HAS_IPC(i915))
+ return;
+
+ i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915);
+
+ skl_watermark_ipc_update(i915);
+}
+
+static void
+adjust_wm_latency(struct drm_i915_private *i915,
+ u16 wm[], int max_level, int read_latency)
+{
+ bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed;
+ int i, level;
+
+ /*
+	 * If a level n (n >= 1) has a 0us latency, all levels m (m >= n)
+ * need to be disabled. We make sure to sanitize the values out
+ * of the punit to satisfy this requirement.
+ */
+ for (level = 1; level <= max_level; level++) {
+ if (wm[level] == 0) {
+ for (i = level + 1; i <= max_level; i++)
+ wm[i] = 0;
+
+ max_level = level - 1;
+ break;
+ }
+ }
+
+ /*
+ * WaWmMemoryReadLatency
+ *
+	 * the punit doesn't take the read latency into account, so we
+	 * need to add a proper adjustment to each valid level we
+	 * retrieve from it when the level 0 response data is 0us.
+ */
+ if (wm[0] == 0) {
+ for (level = 0; level <= max_level; level++)
+ wm[level] += read_latency;
+ }
+
+ /*
+ * WA Level-0 adjustment for 16GB DIMMs: SKL+
+	 * If we could not get the DIMM info, assume 16GB DIMMs and
+	 * enable this WA to prevent underruns.
+ */
+ if (wm_lv_0_adjust_needed)
+ wm[0] += 1;
+}
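+
+/*
+ * Worked example (illustrative only): suppose the punit reports latencies
+ * {0, 4, 8, 0, 20, 20, 30, 40} with max_level == 7 and read_latency == 2.
+ * The sanitization loop sees wm[3] == 0, zeroes wm[4..7] and clamps
+ * max_level to 2. Since wm[0] == 0, WaWmMemoryReadLatency then adds the
+ * read latency to levels 0..2, giving {2, 6, 10, 0, ...}, and the 16GB
+ * DIMM adjustment finally bumps wm[0] to 3 when it applies.
+ */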
+
+static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ int max_level = ilk_wm_max_level(i915);
+ u32 val;
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
+ wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
+ wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
+ wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
+ wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
+
+ adjust_wm_latency(i915, wm, max_level, 6);
+}
+
+static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ int max_level = ilk_wm_max_level(i915);
+ int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2;
+ int mult = IS_DG2(i915) ? 2 : 1;
+ u32 val;
+ int ret;
+
+ /* read the first set of memory latencies[0:3] */
+ val = 0; /* data0 to be programmed to 0 for first set */
+ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ if (ret) {
+ drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
+ wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
+ wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
+ wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+
+ /* read the second set of memory latencies[4:7] */
+ val = 1; /* data0 to be programmed to 1 for second set */
+ ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ if (ret) {
+ drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret);
+ return;
+ }
+
+ wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
+ wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
+ wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
+ wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+
+ adjust_wm_latency(i915, wm, max_level, read_latency);
+}
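+
+/*
+ * Illustrative sketch: each GEN9_PCODE_READ_MEM_LATENCY mailbox read packs
+ * four 8-bit latencies into one u32 (assuming the fields sit at bits 7:0,
+ * 15:8, 23:16 and 31:24 as the mask names suggest), so val == 0x20100a02
+ * would decode as wm[0] == 2, wm[1] == 10, wm[2] == 16 and wm[3] == 32,
+ * each doubled on DG2.
+ */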
+
+static void skl_setup_wm_latency(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 14)
+ mtl_read_wm_latency(i915, i915->display.wm.skl_latency);
+ else
+ skl_read_wm_latency(i915, i915->display.wm.skl_latency);
+
+ intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency);
+}
+
+static const struct intel_wm_funcs skl_wm_funcs = {
+ .compute_global_watermarks = skl_compute_wm,
+};
+
+void skl_wm_init(struct drm_i915_private *i915)
+{
+ intel_sagv_init(i915);
+
+ skl_setup_wm_latency(i915);
+
+ i915->display.funcs.wm = &skl_wm_funcs;
+}
+
+static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return NULL;
+
+ return &dbuf_state->base;
+}
+
+static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_dbuf_funcs = {
+ .atomic_duplicate_state = intel_dbuf_duplicate_state,
+ .atomic_destroy_state = intel_dbuf_destroy_state,
+};
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_global_state *dbuf_state;
+
+ dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj);
+ if (IS_ERR(dbuf_state))
+ return ERR_CAST(dbuf_state);
+
+ return to_intel_dbuf_state(dbuf_state);
+}
+
+int intel_dbuf_init(struct drm_i915_private *i915)
+{
+ struct intel_dbuf_state *dbuf_state;
+
+ dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
+ if (!dbuf_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj,
+ &dbuf_state->base, &intel_dbuf_funcs);
+
+ return 0;
+}
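+
+/*
+ * Minimal usage sketch (hypothetical caller, not part of this patch):
+ * a check hook that needs to modify the dbuf configuration grabs the
+ * state's copy of the global object first and checks for an ERR_PTR.
+ */
+#if 0
+static int example_check_dbuf(struct intel_atomic_state *state)
+{
+	struct intel_dbuf_state *dbuf_state =
+		intel_atomic_get_dbuf_state(state);
+
+	if (IS_ERR(dbuf_state))
+		return PTR_ERR(dbuf_state);
+
+	/* inspect or adjust dbuf_state->enabled_slices etc. here */
+	return 0;
+}
+#endif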
+
+/*
+ * Configure MBUS_CTL and the DBUF_CTL_S of each slice to the join_mbus state
+ * before updating the request state of all DBUF slices.
+ */
+static void update_mbus_pre_enable(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ u32 mbus_ctl, dbuf_min_tracker_val;
+ enum dbuf_slice slice;
+ const struct intel_dbuf_state *dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+
+ if (!HAS_MBUS_JOINING(i915))
+ return;
+
+ /*
+ * TODO: Implement vblank synchronized MBUS joining changes.
+ * Must be properly coordinated with dbuf reprogramming.
+ */
+ if (dbuf_state->joined_mbus) {
+ mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
+ } else {
+ mbus_ctl = MBUS_HASHING_MODE_2x2 |
+ MBUS_JOIN_PIPE_SELECT_NONE;
+ dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
+ }
+
+ intel_de_rmw(i915, MBUS_CTL,
+ MBUS_HASHING_MODE_MASK | MBUS_JOIN |
+ MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
+
+ for_each_dbuf_slice(i915, slice)
+ intel_de_rmw(i915, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ dbuf_min_tracker_val);
+}
+
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
+ new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ update_mbus_pre_enable(state);
+ gen9_dbuf_slices_update(i915,
+ old_dbuf_state->enabled_slices |
+ new_dbuf_state->enabled_slices);
+}
+
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state =
+ intel_atomic_get_new_dbuf_state(state);
+ const struct intel_dbuf_state *old_dbuf_state =
+ intel_atomic_get_old_dbuf_state(state);
+
+ if (!new_dbuf_state ||
+ (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices &&
+ new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))
+ return;
+
+ WARN_ON(!new_dbuf_state->base.changed);
+
+ gen9_dbuf_slices_update(i915,
+ new_dbuf_state->enabled_slices);
+}
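+
+/*
+ * Worked example (illustrative): going from enabled_slices == 0x3 to 0xc,
+ * the pre-plane-update hook programs 0x3 | 0xc == 0xf so both the old and
+ * the new slices stay powered across the transition, and the post-plane-
+ * update hook then trims the set down to the final 0xc.
+ */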
+
+static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes)
+{
+ switch (pipe) {
+ case PIPE_A:
+ return !(active_pipes & BIT(PIPE_D));
+ case PIPE_D:
+ return !(active_pipes & BIT(PIPE_A));
+ case PIPE_B:
+ return !(active_pipes & BIT(PIPE_C));
+ case PIPE_C:
+ return !(active_pipes & BIT(PIPE_B));
+ default: /* to suppress compiler warning */
+ MISSING_CASE(pipe);
+ break;
+ }
+
+ return false;
+}
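+
+/*
+ * Derived from the cases above: pipes A and D share one DBUF bank and
+ * pipes B and C the other, so with e.g. active_pipes == BIT(PIPE_A) |
+ * BIT(PIPE_B) each active pipe is alone in its bank and gets the larger
+ * MTL BW credit below.
+ */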
+
+void intel_mbus_dbox_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc *crtc;
+ u32 val = 0;
+ int i;
+
+ if (DISPLAY_VER(i915) < 11)
+ return;
+
+ new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
+ old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
+ if (!new_dbuf_state ||
+ (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
+ new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
+ return;
+
+ if (DISPLAY_VER(i915) >= 14)
+ val |= MBUS_DBOX_I_CREDIT(2);
+
+ if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
+ val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
+ val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
+ }
+
+ if (DISPLAY_VER(i915) >= 14)
+ val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) :
+ MBUS_DBOX_A_CREDIT(8);
+ else if (IS_ALDERLAKE_P(i915))
+ /* Wa_22010947358:adl-p */
+ val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
+ MBUS_DBOX_A_CREDIT(4);
+ else
+ val |= MBUS_DBOX_A_CREDIT(2);
+
+ if (DISPLAY_VER(i915) >= 14) {
+ val |= MBUS_DBOX_B_CREDIT(0xA);
+ } else if (IS_ALDERLAKE_P(i915)) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(12);
+ } else {
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ u32 pipe_val = val;
+
+ if (!new_crtc_state->hw.active)
+ continue;
+
+ if (DISPLAY_VER(i915) >= 14) {
+ if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe,
+ new_dbuf_state->active_pipes))
+ pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL;
+ else
+ pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL;
+ }
+
+ intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val);
+ }
+}
+
+static int skl_watermark_ipc_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *i915 = m->private;
+
+ seq_printf(m, "Isochronous Priority Control: %s\n",
+ str_yes_no(skl_watermark_ipc_enabled(i915)));
+ return 0;
+}
+
+static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = inode->i_private;
+
+ return single_open(file, skl_watermark_ipc_status_show, i915);
+}
+
+static ssize_t skl_watermark_ipc_status_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *i915 = m->private;
+ intel_wakeref_t wakeref;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, len, &enable);
+ if (ret < 0)
+ return ret;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ if (!skl_watermark_ipc_enabled(i915) && enable)
+ drm_info(&i915->drm,
+ "Enabling IPC: WM will be proper only after next commit\n");
+ i915->display.wm.ipc_enabled = enable;
+ skl_watermark_ipc_update(i915);
+ }
+
+ return len;
+}
+
+static const struct file_operations skl_watermark_ipc_status_fops = {
+ .owner = THIS_MODULE,
+ .open = skl_watermark_ipc_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = skl_watermark_ipc_status_write
+};
+
+void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ if (!HAS_IPC(i915))
+ return;
+
+ debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915,
+ &skl_watermark_ipc_status_fops);
+}
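+
+/*
+ * Usage note (a sketch; the dri/0 minor path is an assumption and depends
+ * on the card index):
+ *   cat /sys/kernel/debug/dri/0/i915_ipc_status
+ *   echo 0 > /sys/kernel/debug/dri/0/i915_ipc_status
+ */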
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
new file mode 100644
index 000000000000..7a5a4e67cd73
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __SKL_WATERMARK_H__
+#define __SKL_WATERMARK_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+#include "intel_global_state.h"
+#include "intel_pm_types.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_bw_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915);
+
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
+void intel_sagv_post_plane_update(struct intel_atomic_state *state);
+bool intel_can_enable_sagv(struct drm_i915_private *i915,
+ const struct intel_bw_state *bw_state);
+
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+ const struct skl_ddb_entry *entry);
+
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx);
+
+void skl_wm_get_hw_state(struct drm_i915_private *i915);
+void skl_wm_sanitize(struct drm_i915_private *i915);
+
+void intel_wm_state_verify(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state);
+
+void skl_watermark_ipc_init(struct drm_i915_private *i915);
+void skl_watermark_ipc_update(struct drm_i915_private *i915);
+bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
+void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915);
+
+void skl_wm_init(struct drm_i915_private *i915);
+
+struct intel_dbuf_state {
+ struct intel_global_state base;
+
+ struct skl_ddb_entry ddb[I915_MAX_PIPES];
+ unsigned int weight[I915_MAX_PIPES];
+ u8 slices[I915_MAX_PIPES];
+ u8 enabled_slices;
+ u8 active_pipes;
+ bool joined_mbus;
+};
+
+struct intel_dbuf_state *
+intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
+
+#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
+#define intel_atomic_get_old_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
+#define intel_atomic_get_new_dbuf_state(state) \
+ to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj))
+
+int intel_dbuf_init(struct drm_i915_private *i915);
+void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
+void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
+void intel_mbus_dbox_update(struct intel_atomic_state *state);
+
+#endif /* __SKL_WATERMARK_H__ */
+
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index b9b1fed99874..b3f5ca280ef2 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -822,9 +822,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
u32 val;
/* Disable DPOunit clock gating, can stall pipe */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
val |= DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
}
if (!IS_GEMINILAKE(dev_priv))
@@ -998,9 +998,9 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
vlv_dsi_pll_disable(encoder);
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv));
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val);
}
/* Assert reset */
@@ -1277,13 +1277,12 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
pclk = vlv_dsi_get_pclk(encoder, pipe_config);
}
- if (intel_dsi->dual_link)
- pclk *= 2;
+ pipe_config->port_clock = pclk;
- if (pclk) {
- pipe_config->hw.adjusted_mode.crtc_clock = pclk;
- pipe_config->port_clock = pclk;
- }
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ pipe_config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ pipe_config->hw.adjusted_mode.crtc_clock *= 2;
}
/* return txclkesc cycles in terms of divider and duration in us */
@@ -1872,9 +1871,9 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
return;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
+ dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE;
else
- dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE;
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
@@ -1933,8 +1932,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
- intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
- intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 5894b0138343..af7402127cd9 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -113,6 +113,61 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
return 0;
}
+static int vlv_dsi_pclk(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 dsi_clock;
+ u32 pll_ctl, pll_div;
+ u32 m = 0, p = 0, n;
+ int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
+ int i;
+
+ pll_ctl = config->dsi_pll.ctrl;
+ pll_div = config->dsi_pll.div;
+
+ /* mask out other bits and extract the P1 divisor */
+ pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
+ pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
+
+ /* N1 divisor */
+ n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
+ n = 1 << n; /* register has log2(N1) */
+
+ /* mask out the other bits and extract the M1 divisor */
+ pll_div &= DSI_PLL_M1_DIV_MASK;
+ pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
+
+ while (pll_ctl) {
+ pll_ctl = pll_ctl >> 1;
+ p++;
+ }
+ p--;
+
+ if (!p) {
+ drm_err(&dev_priv->drm, "wrong P1 divisor\n");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
+ if (lfsr_converts[i] == pll_div)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(lfsr_converts)) {
+ drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
+ return 0;
+ }
+
+ m = i + 62;
+
+ dsi_clock = (m * refclk) / (p * n);
+
+ return DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp);
+}
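+
+/*
+ * Worked example (illustrative): on VLV, refclk == 25000 kHz; with m == 70,
+ * p == 3 and n == 1 the DSI clock is (70 * 25000) / 3 == 583333 kHz, and
+ * with 4 lanes at 24 bpp the pixel clock comes out to ~97222 kHz.
+ */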
+
/*
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
@@ -122,8 +177,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- int ret;
- u32 dsi_clk;
+ int pclk, dsi_clk, ret;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
@@ -145,6 +199,14 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
config->dsi_pll.div, config->dsi_pll.ctrl);
+ pclk = vlv_dsi_pclk(encoder, config);
+ config->port_clock = pclk;
+
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ config->hw.adjusted_mode.crtc_clock *= 2;
+
return 0;
}
@@ -262,13 +324,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
- u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
- u32 m = 0, p = 0, n;
- int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
- int i;
drm_dbg_kms(&dev_priv->drm, "\n");
@@ -280,65 +336,31 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
config->dsi_pll.div = pll_div;
- /* mask out other bits and extract the P1 divisor */
- pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
- pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
-
- /* N1 divisor */
- n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
- n = 1 << n; /* register has log2(N1) */
-
- /* mask out the other bits and extract the M1 divisor */
- pll_div &= DSI_PLL_M1_DIV_MASK;
- pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
-
- while (pll_ctl) {
- pll_ctl = pll_ctl >> 1;
- p++;
- }
- p--;
-
- if (!p) {
- drm_err(&dev_priv->drm, "wrong P1 divisor\n");
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
- if (lfsr_converts[i] == pll_div)
- break;
- }
-
- if (i == ARRAY_SIZE(lfsr_converts)) {
- drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
- return 0;
- }
-
- m = i + 62;
+ return vlv_dsi_pclk(encoder, config);
+}
- dsi_clock = (m * refclk) / (p * n);
+static int bxt_dsi_pclk(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 dsi_ratio, dsi_clk;
- pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp);
+ dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
+ dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
- return pclk;
+ return DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
}
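+
+/*
+ * Worked example (illustrative, assuming BXT_REF_CLOCK_KHZ is the 19200 kHz
+ * reference): dsi_ratio == 60 gives dsi_clk == 60 * 19200 / 2 == 576000 kHz,
+ * and with 4 lanes at 24 bpp the pixel clock is 576000 * 4 / 24 == 96000 kHz.
+ */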
u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
- u32 pclk;
- u32 dsi_clk;
- u32 dsi_ratio;
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 pclk;
config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
- dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
-
- dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
-
- pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
+ pclk = bxt_dsi_pclk(encoder, config);
drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
return pclk;
@@ -463,6 +485,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
+ int pclk;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
@@ -502,6 +525,14 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
+ pclk = bxt_dsi_pclk(encoder, config);
+ config->port_clock = pclk;
+
+ /* FIXME definitely not right for burst/cmd mode/pixel overlap */
+ config->hw.adjusted_mode.crtc_clock = pclk;
+ if (intel_dsi->dual_link)
+ config->hw.adjusted_mode.crtc_clock *= 2;
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
index 356e51515346..e065b8f2ee08 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h
@@ -11,6 +11,8 @@
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
#define BXT_MIPI_BASE 0x60000
+#define _MIPI_MMIO_BASE(__i915) ((__i915)->display.dsi.mmio_base)
+
#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
@@ -96,8 +98,8 @@
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
-#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define _MIPIA_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb000)
+#define _MIPIC_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb800)
#define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
@@ -106,11 +108,11 @@
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
-#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
-#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define _MIPIA_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb004)
+#define _MIPIC_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb804)
#define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT)
-#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
-#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define _MIPIA_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb008)
+#define _MIPIC_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb808)
#define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
@@ -145,8 +147,8 @@
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
-#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
-#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define _MIPIA_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb00c)
+#define _MIPIC_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb80c)
#define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
@@ -168,76 +170,76 @@
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
-#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define _MIPIA_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb010)
+#define _MIPIC_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb810)
#define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
-#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define _MIPIA_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb014)
+#define _MIPIC_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb814)
#define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
-#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define _MIPIA_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb018)
+#define _MIPIC_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb818)
#define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
-#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
-#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define _MIPIA_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb01c)
+#define _MIPIC_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb81c)
#define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
-#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
-#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define _MIPIA_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb020)
+#define _MIPIC_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb820)
#define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
-#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define _MIPIA_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb024)
+#define _MIPIC_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb824)
#define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
-#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define _MIPIA_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb028)
+#define _MIPIC_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb828)
#define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT)
-#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
-#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define _MIPIA_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb02c)
+#define _MIPIC_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb82c)
#define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT)
-#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
-#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define _MIPIA_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb030)
+#define _MIPIC_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb830)
#define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT)
-#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
-#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define _MIPIA_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb034)
+#define _MIPIC_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb834)
#define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT)
-#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
-#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define _MIPIA_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb038)
+#define _MIPIC_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb838)
#define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT)
-#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
-#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define _MIPIA_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb03c)
+#define _MIPIC_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb83c)
#define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT)
-#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
-#define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define _MIPIA_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb040)
+#define _MIPIC_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb840)
#define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT)
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
-#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb044)
+#define _MIPIC_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb844)
#define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT)
-#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
-#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define _MIPIA_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb048)
+#define _MIPIC_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb848)
#define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
@@ -247,27 +249,27 @@
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
-#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
-#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define _MIPIA_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb04c)
+#define _MIPIC_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb84c)
#define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
-#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
-#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define _MIPIA_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb050)
+#define _MIPIC_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb850)
#define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
-#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define _MIPIA_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb054)
+#define _MIPIC_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb854)
#define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \
_MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
-#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define _MIPIA_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb058)
+#define _MIPIC_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb858)
#define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
@@ -276,8 +278,8 @@
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
-#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
-#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define _MIPIA_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb05c)
+#define _MIPIC_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb85c)
#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
#define BXT_DPHY_DEFEATURE_EN (1 << 8)
@@ -290,35 +292,35 @@
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
-#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
-#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define _MIPIA_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb060)
+#define _MIPIC_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb860)
#define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
-#define _MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4)
-#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4)
+#define _MIPIA_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb0a4)
+#define _MIPIC_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb8a4)
#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
-#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098)
-#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898)
+#define _MIPIA_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb098)
+#define _MIPIC_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb898)
#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
-#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define _MIPIA_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb064)
+#define _MIPIC_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb864)
#define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
-#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define _MIPIA_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb068)
+#define _MIPIC_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb868)
#define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA)
-#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
-#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define _MIPIA_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb06c)
+#define _MIPIC_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb86c)
#define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
-#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define _MIPIA_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb070)
+#define _MIPIC_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb870)
#define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
@@ -330,8 +332,8 @@
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
-#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define _MIPIA_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb074)
+#define _MIPIC_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb874)
#define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
@@ -348,15 +350,15 @@
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
-#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define _MIPIA_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb078)
+#define _MIPIC_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb878)
#define MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
-#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
-#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define _MIPIA_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb080)
+#define _MIPIC_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb880)
#define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
@@ -367,34 +369,34 @@
#define PREPARE_COUNT_SHIFT 0
#define PREPARE_COUNT_MASK (0x3f << 0)
-#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
-#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define _MIPIA_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb084)
+#define _MIPIC_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb884)
#define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL)
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088)
-#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888)
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb088)
+#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb888)
#define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
-#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define _MIPIA_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb08c)
+#define _MIPIC_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb88c)
#define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
-#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define _MIPIA_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb090)
+#define _MIPIC_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb890)
#define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
-#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define _MIPIA_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb094)
+#define _MIPIC_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb894)
#define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
@@ -407,8 +409,8 @@
/* MIPI adapter registers */
-#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
-#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define _MIPIA_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb104)
+#define _MIPIC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb904)
#define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
@@ -440,21 +442,21 @@
#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
#define GLK_MIPIIO_ENABLE (1 << 0)
-#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
-#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define _MIPIA_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb108)
+#define _MIPIC_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb908)
#define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
-#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
-#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define _MIPIA_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb10c)
+#define _MIPIC_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb90c)
#define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
-#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define _MIPIA_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb110)
+#define _MIPIC_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb910)
#define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
@@ -462,18 +464,18 @@
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
-#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
-#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define _MIPIA_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb114)
+#define _MIPIC_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb914)
#define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
-#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define _MIPIA_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb118)
+#define _MIPIC_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb918)
#define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
-#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define _MIPIA_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb138)
+#define _MIPIC_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb938)
#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index dabdfe09f5e5..1e29b1e6d186 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1269,6 +1269,10 @@ static void i915_gem_context_release_work(struct work_struct *work)
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ spin_lock(&ctx->i915->gem.contexts.lock);
+ list_del(&ctx->link);
+ spin_unlock(&ctx->i915->gem.contexts.lock);
+
if (ctx->syncobj)
drm_syncobj_put(ctx->syncobj);
@@ -1383,14 +1387,8 @@ kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
*/
for_each_gem_engine(ce, engines, it) {
struct intel_engine_cs *engine;
- bool skip = false;
-
- if (exit)
- skip = intel_context_set_exiting(ce);
- else if (!persistent)
- skip = intel_context_exit_nonpersistent(ce, NULL);
- if (skip)
+ if ((exit || !persistent) && intel_context_revoke(ce))
continue; /* Already marked. */
/*
@@ -1521,10 +1519,6 @@ static void context_close(struct i915_gem_context *ctx)
ctx->file_priv = ERR_PTR(-EBADF);
- spin_lock(&ctx->i915->gem.contexts.lock);
- list_del(&ctx->link);
- spin_unlock(&ctx->i915->gem.contexts.lock);
-
client = ctx->client;
if (client) {
spin_lock(&client->ctx_lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cd75b0ca2555..845023c14eb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2424,7 +2424,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
/* Check whether the file_priv has already selected one ring. */
if ((int)file_priv->bsd_engine < 0)
file_priv->bsd_engine =
- get_random_int() % num_vcs_engines(dev_priv);
+ prandom_u32_max(num_vcs_engines(dev_priv));
return file_priv->bsd_engine;
}
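
The hunk above swaps get_random_int() % n for prandom_u32_max(n). As far as I can tell from the kernel's prandom helpers, the bounded variant maps a 32-bit random value into [0, n) with a multiply-and-shift instead of a division, which is cheaper on most CPUs (the small bias for non-power-of-two bounds exists in either form and is irrelevant for engine selection). A standalone sketch of that trick; rand32() is a stand-in for the kernel PRNG, not the real entropy source:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand32(void)
{
	/* demo-only source; the kernel uses its own PRNG state */
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

/* Scale r in [0, 2^32) down to [0, range) without a modulo. */
static uint32_t bounded_rand(uint32_t range)
{
	return (uint32_t)(((uint64_t)rand32() * range) >> 32);
}

int main(void)
{
	srand(42);
	for (int i = 0; i < 4; i++)
		printf("%u\n", bounded_rand(2));	/* e.g. pick one of 2 VCS engines */
	return 0;
}
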
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 1b88ea13435c..5a7a14e85c3f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -12,8 +12,6 @@ struct drm_i915_private;
struct drm_i915_gem_object;
struct intel_memory_region;
-extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
-
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 3218981488cc..73d9eda1d6b7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -413,7 +413,7 @@ retry:
vma->mmo = mmo;
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
- intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
+ intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
if (write) {
@@ -550,6 +550,20 @@ out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
+void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct ttm_device *bdev = bo->bdev;
+
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
+
+ if (obj->userfault_count) {
+		/* rpm wakeref provides exclusive access */
+ list_del(&obj->userfault_link);
+ obj->userfault_count = 0;
+ }
+}
+
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
struct i915_mmap_offset *mmo, *mn;
@@ -573,6 +587,13 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
spin_lock(&obj->mmo.lock);
}
spin_unlock(&obj->mmo.lock);
+
+ if (obj->userfault_count) {
+ mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ list_del(&obj->userfault_link);
+ mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ obj->userfault_count = 0;
+ }
}
static struct i915_mmap_offset *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index efee9e0d2508..1fa91b3033b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -27,6 +27,7 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 7cab89618bad..6b8710ba8ded 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -238,7 +238,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
/* Skip serialisation and waking the device if known to be not used. */
- if (obj->userfault_count)
+ if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
i915_gem_object_release_mmap_gtt(obj);
if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
@@ -667,6 +667,41 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
enum intel_region_id id)
{
+ return __i915_gem_object_migrate(obj, ww, id, obj->flags);
+}
+
+/**
+ * __i915_gem_object_migrate - Migrate an object to the desired region id, with
+ * control of the extra flags
+ * @obj: The object to migrate.
+ * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
+ * not be successful in evicting other objects to make room for this object.
+ * @id: The region id to migrate to.
+ * @flags: The object flags. Normally just obj->flags.
+ *
+ * Attempt to migrate the object to the desired memory region. The
+ * object backend must support migration and the object may not be
+ * pinned, (explicitly pinned pages or pinned vmas). The object must
+ * be locked.
+ * On successful completion, the object will have pages pointing to
+ * memory in the new region, but an async migration task may not have
+ * completed yet, and to accomplish that, i915_gem_object_wait_migration()
+ * must be called.
+ *
+ * Note: the @ww parameter is not used yet, but included to make sure
+ * callers put some effort into obtaining a valid ww ctx if one is
+ * available.
+ *
+ * Return: 0 on success. Negative error code on failure. In particular may
+ * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
+ * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
+ * -EBUSY if the object is pinned.
+ */
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ enum intel_region_id id,
+ unsigned int flags)
+{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_memory_region *mr;
@@ -686,7 +721,7 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
return 0;
}
- return obj->ops->migrate(obj, mr);
+ return obj->ops->migrate(obj, mr, flags);
}
/**
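
The kernel-doc for __i915_gem_object_migrate() above spells out a calling contract: the object must be locked and unpinned, and a successful return only means the pages point at the new region, with i915_gem_object_wait_migration() needed to flush the async copy. A minimal usage sketch under those assumptions; i915_gem_object_lock() and i915_gem_object_wait_migration() are existing i915 helpers, but this wrapper and its flag choices are illustrative, not driver code:

static int migrate_and_wait(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id)
{
	int err;

	err = i915_gem_object_lock(obj, ww);	/* contract: object locked */
	if (err)
		return err;	/* -EDEADLK is handled by the caller's ww retry */

	err = __i915_gem_object_migrate(obj, ww, id, obj->flags);
	if (err)
		return err;	/* e.g. -ENXIO (no space) or -EBUSY (pinned) */

	/* pages now live in the new region; the copy may still be in flight */
	return i915_gem_object_wait_migration(obj, 0);
}
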
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 7317d4102955..1723af9b0f6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -608,6 +608,10 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
enum intel_region_id id);
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ enum intel_region_id id,
+ unsigned int flags);
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
enum intel_region_id id);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 9f6b14ec189a..d0d6772e6f36 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -107,7 +107,8 @@ struct drm_i915_gem_object_ops {
* pinning or for as long as the object lock is held.
*/
int (*migrate)(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mr);
+ struct intel_memory_region *mr,
+ unsigned int flags);
void (*release)(struct drm_i915_gem_object *obj);
@@ -298,7 +299,8 @@ struct drm_i915_gem_object {
};
/**
- * Whether the object is currently in the GGTT mmap.
+ * Whether the object is currently in the GGTT or any other supported
+ * fake offset mmap backed by lmem.
*/
unsigned int userfault_count;
struct list_head userfault_link;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 8357dbdcab5c..4df50b049cea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -20,7 +20,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
unsigned int sg_page_sizes)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
bool shrinkable;
int i;
@@ -66,7 +66,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
shrinkable = i915_gem_object_is_shrinkable(obj);
if (i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_set_tiling_quirk(obj);
GEM_BUG_ON(!list_empty(&obj->mm.link));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 00359ec9d58b..3428f735e786 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
- intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
+ intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
flush_workqueue(i915->wq);
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 166d0a4b9e8c..acc561c0f0aa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -18,10 +18,12 @@
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
+#include "i915_pci.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"
+#include "intel_pci_config.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -428,48 +430,29 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
reserved_base = stolen_top;
reserved_size = 0;
- switch (GRAPHICS_VER(i915)) {
- case 2:
- case 3:
- break;
- case 4:
- if (!IS_G4X(i915))
- break;
- fallthrough;
- case 5:
- g4x_get_stolen_reserved(i915, uncore,
+ if (GRAPHICS_VER(i915) >= 11) {
+ icl_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
- break;
- case 6:
- gen6_get_stolen_reserved(i915, uncore,
- &reserved_base, &reserved_size);
- break;
- case 7:
- if (IS_VALLEYVIEW(i915))
- vlv_get_stolen_reserved(i915, uncore,
- &reserved_base, &reserved_size);
- else
- gen7_get_stolen_reserved(i915, uncore,
- &reserved_base, &reserved_size);
- break;
- case 8:
- case 9:
+ } else if (GRAPHICS_VER(i915) >= 8) {
if (IS_LP(i915))
chv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
bdw_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
- break;
- default:
- MISSING_CASE(GRAPHICS_VER(i915));
- fallthrough;
- case 11:
- case 12:
- icl_get_stolen_reserved(i915, uncore,
- &reserved_base,
- &reserved_size);
- break;
+ } else if (GRAPHICS_VER(i915) >= 7) {
+ if (IS_VALLEYVIEW(i915))
+ vlv_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
+ else
+ gen7_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
+ } else if (GRAPHICS_VER(i915) >= 6) {
+ gen6_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
+ } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
+ g4x_get_stolen_reserved(i915, uncore,
+ &reserved_base, &reserved_size);
}
/*
@@ -827,10 +810,13 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
if (WARN_ON_ONCE(instance))
return ERR_PTR(-ENODEV);
+ if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
+ return ERR_PTR(-ENXIO);
+
/* Use DSM base address instead for stolen memory */
dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
if (IS_DG1(uncore->i915)) {
- lmem_size = pci_resource_len(pdev, 2);
+ lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
if (WARN_ON(lmem_size < dsm_base))
return ERR_PTR(-ENODEV);
} else {
@@ -842,11 +828,11 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
}
dsm_size = lmem_size - dsm_base;
- if (pci_resource_len(pdev, 2) < lmem_size) {
+ if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) {
io_start = 0;
io_size = 0;
} else {
- io_start = pci_resource_start(pdev, 2) + dsm_base;
+ io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base;
io_size = dsm_size;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 85518b28cd72..fd42b89b7162 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
*/
if (i915_gem_object_has_pages(obj) &&
obj->mm.madv == I915_MADV_WILLNEED &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_clear_tiling_quirk(obj);
@@ -458,7 +458,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
- if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
else
args->phys_swizzle_mode = args->swizzle_mode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index f64a3deb12fc..4f861782c3e8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -509,9 +509,18 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ intel_wakeref_t wakeref = 0;
+
+ if (bo->resource && likely(obj)) {
+ /* ttm_bo_release() already has dma_resv_lock */
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
- if (likely(obj)) {
__i915_gem_object_pages_fini(obj);
+
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+
i915_ttm_free_cached_io_rsgt(obj);
}
}
@@ -839,9 +848,10 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
}
static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mr)
+ struct intel_memory_region *mr,
+ unsigned int flags)
{
- return __i915_ttm_migrate(obj, mr, obj->flags);
+ return __i915_ttm_migrate(obj, mr, flags);
}
static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
@@ -981,6 +991,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref = 0;
vm_fault_t ret;
int idx;
@@ -1002,6 +1013,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (i915_ttm_cpu_maps_iomem(bo->resource))
+ wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
@@ -1023,7 +1037,8 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
if (err) {
drm_dbg(dev, "Unable to make resource CPU accessible\n");
dma_resv_unlock(bo->base.resv);
- return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
+ goto out_rpm;
}
}
@@ -1034,12 +1049,30 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
+
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
+ goto out_rpm;
+
+ /* ttm_bo_vm_reserve() already has dma_resv_lock */
+ if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
+ obj->userfault_count = 1;
+ mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
+ mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
+ }
+
+	if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
+ intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
+ msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
i915_ttm_adjust_lru(obj);
dma_resv_unlock(bo->base.resv);
+
+out_rpm:
+ if (wakeref)
+ intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
+
return ret;
}
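
Every change to vm_fault_ttm() above serves one pattern: acquire a runtime-PM wakeref only when the BO is CPU-mapped from iomem (system memory needs no device power), and release it on every exit path through a single label. A distilled sketch of that shape; handle_fault() is a hypothetical placeholder for the real fault body:

static vm_fault_t fault_with_rpm(struct ttm_buffer_object *bo,
				 struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref = 0;
	vm_fault_t ret;

	/* touching iomem requires the device awake */
	if (i915_ttm_cpu_maps_iomem(bo->resource))
		wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ret = handle_fault(bo);		/* hypothetical fault body */

	if (wakeref)	/* a zero wakeref means it was never taken */
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}
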
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 8423df021b71..d4398948f016 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -426,12 +426,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
- const unsigned long end = addr + len;
+ VMA_ITERATOR(vmi, mm, addr);
struct vm_area_struct *vma;
- int ret = -EFAULT;
mmap_read_lock(mm);
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ for_each_vma_range(vmi, vma, addr + len) {
/* Check for holes, note that we also update the addr below */
if (vma->vm_start > addr)
break;
@@ -439,16 +438,13 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
break;
- if (vma->vm_end >= end) {
- ret = 0;
- break;
- }
-
addr = vma->vm_end;
}
mmap_read_unlock(mm);
- return ret;
+ if (vma)
+ return -EFAULT;
+ return 0;
}
/*
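
The rewrite of probe_range() above leans on an exit-condition subtlety of the maple-tree iterator: for_each_vma_range() leaves vma NULL only when the walk exhausted [addr, addr + len) cleanly, so both early breaks (a hole before the current vma, or a PFN/MIXEDMAP mapping) leave vma non-NULL and the verdict collapses into one test. A commented restatement of that invariant:

	for_each_vma_range(vmi, vma, addr + len) {
		if (vma->vm_start > addr)
			break;		/* gap before this vma: range has a hole */
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;		/* unsuitable mapping type */
		addr = vma->vm_end;	/* contiguous so far; advance the cursor */
	}
	/* vma == NULL iff the iterator ran off the end of the range */
	return vma ? -EFAULT : 0;
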
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 72ce2c9f42fd..c570cf780079 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -358,7 +358,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
static int igt_check_page_sizes(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
int err;
@@ -419,7 +419,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
+ unsigned int saved_mask = RUNTIME_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int i, j, single;
@@ -438,7 +438,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
combination |= page_sizes[j];
}
- mkwrite_device_info(i915)->page_sizes = combination;
+ RUNTIME_INFO(i915)->page_sizes = combination;
for (single = 0; single <= 1; ++single) {
obj = fake_huge_pages_object(i915, combination, !!single);
@@ -485,7 +485,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
out_put:
i915_gem_object_put(obj);
out_device:
- mkwrite_device_info(i915)->page_sizes = saved_mask;
+ RUNTIME_INFO(i915)->page_sizes = saved_mask;
return err;
}
@@ -495,7 +495,7 @@ static int igt_mock_memory_region_huge_pages(void *arg)
const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -573,7 +573,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj;
int bit;
int err;
@@ -1390,7 +1390,7 @@ out_put:
static int igt_ppgtt_sanity_check(void *arg)
{
struct drm_i915_private *i915 = arg;
- unsigned int supported = INTEL_INFO(i915)->page_sizes;
+ unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
struct {
igt_create_fn fn;
unsigned int flags;
@@ -1764,8 +1764,8 @@ int i915_gem_huge_page_mock_selftests(void)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
- mkwrite_device_info(dev_priv)->ppgtt_size = 48;
+ RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
+ RUNTIME_INFO(dev_priv)->ppgtt_size = 48;
ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
if (IS_ERR(ppgtt)) {
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 3cfc621ef363..9a6a6b5b722b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -711,7 +711,7 @@ static bool bad_swizzling(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
- if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
return true;
if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) ||
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 55bf23dc0e54..b73c91aa5450 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -367,7 +367,7 @@ static int igt_partial_tiling(void *arg)
unsigned int pitch;
struct tile tile;
- if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
/*
* The swizzling pattern is actually unknown as it
* varies based on physical address of each page.
@@ -464,7 +464,7 @@ static int igt_smoke_tiling(void *arg)
* Remember to look at the st_seed if we see a flip-flop in BAT!
*/
- if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
return 0;
obj = huge_gem_object(i915,
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 1bb766c79dcb..5aaacc53fa4c 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -247,6 +247,7 @@ err_scratch1:
i915_gem_object_put(vm->scratch[1]);
err_scratch0:
i915_gem_object_put(vm->scratch[0]);
+ vm->scratch[0] = NULL;
return ret;
}
@@ -268,9 +269,10 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
gen6_ppgtt_free_pd(ppgtt);
free_scratch(vm);
- mutex_destroy(&ppgtt->flush);
+ if (ppgtt->base.pd)
+ free_pd(&ppgtt->base.vm, ppgtt->base.pd);
- free_pd(&ppgtt->base.vm, ppgtt->base.pd);
+ mutex_destroy(&ppgtt->flush);
}
static void pd_vma_bind(struct i915_address_space *vm,
@@ -449,19 +451,17 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
err = gen6_ppgtt_init_scratch(ppgtt);
if (err)
- goto err_free;
+ goto err_put;
ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
if (IS_ERR(ppgtt->base.pd)) {
err = PTR_ERR(ppgtt->base.pd);
- goto err_scratch;
+ goto err_put;
}
return &ppgtt->base;
-err_scratch:
- free_scratch(&ppgtt->base.vm);
-err_free:
- kfree(ppgtt);
+err_put:
+ i915_vm_put(&ppgtt->base.vm);
return ERR_PTR(err);
}
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 98645797962f..e49fa6fa6aee 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -165,10 +165,12 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
-u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg)
+u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
{
+ u32 gsi_offset = gt->uncore->gsi_offset;
+
*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
- *cs++ = i915_mmio_reg_offset(inv_reg);
+ *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = AUX_INV;
*cs++ = MI_NOOP;
@@ -254,7 +256,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (!HAS_FLAT_CCS(rq->engine->i915)) {
/* hsdes: 1809175790 */
- cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+ cs = gen12_emit_aux_table_inv(rq->engine->gt,
+ cs, GEN12_GFX_CCS_AUX_NV);
}
*cs++ = preparser_disable(false);
@@ -313,9 +316,11 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
if (aux_inv) { /* hsdes: 1809175790 */
if (rq->engine->class == VIDEO_DECODE_CLASS)
- cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(rq->engine->gt,
+ cs, GEN12_VD0_AUX_NV);
else
- cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(rq->engine->gt,
+ cs, GEN12_VE0_AUX_NV);
}
if (mode & EMIT_INVALIDATE)
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
index 32e3d2b831bb..e4d24c811dd6 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -13,6 +13,7 @@
#include "intel_gt_regs.h"
#include "intel_gpu_commands.h"
+struct intel_gt;
struct i915_request;
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
@@ -45,7 +46,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
-u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg);
+u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
static inline u32 *
__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index c7bd5d71b03e..2128b7a72a25 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -196,7 +196,10 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
- __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+ if (ppgtt->pd)
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd,
+ gen8_pd_top_count(vm), vm->top);
+
free_scratch(vm);
}
@@ -803,8 +806,10 @@ static int gen8_init_scratch(struct i915_address_space *vm)
struct drm_i915_gem_object *obj;
obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
- if (IS_ERR(obj))
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
goto free_scratch;
+ }
ret = map_pt_dma(vm, obj);
if (ret) {
@@ -823,7 +828,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
free_scratch:
while (i--)
i915_gem_object_put(vm->scratch[i]);
- return -ENOMEM;
+ vm->scratch[0] = NULL;
+ return ret;
}
static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
@@ -901,6 +907,7 @@ err_pd:
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags)
{
+ struct i915_page_directory *pd;
struct i915_ppgtt *ppgtt;
int err;
@@ -946,21 +953,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
}
- err = gen8_init_scratch(&ppgtt->vm);
- if (err)
- goto err_free;
-
- ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
- if (IS_ERR(ppgtt->pd)) {
- err = PTR_ERR(ppgtt->pd);
- goto err_free_scratch;
- }
-
- if (!i915_vm_is_4lvl(&ppgtt->vm)) {
- err = gen8_preallocate_top_level_pdp(ppgtt);
- if (err)
- goto err_free_pd;
- }
+ ppgtt->vm.pte_encode = gen8_pte_encode;
ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
@@ -971,22 +964,31 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
ppgtt->vm.foreach = gen8_ppgtt_foreach;
+ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
- ppgtt->vm.pte_encode = gen8_pte_encode;
+ err = gen8_init_scratch(&ppgtt->vm);
+ if (err)
+ goto err_put;
+
+ pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(pd)) {
+ err = PTR_ERR(pd);
+ goto err_put;
+ }
+ ppgtt->pd = pd;
+
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+ err = gen8_preallocate_top_level_pdp(ppgtt);
+ if (err)
+ goto err_put;
+ }
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
- ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
-
return ppgtt;
-err_free_pd:
- __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
- gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
-err_free_scratch:
- free_scratch(&ppgtt->vm);
-err_free:
- kfree(ppgtt);
+err_put:
+ i915_vm_put(&ppgtt->vm);
return ERR_PTR(err);
}
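
Both the gen6 and gen8 PPGTT creators now install vm.cleanup before the first allocation and route every failure through a single i915_vm_put(), which is exactly why the cleanup paths above gained NULL checks for scratch[0] and pd: cleanup must now tolerate a half-built vm. A self-contained sketch of that "publish cleanup early, unwind through one put" shape, with purely illustrative names (fail_at only exists to simulate the failure points):

#include <stdlib.h>

struct vm {
	void (*cleanup)(struct vm *vm);
	void *scratch;
	void *pd;
	int refs;
};

static void vm_put(struct vm *vm)
{
	if (--vm->refs == 0) {
		vm->cleanup(vm);	/* runs exactly once, on last ref */
		free(vm);
	}
}

static void vm_cleanup(struct vm *vm)
{
	free(vm->pd);		/* free(NULL) is a no-op, like the new NULL checks */
	free(vm->scratch);
}

static struct vm *vm_create(int fail_at)
{
	struct vm *vm = calloc(1, sizeof(*vm));

	if (!vm)
		return NULL;
	vm->refs = 1;
	vm->cleanup = vm_cleanup;	/* installed before any failure point */

	vm->scratch = fail_at == 1 ? NULL : malloc(64);
	if (!vm->scratch)
		goto err_put;

	vm->pd = fail_at == 2 ? NULL : malloc(64);
	if (!vm->pd)
		goto err_put;

	return vm;

err_put:
	vm_put(vm);		/* the single unwind path, as in the diff */
	return NULL;
}

int main(void)
{
	struct vm *vm = vm_create(2);	/* simulate the pd allocation failing */
	if (!vm)
		return 0;	/* cleanup already ran via vm_put() */
	vm_put(vm);
	return 0;
}
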
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 654a092ed3d6..e94365b08f1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -614,13 +614,12 @@ bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
return ret;
}
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
- struct i915_request *rq)
+bool intel_context_revoke(struct intel_context *ce)
{
bool ret = intel_context_set_exiting(ce);
if (ce->ops->revoke)
- ce->ops->revoke(ce, rq, ce->engine->props.preempt_timeout_ms);
+ ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 8e2d70630c49..be09fb2e883a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -329,8 +329,7 @@ static inline bool intel_context_set_exiting(struct intel_context *ce)
return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
}
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
- struct i915_request *rq);
+bool intel_context_revoke(struct intel_context *ce);
static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 17e7f20bbb48..1f7188129cd1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -654,16 +654,83 @@ bool gen11_vdbox_has_sfc(struct intel_gt *gt,
*/
if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
return false;
- else if (GRAPHICS_VER(i915) == 12)
+ else if (MEDIA_VER(i915) >= 12)
return (physical_vdbox % 2 == 0) ||
!(BIT(physical_vdbox - 1) & vdbox_mask);
- else if (GRAPHICS_VER(i915) == 11)
+ else if (MEDIA_VER(i915) == 11)
return logical_vdbox % 2 == 0;
- MISSING_CASE(GRAPHICS_VER(i915));
return false;
}
+static void engine_mask_apply_media_fuses(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ unsigned int logical_vdbox = 0;
+ unsigned int i;
+ u32 media_fuse, fuse1;
+ u16 vdbox_mask;
+ u16 vebox_mask;
+
+ if (MEDIA_VER(gt->i915) < 11)
+ return;
+
+ /*
+ * On newer platforms the fusing register is called 'enable' and has
+ * enable semantics, while on older platforms it is called 'disable'
+	 * and bits have disable semantics.
+ */
+ media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
+ if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
+ media_fuse = ~media_fuse;
+
+ vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
+
+ if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
+ fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
+ gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
+ } else {
+ gt->info.sfc_mask = ~0;
+ }
+
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ if (!HAS_ENGINE(gt, _VCS(i))) {
+ vdbox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vdbox_mask)) {
+ gt->info.engine_mask &= ~BIT(_VCS(i));
+ drm_dbg(&i915->drm, "vcs%u fused off\n", i);
+ continue;
+ }
+
+ if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
+ gt->info.vdbox_sfc_access |= BIT(i);
+ logical_vdbox++;
+ }
+ drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(gt));
+ GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
+
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ if (!HAS_ENGINE(gt, _VECS(i))) {
+ vebox_mask &= ~BIT(i);
+ continue;
+ }
+
+ if (!(BIT(i) & vebox_mask)) {
+ gt->info.engine_mask &= ~BIT(_VECS(i));
+ drm_dbg(&i915->drm, "vecs%u fused off\n", i);
+ }
+ }
+ drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(gt));
+ GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+}
+
static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -672,6 +739,9 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
unsigned long ccs_mask;
unsigned int i;
+ if (GRAPHICS_VER(i915) < 11)
+ return;
+
if (hweight32(CCS_MASK(gt)) <= 1)
return;
@@ -694,6 +764,10 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
unsigned long meml3_mask;
unsigned long quad;
+ if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
+ GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
+ return;
+
meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
@@ -727,75 +801,11 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
*/
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = gt->i915;
struct intel_gt_info *info = &gt->info;
- struct intel_uncore *uncore = gt->uncore;
- unsigned int logical_vdbox = 0;
- unsigned int i;
- u32 media_fuse, fuse1;
- u16 vdbox_mask;
- u16 vebox_mask;
-
- info->engine_mask = INTEL_INFO(i915)->platform_engine_mask;
-
- if (GRAPHICS_VER(i915) < 11)
- return info->engine_mask;
- /*
- * On newer platforms the fusing register is called 'enable' and has
- * enable semantics, while on older platforms it is called 'disable'
- * and bits have disable semantices.
- */
- media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
- if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
- media_fuse = ~media_fuse;
-
- vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
-
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
- fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1);
- gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
- } else {
- gt->info.sfc_mask = ~0;
- }
-
- for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(gt, _VCS(i))) {
- vdbox_mask &= ~BIT(i);
- continue;
- }
-
- if (!(BIT(i) & vdbox_mask)) {
- info->engine_mask &= ~BIT(_VCS(i));
- drm_dbg(&i915->drm, "vcs%u fused off\n", i);
- continue;
- }
-
- if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
- gt->info.vdbox_sfc_access |= BIT(i);
- logical_vdbox++;
- }
- drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
- vdbox_mask, VDBOX_MASK(gt));
- GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
-
- for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(gt, _VECS(i))) {
- vebox_mask &= ~BIT(i);
- continue;
- }
-
- if (!(BIT(i) & vebox_mask)) {
- info->engine_mask &= ~BIT(_VECS(i));
- drm_dbg(&i915->drm, "vecs%u fused off\n", i);
- }
- }
- drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
- vebox_mask, VEBOX_MASK(gt));
- GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+ GEM_BUG_ON(!info->engine_mask);
+ engine_mask_apply_media_fuses(gt);
engine_mask_apply_compute_fuses(gt);
engine_mask_apply_copy_fuses(gt);
@@ -1688,9 +1698,9 @@ bool intel_engine_irq_enable(struct intel_engine_cs *engine)
return false;
/* Caller disables interrupts */
- spin_lock(&engine->gt->irq_lock);
+ spin_lock(engine->gt->irq_lock);
engine->irq_enable(engine);
- spin_unlock(&engine->gt->irq_lock);
+ spin_unlock(engine->gt->irq_lock);
return true;
}
@@ -1701,9 +1711,9 @@ void intel_engine_irq_disable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
- spin_lock(&engine->gt->irq_lock);
+ spin_lock(engine->gt->irq_lock);
engine->irq_disable(engine);
- spin_unlock(&engine->gt->irq_lock);
+ spin_unlock(engine->gt->irq_lock);
}
void intel_engines_reset_default_submission(struct intel_gt *gt)
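
engine_mask_apply_media_fuses() above notes that the fuse register flips meaning across generations: media IPs before 12.50 expose a disable mask, newer ones an enable mask, so the raw value is inverted on older parts before any bits are trusted. A standalone sketch of just that normalization step:

#include <stdint.h>
#include <stdio.h>

/* Normalize a fuse word to enable semantics: on parts with disable
 * semantics, a set bit means "fused off", so invert before masking. */
static uint16_t vdbox_enable_mask(uint32_t raw_fuse, int enable_semantics,
				  uint16_t valid_mask)
{
	if (!enable_semantics)
		raw_fuse = ~raw_fuse;
	return raw_fuse & valid_mask;
}

int main(void)
{
	/* four possible engines; raw 0x2 as a disable mask: vcs1 fused off */
	printf("%#x\n", vdbox_enable_mask(0x2, 0, 0xf));	/* prints 0xd */
	return 0;
}
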
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index 889f0df3940b..fe1a0d5fd4b1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -110,6 +110,7 @@
#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */
#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
#define RING_BBADDR(base) _MMIO((base) + 0x140)
+#define RING_BB_OFFSET(base) _MMIO((base) + 0x158)
#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */
#define CCID(base) _MMIO((base) + 0x180)
#define CCID_EN BIT(0)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 633a7e5dba3b..6b5d4ea22b67 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -166,6 +166,21 @@ struct intel_engine_execlists {
struct timer_list preempt;
/**
+ * @preempt_target: active request at the time of the preemption request
+ *
+ * We force a preemption to occur if the pending contexts have not
+ * been promoted to active upon receipt of the CS ack event within
+ * the timeout. This timeout maybe chosen based on the target,
+ * using a very short timeout if the context is no longer schedulable.
+ * That short timeout may not be applicable to other contexts, so
+ * if a context switch should happen within before the preemption
+ * timeout, we may shoot early at an innocent context. To prevent this,
+ * we record which context was active at the time of the preemption
+ * request and only reset that context upon the timeout.
+ */
+ const struct i915_request *preempt_target;
+
+ /**
* @ccid: identifier for contexts submitted to this engine
*/
u32 ccid;
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 4b909cb88cdf..c718e6dc40b5 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1241,6 +1241,9 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
if (!rq)
return 0;
+ /* Only allow ourselves to force reset the currently active context */
+ engine->execlists.preempt_target = rq;
+
/* Force a fast reset for terminated contexts (ignoring sysfs!) */
if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS;
@@ -2427,8 +2430,24 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
if (unlikely(preempt_timeout(engine))) {
+ const struct i915_request *rq = *engine->execlists.active;
+
+ /*
+	 * If, after the preempt-timeout has expired, we are still on the
+ * same active request/context as before we initiated the
+ * preemption, reset the engine.
+ *
+ * However, if we have processed a CS event to switch contexts,
+ * but not yet processed the CS event for the pending
+ * preemption, reset the timer allowing the new context to
+ * gracefully exit.
+ */
cancel_timer(&engine->execlists.preempt);
- engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ if (rq == engine->execlists.preempt_target)
+ engine->execlists.error_interrupt |= ERROR_PREEMPT;
+ else
+ set_timer_ms(&engine->execlists.preempt,
+ active_preempt_timeout(engine, rq));
}
if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
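
The tasklet change above implements a simple fairness policy: remember which request was active when the preemption timer was armed (preempt_target), and on expiry escalate to an engine reset only if that same request is still active; if a context switch landed in the meantime, the new context earns a fresh timer instead of inheriting the old one's punishment. A minimal standalone model of that policy, with integer context ids standing in for i915_request pointers:

#include <stdbool.h>
#include <stdio.h>

struct engine {
	int active_ctx;		/* context currently on the hardware */
	int preempt_target;	/* captured when the timer was armed */
};

static void arm_preempt(struct engine *e)
{
	e->preempt_target = e->active_ctx;	/* remember whom we chase */
}

/* On expiry: true means reset; false means re-arm for the new context. */
static bool preempt_expired_should_reset(const struct engine *e)
{
	return e->active_ctx == e->preempt_target;
}

int main(void)
{
	struct engine e = { .active_ctx = 1 };

	arm_preempt(&e);
	e.active_ctx = 2;	/* a CS event switched contexts before expiry */
	printf("reset? %d\n", preempt_expired_should_reset(&e));	/* 0: re-arm */
	return 0;
}
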
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 15a915bb4088..2049a00417af 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -16,7 +16,9 @@
#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
+#include "intel_pci_config.h"
#include "i915_drv.h"
+#include "i915_pci.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
@@ -869,8 +871,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
u32 pte_flags;
int ret;
- GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
- phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
+ GEM_WARN_ON(pci_resource_len(pdev, GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, GTTMMADR_BAR) + gen6_gttadr_offset(i915);
/*
* On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
@@ -930,7 +932,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
u16 snb_gmch_ctl;
if (!HAS_LMEM(i915)) {
- ggtt->gmadr = pci_resource(pdev, 2);
+ if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR))
+ return -ENXIO;
+
+ ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
}
@@ -1084,7 +1089,10 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
unsigned int size;
u16 snb_gmch_ctl;
- ggtt->gmadr = pci_resource(pdev, 2);
+ if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR))
+ return -ENXIO;
+
+ ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
/*
@@ -1267,10 +1275,16 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- if (!retained_ptes)
+ if (!retained_ptes) {
+ /*
+ * Clear the bound flags of the vma resource to allow
+ * ptes to be repopulated.
+ */
+ vma->resource->bound_flags = 0;
vma->ops->bind_vma(vm, NULL, vma->resource,
obj ? obj->cache_level : 0,
was_bound);
+ }
if (obj) { /* only used during resume => exclusive access */
write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 6ebda3d65086..ea775e601686 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -727,7 +727,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* bit17 dependent, and so we need to also prevent the pages
* from being moved.
*/
- i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+ i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES;
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
@@ -842,7 +842,6 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
INIT_LIST_HEAD(&ggtt->fence_list);
INIT_LIST_HEAD(&ggtt->userfault_list);
- intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);
detect_bit_6_swizzle(ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index 0e494028b81d..7af6db3194dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -7,6 +7,7 @@
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
+#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
@@ -36,10 +37,56 @@ static int gsc_irq_init(int irq)
return irq_set_chip_data(irq, NULL);
}
+static int
+gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
+{
+ struct intel_gt *gt = gsc_to_gt(gsc);
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_lmem(gt->i915, size,
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_CPU_CLEAR);
+ if (IS_ERR(obj)) {
+ drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n");
+ return PTR_ERR(obj);
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err) {
+ drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n");
+ goto out_put;
+ }
+
+ intf->gem_obj = obj;
+
+ return 0;
+
+out_put:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
+{
+ struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);
+
+ if (!obj)
+ return;
+
+ if (i915_gem_object_has_pinned_pages(obj))
+ i915_gem_object_unpin_pages(obj);
+
+ i915_gem_object_put(obj);
+}
+
struct gsc_def {
const char *name;
unsigned long bar;
size_t bar_size;
+ bool use_polling;
+ bool slow_firmware;
+ size_t lmem_size;
};
/* gsc resources and definitions (HECI1 and HECI2) */
@@ -54,11 +101,25 @@ static const struct gsc_def gsc_def_dg1[] = {
}
};
+static const struct gsc_def gsc_def_xehpsdv[] = {
+ {
+ /* HECI1 not enabled on the device. */
+ },
+ {
+ .name = "mei-gscfi",
+ .bar = DG1_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ .use_polling = true,
+ .slow_firmware = true,
+ }
+};
+
static const struct gsc_def gsc_def_dg2[] = {
{
.name = "mei-gsc",
.bar = DG2_GSC_HECI1_BASE,
.bar_size = GSC_BAR_LENGTH,
+ .lmem_size = SZ_4M,
},
{
.name = "mei-gscfi",
@@ -75,26 +136,32 @@ static void gsc_release_dev(struct device *dev)
kfree(adev);
}
-static void gsc_destroy_one(struct intel_gsc_intf *intf)
+static void gsc_destroy_one(struct drm_i915_private *i915,
+ struct intel_gsc *gsc, unsigned int intf_id)
{
+ struct intel_gsc_intf *intf = &gsc->intf[intf_id];
+
if (intf->adev) {
auxiliary_device_delete(&intf->adev->aux_dev);
auxiliary_device_uninit(&intf->adev->aux_dev);
intf->adev = NULL;
}
+
if (intf->irq >= 0)
irq_free_desc(intf->irq);
intf->irq = -1;
+
+ gsc_ext_om_destroy(intf);
}
-static void gsc_init_one(struct drm_i915_private *i915,
- struct intel_gsc_intf *intf,
+static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
unsigned int intf_id)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct mei_aux_device *adev;
struct auxiliary_device *aux_dev;
const struct gsc_def *def;
+ struct intel_gsc_intf *intf = &gsc->intf[intf_id];
int ret;
intf->irq = -1;
@@ -105,6 +172,8 @@ static void gsc_init_one(struct drm_i915_private *i915,
if (IS_DG1(i915)) {
def = &gsc_def_dg1[intf_id];
+ } else if (IS_XEHPSDV(i915)) {
+ def = &gsc_def_xehpsdv[intf_id];
} else if (IS_DG2(i915)) {
def = &gsc_def_dg2[intf_id];
} else {
@@ -117,10 +186,14 @@ static void gsc_init_one(struct drm_i915_private *i915,
return;
}
+ /* skip irq initialization */
+ if (def->use_polling)
+ goto add_device;
+
intf->irq = irq_alloc_desc(0);
if (intf->irq < 0) {
drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
- return;
+ goto fail;
}
ret = gsc_irq_init(intf->irq);
@@ -129,16 +202,31 @@ static void gsc_init_one(struct drm_i915_private *i915,
goto fail;
}
+add_device:
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
if (!adev)
goto fail;
+ if (def->lmem_size) {
+ drm_dbg(&i915->drm, "setting up GSC lmem\n");
+
+ if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
+ drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
+ kfree(adev);
+ goto fail;
+ }
+
+ adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
+ adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
+ }
+
adev->irq = intf->irq;
adev->bar.parent = &pdev->resource[0];
adev->bar.start = def->bar + pdev->resource[0].start;
adev->bar.end = adev->bar.start + def->bar_size - 1;
adev->bar.flags = IORESOURCE_MEM;
adev->bar.desc = IORES_DESC_NONE;
+ adev->slow_firmware = def->slow_firmware;
aux_dev = &adev->aux_dev;
aux_dev->name = def->name;
@@ -165,7 +253,7 @@ static void gsc_init_one(struct drm_i915_private *i915,
return;
fail:
- gsc_destroy_one(intf);
+ gsc_destroy_one(i915, gsc, intf->id);
}
static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
@@ -182,10 +270,8 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
return;
}
- if (gt->gsc.intf[intf_id].irq < 0) {
- drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set");
+ if (gt->gsc.intf[intf_id].irq < 0)
return;
- }
ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
if (ret)
@@ -208,7 +294,7 @@ void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
- gsc_init_one(i915, &gsc->intf[i], i);
+ gsc_init_one(i915, gsc, i);
}
void intel_gsc_fini(struct intel_gsc *gsc)
@@ -220,5 +306,5 @@ void intel_gsc_fini(struct intel_gsc *gsc)
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
- gsc_destroy_one(&gsc->intf[i]);
+ gsc_destroy_one(gt->i915, gsc, i);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
index 68582f912b21..fcac1775e9c3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.h
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -20,11 +20,14 @@ struct mei_aux_device;
/**
* struct intel_gsc - graphics security controller
+ *
+ * @gem_obj: scratch memory for GSC operations
* @intf : gsc interface
*/
struct intel_gsc {
struct intel_gsc_intf {
struct mei_aux_device *adev;
+ struct drm_i915_gem_object *gem_obj;
int irq;
unsigned int id;
} intf[INTEL_GSC_NUM_INTERFACES];
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f435e06125aa..d0b03a928b9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -26,18 +26,22 @@
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
+#include "intel_pci_config.h"
#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
+#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
-static void __intel_gt_init_early(struct intel_gt *gt)
+void intel_gt_common_init_early(struct intel_gt *gt)
{
- spin_lock_init(&gt->irq_lock);
+ spin_lock_init(gt->irq_lock);
+ INIT_LIST_HEAD(&gt->lmem_userfault_list);
+ mutex_init(&gt->lmem_userfault_lock);
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -57,14 +61,19 @@ static void __intel_gt_init_early(struct intel_gt *gt)
}
/* Preliminary initialization of Tile 0 */
-void intel_root_gt_init_early(struct drm_i915_private *i915)
+int intel_root_gt_init_early(struct drm_i915_private *i915)
{
struct intel_gt *gt = to_gt(i915);
gt->i915 = i915;
gt->uncore = &i915->uncore;
+ gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
+ if (!gt->irq_lock)
+ return -ENOMEM;
- __intel_gt_init_early(gt);
+ intel_gt_common_init_early(gt);
+
+ return 0;
}
static int intel_gt_probe_lmem(struct intel_gt *gt)
@@ -780,26 +789,25 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
int ret;
if (!gt_is_root(gt)) {
- struct intel_uncore_mmio_debug *mmio_debug;
struct intel_uncore *uncore;
+ spinlock_t *irq_lock;
- uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
+ uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
if (!uncore)
return -ENOMEM;
- mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
- if (!mmio_debug) {
- kfree(uncore);
+ irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
+ if (!irq_lock)
return -ENOMEM;
- }
gt->uncore = uncore;
- gt->uncore->debug = mmio_debug;
+ gt->irq_lock = irq_lock;
- __intel_gt_init_early(gt);
+ intel_gt_common_init_early(gt);
}
intel_uncore_init_early(gt->uncore, gt);
+ intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm);
ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
if (ret)
@@ -810,27 +818,17 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
return 0;
}
-static void
-intel_gt_tile_cleanup(struct intel_gt *gt)
-{
- intel_uncore_cleanup_mmio(gt->uncore);
-
- if (!gt_is_root(gt)) {
- kfree(gt->uncore->debug);
- kfree(gt->uncore);
- kfree(gt);
- }
-}
-
int intel_gt_probe_all(struct drm_i915_private *i915)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_gt *gt = &i915->gt0;
+ const struct intel_gt_definition *gtdef;
phys_addr_t phys_addr;
unsigned int mmio_bar;
+ unsigned int i;
int ret;
- mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
+ mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
phys_addr = pci_resource_start(pdev, mmio_bar);
/*
@@ -838,14 +836,74 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
* and it has been already initialized early during probe
* in i915_driver_probe()
*/
+ gt->i915 = i915;
+ gt->name = "Primary GT";
+ gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
+
+ drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
ret = intel_gt_tile_setup(gt, phys_addr);
if (ret)
return ret;
i915->gt[0] = gt;
- /* TODO: add more tiles */
+ if (!HAS_EXTRA_GT_LIST(i915))
+ return 0;
+
+ for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
+ gtdef->name != NULL;
+ i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
+ gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
+ if (!gt) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ gt->i915 = i915;
+ gt->name = gtdef->name;
+ gt->type = gtdef->type;
+ gt->info.engine_mask = gtdef->engine_mask;
+ gt->info.id = i;
+
+ drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
+ if (GEM_WARN_ON(range_overflows_t(resource_size_t,
+ gtdef->mapping_base,
+ SZ_16M,
+ pci_resource_len(pdev, mmio_bar)))) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ switch (gtdef->type) {
+ case GT_TILE:
+ ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
+ break;
+
+ case GT_MEDIA:
+ ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
+ gtdef->gsi_offset);
+ break;
+
+ case GT_PRIMARY:
+ /* The primary GT should not appear in the extra GT list */
+ default:
+ MISSING_CASE(gtdef->type);
+ ret = -ENODEV;
+ }
+
+ if (ret)
+ goto err;
+
+ i915->gt[i] = gt;
+ }
+
return 0;
+
+err:
+ i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
+ intel_gt_release_all(i915);
+
+ return ret;
}
int intel_gt_tiles_init(struct drm_i915_private *i915)
@@ -868,10 +926,8 @@ void intel_gt_release_all(struct drm_i915_private *i915)
struct intel_gt *gt;
unsigned int id;
- for_each_gt(gt, i915, id) {
- intel_gt_tile_cleanup(gt);
+ for_each_gt(gt, i915, id)
i915->gt[id] = NULL;
- }
}
void intel_gt_info_print(const struct intel_gt_info *info,
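
A self-contained sketch (not from the patch; names invented) of the lifetime rule the irq_lock conversion establishes above: the root GT allocates the lock once under managed (drmm-style) lifetime and every secondary GT borrows the same pointer, so no per-GT cleanup pass is needed. pthread stands in for the kernel spinlock and malloc/free for drmm_kzalloc() and its automatic release.

/*
 * Hypothetical sketch: root GT owns the heap-allocated lock, secondary
 * GTs share the same pointer, mirroring the drmm_kzalloc()-backed
 * irq_lock above.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct gt {
        const char *name;
        pthread_mutex_t *irq_lock;      /* shared, owned by the allocator */
};

int main(void)
{
        pthread_mutex_t *lock = malloc(sizeof(*lock));  /* drmm_kzalloc() stand-in */
        struct gt root = { "Primary GT", lock };
        struct gt media = { "Standalone Media", root.irq_lock };

        if (!lock)
                return 1;
        pthread_mutex_init(lock, NULL);

        pthread_mutex_lock(media.irq_lock);     /* both GTs serialize on one lock */
        printf("%s and %s share one irq_lock\n", root.name, media.name);
        pthread_mutex_unlock(media.irq_lock);

        pthread_mutex_destroy(lock);
        free(lock);     /* drmm would do this at device release */
        return 0;
}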
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 40b06adf509a..2ee582e287c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -44,7 +44,8 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
return container_of(gsc, struct intel_gt, gsc);
}
-void intel_root_gt_init_early(struct drm_i915_private *i915);
+void intel_gt_common_init_early(struct intel_gt *gt);
+int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
@@ -54,7 +55,6 @@ void intel_gt_driver_register(struct intel_gt *gt);
void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
-
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index d5d1b04dbcad..3f656d3dba9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -26,26 +26,6 @@ static u32 read_reference_ts_freq(struct intel_uncore *uncore)
return base_freq + frac_freq;
}
-static u32 gen9_get_crystal_clock_freq(struct intel_uncore *uncore,
- u32 rpm_config_reg)
-{
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
- u32 crystal_clock =
- (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
- GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-
- switch (crystal_clock) {
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
- case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
- default:
- MISSING_CASE(crystal_clock);
- return 0;
- }
-}
-
static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
u32 rpm_config_reg)
{
@@ -72,98 +52,106 @@ static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
}
}
-static u32 read_clock_frequency(struct intel_uncore *uncore)
+static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
{
- u32 f12_5_mhz = 12500000;
- u32 f19_2_mhz = 19200000;
- u32 f24_mhz = 24000000;
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ /*
+ * Note that on gen11+, the clock frequency may be reconfigured.
+ * We do not, and we assume nobody else does.
+ *
+ * First figure out the reference frequency. There are 2 ways
+ * we can compute the frequency, either through the
+ * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+ * tells us which one we should use.
+ */
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
+
+ freq = gen11_get_crystal_clock_freq(uncore, c0);
- if (GRAPHICS_VER(uncore->i915) <= 4) {
- /*
- * PRMs say:
- *
- * "The value in this register increments once every 16
- * hclks." (through the “Clocking Configuration”
- * (“CLKCFG”) MCHBAR register)
- */
- return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
- } else if (GRAPHICS_VER(uncore->i915) <= 8) {
/*
- * PRMs say:
- *
- * "The PCU TSC counts 10ns increments; this timestamp
- * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
- * rolling over every 1.5 hours).
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
*/
- return f12_5_mhz;
- } else if (GRAPHICS_VER(uncore->i915) <= 9) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
-
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(uncore);
- } else {
- freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;
-
- /*
- * Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
- CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
- } else if (GRAPHICS_VER(uncore->i915) <= 12) {
- u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
- u32 freq = 0;
+ freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
+
+ return freq;
+}
+
+static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
+{
+ u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+ u32 freq = 0;
+
+ if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+ freq = read_reference_ts_freq(uncore);
+ } else {
+ freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000;
/*
- * First figure out the reference frequency. There are 2 ways
- * we can compute the frequency, either through the
- * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
- * tells us which one we should use.
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycles).
*/
- if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(uncore);
- } else {
- u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
-
- if (GRAPHICS_VER(uncore->i915) >= 11)
- freq = gen11_get_crystal_clock_freq(uncore, c0);
- else
- freq = gen9_get_crystal_clock_freq(uncore, c0);
-
- /*
- * Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
- }
-
- return freq;
+ freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
+ CTC_SHIFT_PARAMETER_SHIFT);
}
- MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
- return 0;
+ return freq;
}
-void intel_gt_init_clock_frequency(struct intel_gt *gt)
+static u32 gen5_read_clock_frequency(struct intel_uncore *uncore)
{
/*
- * Note that on gen11+, the clock frequency may be reconfigured.
- * We do not, and we assume nobody else does.
+ * PRMs say:
+ *
+ * "The PCU TSC counts 10ns increments; this timestamp
+ * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
+ * rolling over every 1.5 hours).
+ */
+ return 12500000;
+}
+
+static u32 gen2_read_clock_frequency(struct intel_uncore *uncore)
+{
+ /*
+ * PRMs say:
+ *
+ * "The value in this register increments once every 16
+ * hclks." (through the “Clocking Configuration”
+ * (“CLKCFG”) MCHBAR register)
*/
+ return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
+}
+
+static u32 read_clock_frequency(struct intel_uncore *uncore)
+{
+ if (GRAPHICS_VER(uncore->i915) >= 11)
+ return gen11_read_clock_frequency(uncore);
+ else if (GRAPHICS_VER(uncore->i915) >= 9)
+ return gen9_read_clock_frequency(uncore);
+ else if (GRAPHICS_VER(uncore->i915) >= 5)
+ return gen5_read_clock_frequency(uncore);
+ else
+ return gen2_read_clock_frequency(uncore);
+}
+
+void intel_gt_init_clock_frequency(struct intel_gt *gt)
+{
gt->clock_frequency = read_clock_frequency(gt->uncore);
- if (gt->clock_frequency)
- gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
/* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
if (GRAPHICS_VER(gt->i915) == 11)
gt->clock_period_ns = NSEC_PER_SEC / 13750000;
+ else if (gt->clock_frequency)
+ gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
GT_TRACE(gt,
"Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 3a72d4fd0214..f26882fdc24c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -29,7 +29,7 @@ gen11_gt_engine_identity(struct intel_gt *gt,
u32 timeout_ts;
u32 ident;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
@@ -59,11 +59,17 @@ static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
const u16 iir)
{
+ struct intel_gt *media_gt = gt->i915->media_gt;
+
if (instance == OTHER_GUC_INSTANCE)
return guc_irq_handler(&gt->uc.guc, iir);
+ if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
+ return guc_irq_handler(&media_gt->uc.guc, iir);
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(&gt->rps, iir);
+ if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt)
+ return gen11_rps_irq_handler(&media_gt->rps, iir);
if (instance == OTHER_KCR_INSTANCE)
return intel_pxp_irq_handler(&gt->pxp, iir);
@@ -81,6 +87,18 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
{
struct intel_engine_cs *engine;
+ /*
+ * Platforms with standalone media have their media engines in another
+ * GT.
+ */
+ if (MEDIA_VER(gt->i915) >= 13 &&
+ (class == VIDEO_DECODE_CLASS || class == VIDEO_ENHANCEMENT_CLASS)) {
+ if (!gt->i915->media_gt)
+ goto err;
+
+ gt = gt->i915->media_gt;
+ }
+
if (instance <= MAX_ENGINE_INSTANCE)
engine = gt->engine_class[class][instance];
else
@@ -89,6 +107,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
if (likely(engine))
return intel_engine_cs_irq(engine, iir);
+err:
WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
class, instance);
}
@@ -120,7 +139,7 @@ gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
unsigned long intr_dw;
unsigned int bit;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
@@ -138,14 +157,14 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
unsigned int bank;
- spin_lock(&gt->irq_lock);
+ spin_lock(gt->irq_lock);
for (bank = 0; bank < 2; bank++) {
if (master_ctl & GEN11_GT_DW_IRQ(bank))
gen11_gt_bank_handler(gt, bank);
}
- spin_unlock(&gt->irq_lock);
+ spin_unlock(gt->irq_lock);
}
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
@@ -154,7 +173,7 @@ bool gen11_gt_reset_one_iir(struct intel_gt *gt,
void __iomem * const regs = gt->uncore->regs;
u32 dw;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
if (dw & BIT(bit)) {
@@ -310,9 +329,9 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
if (!HAS_L3_DPF(gt->i915))
return;
- spin_lock(&gt->irq_lock);
+ spin_lock(gt->irq_lock);
gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
- spin_unlock(&gt->irq_lock);
+ spin_unlock(gt->irq_lock);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
gt->i915->l3_parity.which_slice |= 1 << 1;
@@ -434,7 +453,7 @@ static void gen5_gt_update_irq(struct intel_gt *gt,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 40bdd4cb629f..108b9e76c32e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -504,8 +504,8 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_puts(p, "no P-state info available\n");
}
- drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
- drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
+ drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
+ drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
intel_runtime_pm_put(uncore->rpm, wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
index 11060f5a4c89..52f2a28b2058 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
@@ -37,7 +37,7 @@ static void gen6_gt_pm_update_irq(struct intel_gt *gt,
WARN_ON(enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
new_val = gt->pm_imr;
new_val &= ~interrupt_mask;
@@ -64,7 +64,7 @@ void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask)
struct intel_uncore *uncore = gt->uncore;
i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
intel_uncore_write(uncore, reg, reset_mask);
intel_uncore_write(uncore, reg, reset_mask);
@@ -92,7 +92,7 @@ static void write_pm_ier(struct intel_gt *gt)
void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
{
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
gt->pm_ier |= enable_mask;
write_pm_ier(gt);
@@ -101,7 +101,7 @@ void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask)
void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask)
{
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
gt->pm_ier &= ~disable_mask;
gen6_gt_pm_mask_irq(gt, disable_mask);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index d414785003cc..2275ee47da95 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -1554,6 +1554,8 @@
#define OTHER_GTPM_INSTANCE 1
#define OTHER_KCR_INSTANCE 4
#define OTHER_GSC_INSTANCE 6
+#define OTHER_MEDIA_GUC_INSTANCE 16
+#define OTHER_MEDIA_GTPM_INSTANCE 17
#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
@@ -1578,4 +1580,12 @@
#define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000)
+/*
+ * Standalone Media's non-engine GT registers are located at their regular GT
+ * offsets plus 0x380000. This extra offset is stored inside the intel_uncore
+ * structure so that the existing code can be used for both GTs without
+ * modification.
+ */
+#define MTL_MEDIA_GSI_BASE 0x380000
+
#endif /* __INTEL_GT_REGS__ */
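
An illustrative accessor (not the driver's actual implementation) showing how a per-uncore GSI displacement like MTL_MEDIA_GSI_BASE lets one set of register definitions serve both GTs: the media GT's uncore carries gsi_offset and every non-engine register access is shifted by it.

/*
 * Minimal sketch: the same register offset resolves to two different
 * addresses depending on which uncore (primary or media) issues it.
 */
#include <stdint.h>
#include <stdio.h>

#define MTL_MEDIA_GSI_BASE 0x380000u

struct uncore {
        uint32_t gsi_offset;
};

static uint32_t reg_addr(const struct uncore *uncore, uint32_t reg)
{
        return reg + uncore->gsi_offset;        /* 0 for the primary GT */
}

int main(void)
{
        struct uncore primary = { 0 };
        struct uncore media = { MTL_MEDIA_GSI_BASE };
        uint32_t reg = 0x190070;        /* GEN11_IIR_REG_SELECTOR(0), for illustration */

        printf("primary: %#x, media: %#x\n",
               (unsigned int)reg_addr(&primary, reg),
               (unsigned int)reg_addr(&media, reg));
        return 0;
}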
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index e066cc33d9f2..180dd6f3ef57 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -545,8 +545,7 @@ static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
-static const struct attribute *freq_attrs[] = {
- &dev_attr_punit_req_freq_mhz.attr,
+static const struct attribute *throttle_reason_attrs[] = {
&attr_throttle_reason_status.attr,
&attr_throttle_reason_pl1.attr,
&attr_throttle_reason_pl2.attr,
@@ -791,12 +790,20 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
if (!is_object_gt(kobj))
return;
- ret = sysfs_create_files(kobj, freq_attrs);
+ ret = sysfs_create_file(kobj, &dev_attr_punit_req_freq_mhz.attr);
if (ret)
drm_warn(&gt->i915->drm,
- "failed to create gt%u throttle sysfs files (%pe)",
+ "failed to create gt%u punit_req_freq_mhz sysfs (%pe)",
gt->info.id, ERR_PTR(ret));
+ if (GRAPHICS_VER(gt->i915) >= 11) {
+ ret = sysfs_create_files(kobj, throttle_reason_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u throttle sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+ }
+
if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, media_perf_power_attrs);
if (ret)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 4d56f7d5a3be..f19c2de77ff6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -81,8 +81,17 @@ struct gt_defaults {
u32 max_freq;
};
+enum intel_gt_type {
+ GT_PRIMARY,
+ GT_TILE,
+ GT_MEDIA,
+};
+
struct intel_gt {
struct drm_i915_private *i915;
+ const char *name;
+ enum intel_gt_type type;
+
struct intel_uncore *uncore;
struct i915_ggtt *ggtt;
@@ -132,6 +141,20 @@ struct intel_gt {
struct intel_wakeref wakeref;
atomic_t user_wakeref;
+ /**
+ * Protects access to the lmem userfault list.
+ * Outside of the runtime suspend path, access to @lmem_userfault_list
+ * always requires first grabbing the runtime pm, to ensure we can't race
+ * against runtime suspend. Once we have that we also need to grab
+ * @lmem_userfault_lock, at which point we have exclusive access.
+ * The runtime suspend path is special since it doesn't really hold any locks,
+ * but instead has exclusive access by virtue of all other accesses requiring
+ * holding the runtime pm wakeref.
+ */
+ struct mutex lmem_userfault_lock;
+ struct list_head lmem_userfault_list;
+
struct list_head closed_vma;
spinlock_t closed_lock; /* guards the list of closed_vma */
@@ -147,6 +170,9 @@ struct intel_gt {
*/
intel_wakeref_t awake;
+ /* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
+ struct intel_wakeref_auto userfault_wakeref;
+
u32 clock_frequency;
u32 clock_period_ns;
@@ -154,7 +180,7 @@ struct intel_gt {
struct intel_rc6 rc6;
struct intel_rps rps;
- spinlock_t irq_lock;
+ spinlock_t *irq_lock;
u32 gt_imr;
u32 pm_ier;
u32 pm_imr;
@@ -262,6 +288,14 @@ struct intel_gt {
struct kobject *sysfs_defaults;
};
+struct intel_gt_definition {
+ enum intel_gt_type type;
+ char *name;
+ u32 mapping_base;
+ u32 gsi_offset;
+ intel_engine_mask_t engine_mask;
+};
+
enum intel_gt_scratch_field {
/* 8 bytes */
INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
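
A hypothetical, self-contained illustration of the sentinel-terminated intel_gt_definition table that intel_gt_probe_all() walks. The struct layout mirrors the diff; every value below is invented for the example.

/*
 * The probe loop starts at i = 1 and stops at the first entry whose
 * name is NULL, matching the iteration in intel_gt_probe_all() above.
 */
#include <stdio.h>

enum intel_gt_type { GT_PRIMARY, GT_TILE, GT_MEDIA };

struct intel_gt_definition {
        enum intel_gt_type type;
        const char *name;
        unsigned int mapping_base;
        unsigned int gsi_offset;
        unsigned int engine_mask;
};

static const struct intel_gt_definition extra_gt_list[] = {
        { GT_MEDIA, "Standalone Media GT", 0x380000, 0x380000, 0x300 },
        { 0 },  /* sentinel: gtdef->name == NULL ends the probe loop */
};

int main(void)
{
        const struct intel_gt_definition *gtdef;
        unsigned int i;

        for (i = 1, gtdef = &extra_gt_list[i - 1];
             gtdef->name != NULL;
             i++, gtdef = &extra_gt_list[i - 1])
                printf("gt%u: %s at +%#x\n", i, gtdef->name, gtdef->mapping_base);
        return 0;
}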
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index b67831833c9a..2eaeba14319e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -405,6 +405,9 @@ void free_scratch(struct i915_address_space *vm)
{
int i;
+ if (!vm->scratch[0])
+ return;
+
for (i = 0; i <= vm->top; i++)
i915_gem_object_put(vm->scratch[i]);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index e639434e97fd..c0ca53cba9f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -386,9 +386,6 @@ struct i915_ggtt {
*/
struct list_head userfault_list;
- /* Manual runtime pm autosuspend delay for user GGTT mmaps */
- struct intel_wakeref_auto userfault_wakeref;
-
struct mutex error_mutex;
struct drm_mm_node error_capture;
struct drm_mm_node uc_fw;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 070cec4ff8a4..3955292483a6 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -662,6 +662,21 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
return -1;
}
+static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
+{
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ return 0x80;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
+ return 0x70;
+ else if (GRAPHICS_VER(engine->i915) >= 9)
+ return 0x64;
+ else if (GRAPHICS_VER(engine->i915) >= 8 &&
+ engine->class == RENDER_CLASS)
+ return 0xc4;
+ else
+ return -1;
+}
+
static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
@@ -768,6 +783,7 @@ static void init_common_regs(u32 * const regs,
bool inhibit)
{
u32 ctl;
+ int loc;
ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
@@ -779,6 +795,10 @@ static void init_common_regs(u32 * const regs,
regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
+
+ loc = lrc_ring_bb_offset(engine);
+ if (loc != -1)
+ regs[loc + 1] = 0;
}
static void init_wa_bb_regs(u32 * const regs,
@@ -1278,7 +1298,8 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915))
- cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+ cs = gen12_emit_aux_table_inv(ce->engine->gt,
+ cs, GEN12_GFX_CCS_AUX_NV);
/* Wa_16014892111 */
if (IS_DG2(ce->engine->i915))
@@ -1304,9 +1325,11 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
/* hsdes: 1809175790 */
if (!HAS_FLAT_CCS(ce->engine->i915)) {
if (ce->engine->class == VIDEO_DECODE_CLASS)
- cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(ce->engine->gt,
+ cs, GEN12_VD0_AUX_NV);
else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
- cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
+ cs = gen12_emit_aux_table_inv(ce->engine->gt,
+ cs, GEN12_VE0_AUX_NV);
}
return cs;
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index c6ebe2781076..152244d7f62a 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -207,6 +207,14 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
MOCS_ENTRY(15, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
L3_3_WB), \
+ /* Bypass LLC - Uncached (EHL+) */ \
+ MOCS_ENTRY(16, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_1_UC), \
+ /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
+ MOCS_ENTRY(17, \
+ LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+ L3_3_WB), \
/* Self-Snoop - L3 + LLC */ \
MOCS_ENTRY(18, \
LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 6ee8d1127016..7ecfa672f738 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -312,7 +312,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
ppgtt->vm.gt = gt;
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = i915->drm.dev;
- ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+ ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size);
ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;
dma_resv_init(&ppgtt->vm._resv);
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index aa6aed837194..f3ad93db0b21 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -4,8 +4,10 @@
*/
#include "i915_drv.h"
+#include "i915_pci.h"
#include "i915_reg.h"
#include "intel_memory_region.h"
+#include "intel_pci_config.h"
#include "intel_region_lmem.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
@@ -45,7 +47,6 @@ _resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
drm_info(&i915->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size);
}
-#define LMEM_BAR_NUM 2
static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
@@ -56,15 +57,14 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
u32 pci_cmd;
int i;
- current_size = roundup_pow_of_two(pci_resource_len(pdev, LMEM_BAR_NUM));
+ current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));
if (i915->params.lmem_bar_size) {
u32 bar_sizes;
rebar_size = i915->params.lmem_bar_size *
(resource_size_t)SZ_1M;
- bar_sizes = pci_rebar_get_possible_sizes(pdev,
- LMEM_BAR_NUM);
+ bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
if (rebar_size == current_size)
return;
@@ -107,7 +107,7 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
pci_write_config_dword(pdev, PCI_COMMAND,
pci_cmd & ~PCI_COMMAND_MEMORY);
- _resize_bar(i915, LMEM_BAR_NUM, rebar_size);
+ _resize_bar(i915, GEN12_LMEM_BAR, rebar_size);
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
@@ -202,6 +202,9 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
if (!IS_DGFX(i915))
return ERR_PTR(-ENODEV);
+ if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR))
+ return ERR_PTR(-ENXIO);
+
if (HAS_FLAT_CCS(i915)) {
resource_size_t lmem_range;
u64 tile_stolen, flat_ccs_base;
@@ -236,8 +239,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
mul_u32_u32(i915->params.lmem_size, SZ_1M));
}
- io_start = pci_resource_start(pdev, 2);
- io_size = min(pci_resource_len(pdev, 2), lmem_size);
+ io_start = pci_resource_start(pdev, GEN12_LMEM_BAR);
+ io_size = min(pci_resource_len(pdev, GEN12_LMEM_BAR), lmem_size);
if (!io_size)
return ERR_PTR(-EIO);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 6fadde4ee7bf..6b86250c31ab 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -194,9 +194,9 @@ static void rps_enable_interrupts(struct intel_rps *rps)
rps_reset_ei(rps);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
@@ -217,14 +217,14 @@ static void rps_reset_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
if (GRAPHICS_VER(gt->i915) >= 11)
gen11_rps_reset_interrupts(rps);
else
gen6_rps_reset_interrupts(rps);
rps->pm_iir = 0;
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void rps_disable_interrupts(struct intel_rps *rps)
@@ -234,9 +234,9 @@ static void rps_disable_interrupts(struct intel_rps *rps)
intel_uncore_write(gt->uncore,
GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
@@ -1797,10 +1797,10 @@ static void rps_work(struct work_struct *work)
int new_freq, adj, min, max;
u32 pm_iir = 0;
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
client_boost = atomic_read(&rps->num_waiters);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
if (!pm_iir && !client_boost)
@@ -1873,9 +1873,9 @@ static void rps_work(struct work_struct *work)
mutex_unlock(&rps->lock);
out:
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_unmask_irq(gt, rps->pm_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
@@ -1883,7 +1883,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
struct intel_gt *gt = rps_to_gt(rps);
const u32 events = rps->pm_events & pm_iir;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
if (unlikely(!events))
return;
@@ -1903,7 +1903,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
events = pm_iir & rps->pm_events;
if (events) {
- spin_lock(&gt->irq_lock);
+ spin_lock(gt->irq_lock);
GT_TRACE(gt, "irq events:%x\n", events);
@@ -1911,7 +1911,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
rps->pm_iir |= events;
schedule_work(&rps->work);
- spin_unlock(&gt->irq_lock);
+ spin_unlock(gt->irq_lock);
}
if (GRAPHICS_VER(gt->i915) >= 8)
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c
new file mode 100644
index 000000000000..e8f3d18c12b8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "i915_drv.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_sa_media.h"
+
+int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
+ u32 gsi_offset)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore;
+
+ uncore = drmm_kzalloc(&i915->drm, sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ uncore->gsi_offset = gsi_offset;
+
+ gt->irq_lock = to_gt(i915)->irq_lock;
+ intel_gt_common_init_early(gt);
+ intel_uncore_init_early(uncore, gt);
+
+ /*
+ * Standalone media shares the general MMIO space with the primary
+ * GT. We'll re-use the primary GT's mapping.
+ */
+ uncore->regs = i915->uncore.regs;
+ if (drm_WARN_ON(&i915->drm, uncore->regs == NULL))
+ return -EIO;
+
+ gt->uncore = uncore;
+ gt->phys_addr = phys_addr;
+
+ /*
+ * For current platforms we can assume there's only a single
+ * media GT and cache it for quick lookup.
+ */
+ drm_WARN_ON(&i915->drm, i915->media_gt);
+ i915->media_gt = gt;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.h b/drivers/gpu/drm/i915/gt/intel_sa_media.h
new file mode 100644
index 000000000000..3afb310de932
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef __INTEL_SA_MEDIA__
+#define __INTEL_SA_MEDIA__
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr,
+ u32 gsi_offset);
+
+#endif /* __INTEL_SA_MEDIA__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index c6d3050604c8..66f21c735d54 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -382,7 +382,6 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
static void gen9_sseu_info_init(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
- struct intel_device_info *info = mkwrite_device_info(i915);
struct sseu_dev_info *sseu = &gt->info.sseu;
struct intel_uncore *uncore = gt->uncore;
u32 fuse2, eu_disable, subslice_mask;
@@ -471,10 +470,10 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
if (IS_GEN9_LP(i915)) {
#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask.hsw[0] & BIT(ss)))
- info->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3;
+ RUNTIME_INFO(i915)->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3;
sseu->min_eu_in_pool = 0;
- if (info->has_pooled_eu) {
+ if (HAS_POOLED_EU(i915)) {
if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
sseu->min_eu_in_pool = 3;
else if (IS_SS_DISABLED(1))
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 1109088fe8f6..82d3f8058995 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -27,6 +27,9 @@
#define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
+#define LRI_HEADER MI_INSTR(0x22, 0)
+#define LRI_LENGTH_MASK GENMASK(7, 0)
+
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
@@ -202,7 +205,7 @@ static int live_lrc_layout(void *arg)
continue;
}
- if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((lri & GENMASK(31, 23)) != LRI_HEADER) {
pr_err("%s: Expected LRI command at dword %d, found %08x\n",
engine->name, dw, lri);
err = -EINVAL;
@@ -357,6 +360,11 @@ static int live_lrc_fixed(void *arg)
lrc_ring_cmd_buf_cctl(engine),
"RING_CMD_BUF_CCTL"
},
+ {
+ i915_mmio_reg_offset(RING_BB_OFFSET(engine->mmio_base)),
+ lrc_ring_bb_offset(engine),
+ "RING_BB_OFFSET"
+ },
{ },
}, *t;
u32 *hw;
@@ -987,18 +995,40 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /*
+ * Keep it simple, skip parsing complex commands
+ *
+ * At present, there are no more MI_LOAD_REGISTER_IMM
+ * commands after the first 3D state command. Rather
+ * than include a table (see i915_cmd_parser.c) of all
+ * the possible commands and their instruction lengths
+ * (or mask for variable length instructions), assume
+ * we have gathered the complete list of registers and
+ * bail out.
+ */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
+ /* Assume all other MI commands match LRI length mask */
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ ce->engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
while (len--) {
@@ -1150,18 +1180,29 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /* For simplicity, break parsing at the first complex command */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ ce->engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
*cs++ = MI_LOAD_REGISTER_IMM(len);
@@ -1292,18 +1333,29 @@ static int compare_isolation(struct intel_engine_cs *engine,
hw = defaults;
hw += LRC_STATE_OFFSET / sizeof(*hw);
do {
- u32 len = hw[dw] & 0x7f;
+ u32 len = hw[dw] & LRI_LENGTH_MASK;
+
+ /* For simplicity, break parsing at the first complex command */
+ if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT)
+ break;
if (hw[dw] == 0) {
dw++;
continue;
}
- if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) {
dw += len + 2;
continue;
}
+ if (!len) {
+ pr_err("%s: invalid LRI found in context image\n",
+ engine->name);
+ igt_hexdump(defaults, PAGE_SIZE);
+ break;
+ }
+
dw++;
len = (len + 1) / 2;
while (len--) {
@@ -1343,6 +1395,30 @@ err_A0:
return err;
}
+static struct i915_vma *
+create_result_vma(struct i915_address_space *vm, unsigned long sz)
+{
+ struct i915_vma *vma;
+ void *ptr;
+
+ vma = create_user_vma(vm, sz);
+ if (IS_ERR(vma))
+ return vma;
+
+ /* Set the results to a known value distinct from the poison */
+ ptr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
+ if (IS_ERR(ptr)) {
+ i915_vma_put(vma);
+ return ERR_CAST(ptr);
+ }
+
+ memset(ptr, POISON_INUSE, vma->size);
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+
+ return vma;
+}
+
static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
{
u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
@@ -1361,13 +1437,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
goto err_A;
}
- ref[0] = create_user_vma(A->vm, SZ_64K);
+ ref[0] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(ref[0])) {
err = PTR_ERR(ref[0]);
goto err_B;
}
- ref[1] = create_user_vma(A->vm, SZ_64K);
+ ref[1] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(ref[1])) {
err = PTR_ERR(ref[1]);
goto err_ref0;
@@ -1389,13 +1465,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
}
i915_request_put(rq);
- result[0] = create_user_vma(A->vm, SZ_64K);
+ result[0] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(result[0])) {
err = PTR_ERR(result[0]);
goto err_ref1;
}
- result[1] = create_user_vma(A->vm, SZ_64K);
+ result[1] = create_result_vma(A->vm, SZ_64K);
if (IS_ERR(result[1])) {
err = PTR_ERR(result[1]);
goto err_result0;
@@ -1408,18 +1484,17 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
}
err = poison_registers(B, poison, sema);
- if (err) {
- WRITE_ONCE(*sema, -1);
- i915_request_put(rq);
- goto err_result1;
- }
-
- if (i915_request_wait(rq, 0, HZ / 2) < 0) {
- i915_request_put(rq);
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) {
+ pr_err("%s(%s): wait for results timed out\n",
+ __func__, engine->name);
err = -ETIME;
- goto err_result1;
}
+
+ /* Always cancel the semaphore wait, just in case the GPU gets stuck */
+ WRITE_ONCE(*sema, -1);
i915_request_put(rq);
+ if (err)
+ goto err_result1;
err = compare_isolation(engine, ref, result, A, poison);
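
A worked decode (header value illustrative) of the LRI convention the parsers above rely on: the low byte of an MI_LOAD_REGISTER_IMM header holds (2 * num_regs - 1), so (len + 1) / 2 recovers the register/value pair count.

/*
 * Decode one LRI header: check opcode bits 31:23 against LRI_HEADER,
 * then derive the pair count from the length field in bits 7:0.
 */
#include <stdint.h>
#include <stdio.h>

#define LRI_HEADER      (0x22u << 23)   /* MI_INSTR(0x22, 0) */
#define LRI_LENGTH_MASK 0xffu           /* GENMASK(7, 0) */

int main(void)
{
        uint32_t header = LRI_HEADER | 0x0d;    /* hypothetical: 7 pairs */
        uint32_t len = header & LRI_LENGTH_MASK;

        if ((header & (0x1ffu << 23)) == LRI_HEADER)    /* GENMASK(31, 23) test */
                printf("LRI with %u register/value pairs\n",
                       (unsigned int)((len + 1) / 2));
        return 0;
}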
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 24451d000a6a..bac06e3d6f2c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -82,9 +82,9 @@ static void gen9_reset_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen9_enable_guc_interrupts(struct intel_guc *guc)
@@ -93,11 +93,11 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
gt->pm_guc_events);
gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen9_disable_guc_interrupts(struct intel_guc *guc)
@@ -106,11 +106,11 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
gen9_reset_guc_interrupts(guc);
@@ -120,9 +120,9 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen11_enable_guc_interrupts(struct intel_guc *guc)
@@ -130,25 +130,25 @@ static void gen11_enable_guc_interrupts(struct intel_guc *guc)
struct intel_gt *gt = guc_to_gt(guc);
u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
intel_uncore_write(gt->uncore,
GEN11_GUC_SG_INTR_ENABLE, events);
intel_uncore_write(gt->uncore,
GEN11_GUC_SG_INTR_MASK, ~events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
gen11_reset_guc_interrupts(guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index b071973ac41c..55d3ef93e86f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -36,24 +36,6 @@ struct guc_log_section {
const char *name;
};
-static s32 scale_log_param(struct intel_guc_log *log, const struct guc_log_section *section,
- s32 param)
-{
- /* -1 means default */
- if (param < 0)
- return section->default_val;
-
- /* Check for 32-bit overflow */
- if (param >= SZ_4K) {
- drm_err(&guc_to_gt(log_to_guc(log))->i915->drm, "Size too large for GuC %s log: %dMB!",
- section->name, param);
- return section->default_val;
- }
-
- /* Param units are 1MB */
- return param * SZ_1M;
-}
-
static void _guc_log_init_sizes(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
@@ -78,15 +60,10 @@ static void _guc_log_init_sizes(struct intel_guc_log *log)
"capture",
}
};
- s32 params[GUC_LOG_SECTIONS_LIMIT] = {
- GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE / SZ_1M,
- GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE / SZ_1M,
- GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE / SZ_1M,
- };
int i;
for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++)
- log->sizes[i].bytes = scale_log_param(log, sections + i, params[i]);
+ log->sizes[i].bytes = sections[i].default_val;
/* If debug size > 1MB then bump default crash size to keep the same units */
if (log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes >= SZ_1M &&
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 64c4e83153f4..1db59eeb34db 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -684,7 +684,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
* Corner case where requests were sitting in the priority list or a
* request resubmitted after the context was banned.
*/
- if (unlikely(intel_context_is_banned(ce))) {
+ if (unlikely(!intel_context_is_schedulable(ce))) {
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(ce->engine);
return 0;
@@ -870,15 +870,15 @@ static int guc_wq_item_append(struct intel_guc *guc,
struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
- int ret = 0;
+ int ret;
- if (likely(!intel_context_is_banned(ce))) {
- ret = __guc_wq_item_append(rq);
+ if (unlikely(!intel_context_is_schedulable(ce)))
+ return 0;
- if (unlikely(ret == -EBUSY)) {
- guc->stalled_request = rq;
- guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
- }
+ ret = __guc_wq_item_append(rq);
+ if (unlikely(ret == -EBUSY)) {
+ guc->stalled_request = rq;
+ guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
}
return ret;
@@ -897,7 +897,7 @@ static bool multi_lrc_submit(struct i915_request *rq)
* submitting all the requests generated in parallel.
*/
return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
- intel_context_is_banned(ce);
+ !intel_context_is_schedulable(ce);
}
static int guc_dequeue_one_context(struct intel_guc *guc)
@@ -966,7 +966,7 @@ register_context:
struct intel_context *ce = request_to_scheduling_context(last);
if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
- !intel_context_is_banned(ce))) {
+ intel_context_is_schedulable(ce))) {
ret = try_context_registration(ce, false);
if (unlikely(ret == -EPIPE)) {
goto deadlk;
@@ -1438,7 +1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;
- cancel_delayed_work(&guc->timestamp.work);
+ /*
+ * There is a race with suspend flow where the worker runs after suspend
+ * and causes an unclaimed register access warning. Cancel the worker
+ * synchronously here.
+ */
+ cancel_delayed_work_sync(&guc->timestamp.work);
/*
* Before parking, we should sample engine busyness stats if we need to.
@@ -1532,8 +1537,8 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
__reset_guc_busyness_stats(guc);
/* Flush IRQ handler */
- spin_lock_irq(&guc_to_gt(guc)->irq_lock);
- spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
+ spin_lock_irq(guc_to_gt(guc)->irq_lock);
+ spin_unlock_irq(guc_to_gt(guc)->irq_lock);
guc_flush_submissions(guc);
guc_flush_destroyed_contexts(guc);
@@ -1571,7 +1576,7 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
{
struct intel_engine_cs *engine = __context_to_physical_engine(ce);
- if (intel_context_is_banned(ce))
+ if (!intel_context_is_schedulable(ce))
return;
GEM_BUG_ON(!intel_context_is_pinned(ce));
@@ -4419,12 +4424,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- if (likely(!intel_context_is_banned(ce))) {
+ if (likely(intel_context_is_schedulable(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
} else {
drm_info(&guc_to_gt(guc)->i915->drm,
- "Ignoring context reset notification of banned context 0x%04X on %s",
+ "Ignoring context reset notification of exiting context 0x%04X on %s",
ce->guc_id.id, ce->engine->name);
}
}
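
A sketch of the predicate behind the banned-to-schedulable conversion in this file: a context stops being schedulable either when it is banned or when it is exiting cleanly, and both cases must be treated alike. The helper below is an illustrative stand-in with an invented flag encoding, not the driver's exact implementation.

/*
 * "Schedulable" is broader than "not banned": exiting contexts are
 * excluded too, which is why the hunks above flip the predicate.
 */
#include <stdbool.h>
#include <stdio.h>

#define CONTEXT_BANNED  (1u << 0)
#define CONTEXT_EXITING (1u << 1)

struct ctx {
        unsigned int flags;
};

static bool context_is_schedulable(const struct ctx *ce)
{
        return !(ce->flags & (CONTEXT_BANNED | CONTEXT_EXITING));
}

int main(void)
{
        struct ctx banned = { CONTEXT_BANNED };
        struct ctx exiting = { CONTEXT_EXITING };
        struct ctx ok = { 0 };

        printf("banned: %d, exiting: %d, ok: %d\n",
               context_is_schedulable(&banned),
               context_is_schedulable(&exiting),
               context_is_schedulable(&ok));
        return 0;
}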
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index abf4e142596d..dbd048b77e19 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -245,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc)
intel_guc_enable_interrupts(guc);
/* check for CT messages received before we enabled interrupts */
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_guc_ct_event_handler(&guc->ct);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
drm_dbg(&i915->drm, "GuC communication enabled\n");
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index af425916cdf6..b91ad4aede1f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -72,12 +72,14 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* security fixes, etc. to be enabled.
*/
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
- fw_def(DG2, 0, guc_mmp(dg2, 70, 4, 1)) \
+ fw_def(DG2, 0, guc_maj(dg2, 70, 5)) \
+ fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5)) \
fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \
fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \
+ fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5)) \
fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \
fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \
- fw_def(DG1, 0, guc_mmp(dg1, 70, 1, 1)) \
+ fw_def(DG1, 0, guc_maj(dg1, 70, 5)) \
fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \
fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \
@@ -92,9 +94,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1))
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp) \
+ fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \
fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \
+ fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \
fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \
- fw_def(DG1, 0, huc_mmp(dg1, 7, 9, 3)) \
+ fw_def(DG1, 0, huc_raw(dg1)) \
fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \
fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \
@@ -232,6 +236,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
u32 fw_count;
u8 rev = INTEL_REVID(i915);
int i;
+ bool found;
/*
* The only difference between the ADL GuC FWs is the HWConfig support.
@@ -246,6 +251,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
fw_blobs = blobs_all[uc_fw->type].blobs;
fw_count = blobs_all[uc_fw->type].count;
+ found = false;
for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
const struct uc_fw_blob *blob = &fw_blobs[i].blob;
@@ -266,9 +272,15 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
uc_fw->file_wanted.path = blob->path;
uc_fw->file_wanted.major_ver = blob->major;
uc_fw->file_wanted.minor_ver = blob->minor;
+ found = true;
break;
}
+ if (!found && uc_fw->file_selected.path) {
+ /* Failed to find a match for the last attempt?! */
+ uc_fw->file_selected.path = NULL;
+ }
+
/* make sure the list is ordered as expected */
if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified) {
verified = true;
@@ -322,7 +334,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
continue;
bad:
- drm_err(&i915->drm, "\x1B[35;1mInvalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
+ drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
fw_blobs[i - 1].blob.legacy ? "L" : "v",
fw_blobs[i - 1].blob.major,
@@ -553,10 +565,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
- if (!err || intel_uc_fw_is_overridden(uc_fw))
- goto done;
+
+ /* Any error is terminal if overriding. Don't bother searching for older versions */
+ if (err && intel_uc_fw_is_overridden(uc_fw))
+ goto fail;
while (err == -ENOENT) {
+ old_ver = true;
+
__uc_fw_auto_select(i915, uc_fw);
if (!uc_fw->file_selected.path) {
/*
@@ -576,8 +592,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (err)
goto fail;
- old_ver = true;
-done:
if (uc_fw->loaded_via_gsc)
err = check_gsc_manifest(fw, uc_fw);
else
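
A self-contained sketch of the fetch fallback the hunk above implements: keep auto-selecting older blobs while the request fails with -ENOENT, and remember that a fallback happened. Blob names are invented; the stub pretends only the oldest blob is present on the filesystem.

/*
 * Walk an ordered blob list on ENOENT, setting old_ver as soon as the
 * first choice is abandoned, as in intel_uc_fw_fetch() above.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_BLOBS 3

static const char *blobs[NUM_BLOBS] = {
        "guc_70.5.bin", "guc_70.1.1.bin", "guc_69.0.3.bin"
};

static int request_firmware_stub(unsigned int i)
{
        return i == NUM_BLOBS - 1 ? 0 : -ENOENT;
}

int main(void)
{
        bool old_ver = false;
        unsigned int i = 0;
        int err = request_firmware_stub(i);

        while (err == -ENOENT && i + 1 < NUM_BLOBS) {
                old_ver = true; /* any fallback counts as an older version */
                err = request_firmware_stub(++i);
        }

        if (err) {
                printf("no suitable firmware found (err=%d)\n", err);
                return 1;
        }
        printf("loaded %s (old_ver=%d)\n", blobs[i], old_ver);
        return 0;
}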
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 557f3314291a..076c779f776a 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -240,13 +240,13 @@ static void free_resource(struct intel_vgpu *vgpu)
}
static int alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
+ const struct intel_vgpu_config *conf)
{
struct intel_gvt *gvt = vgpu->gvt;
unsigned long request, avail, max, taken;
const char *item;
- if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+ if (!conf->low_mm || !conf->high_mm || !conf->fence) {
gvt_vgpu_err("Invalid vGPU creation params\n");
return -EINVAL;
}
@@ -255,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
taken = gvt->gm.vgpu_allocated_low_gm_size;
avail = max - taken;
- request = MB_TO_BYTES(param->low_gm_sz);
+ request = conf->low_mm;
if (request > avail)
goto no_enough_resource;
@@ -266,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
taken = gvt->gm.vgpu_allocated_high_gm_size;
avail = max - taken;
- request = MB_TO_BYTES(param->high_gm_sz);
+ request = conf->high_mm;
if (request > avail)
goto no_enough_resource;
@@ -277,16 +277,16 @@ static int alloc_resource(struct intel_vgpu *vgpu,
max = gvt_fence_sz(gvt) - HOST_FENCE;
taken = gvt->fence.vgpu_allocated_fence_num;
avail = max - taken;
- request = param->fence_sz;
+ request = conf->fence;
if (request > avail)
goto no_enough_resource;
vgpu_fence_sz(vgpu) = request;
- gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
- gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
- gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+ gvt->gm.vgpu_allocated_low_gm_size += conf->low_mm;
+ gvt->gm.vgpu_allocated_high_gm_size += conf->high_mm;
+ gvt->fence.vgpu_allocated_fence_num += conf->fence;
return 0;
no_enough_resource:
@@ -298,7 +298,7 @@ no_enough_resource:
}
/**
- * inte_gvt_free_vgpu_resource - free HW resource owned by a vGPU
+ * intel_vgpu_free_resource() - free HW resource owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to free the HW resource owned by a vGPU.
@@ -328,7 +328,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
}
/**
- * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
+ * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
*
@@ -340,11 +340,11 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
*
*/
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
+ const struct intel_vgpu_config *conf)
{
int ret;
- ret = alloc_resource(vgpu, param);
+ ret = alloc_resource(vgpu, conf);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index dad3a6054335..eef3bba8a41b 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -33,6 +33,7 @@
#include "i915_drv.h"
#include "gvt.h"
+#include "intel_pci_config.h"
enum {
INTEL_GVT_PCI_BAR_GTTMMIO = 0,
@@ -353,9 +354,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
- pci_resource_len(pdev, 0);
+ pci_resource_len(pdev, GTTMMADR_BAR);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
- pci_resource_len(pdev, 2);
+ pci_resource_len(pdev, GTT_APERTURE_BAR);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index a30ba2d7b7ba..1b509c1a1e33 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -32,9 +32,10 @@
*
*/
+#include "display/intel_gmbus_regs.h"
+#include "gvt.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "gvt.h"
#define GMBUS1_TOTAL_BYTES_SHIFT 16
#define GMBUS1_TOTAL_BYTES_MASK 0x1ff
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index b4f69364f9a1..ce0eb03709c3 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2341,7 +2341,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
* update the entry in this situation p2m will fail
- * settting the shadow entry to point to a scratch page
+ * setting the shadow entry to point to a scratch page
*/
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
} else
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 705689e64011..dbf8d7470b2c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -36,6 +36,7 @@
#include <uapi/linux/pci_regs.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
+#include <linux/mdev.h>
#include "i915_drv.h"
#include "intel_gvt.h"
@@ -172,6 +173,7 @@ struct intel_vgpu_submission {
#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
struct intel_vgpu {
+ struct vfio_device vfio_device;
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
@@ -211,7 +213,6 @@ struct intel_vgpu {
u32 scan_nonprivbb;
- struct vfio_device vfio_device;
struct vfio_region *region;
int num_regions;
struct eventfd_ctx *intx_trigger;
@@ -294,15 +295,25 @@ struct intel_gvt_firmware {
bool firmware_loaded;
};
-#define NR_MAX_INTEL_VGPU_TYPES 20
-struct intel_vgpu_type {
- char name[16];
- unsigned int avail_instance;
- unsigned int low_gm_size;
- unsigned int high_gm_size;
+struct intel_vgpu_config {
+ unsigned int low_mm;
+ unsigned int high_mm;
unsigned int fence;
+
+ /*
+ * A vGPU with a weight of 8 will get twice as much GPU as a vGPU with
+ * a weight of 4 on a contended host; different vGPU types have
+ * different weights set. Legal weights range from 1 to 16.
+ */
unsigned int weight;
- enum intel_vgpu_edid resolution;
+ enum intel_vgpu_edid edid;
+ const char *name;
+};
+
+struct intel_vgpu_type {
+ struct mdev_type type;
+ char name[16];
+ const struct intel_vgpu_config *conf;
};
struct intel_gvt {
@@ -326,6 +337,8 @@ struct intel_gvt {
struct intel_gvt_workload_scheduler scheduler;
struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+ struct mdev_parent parent;
+ struct mdev_type **mdev_types;
struct intel_vgpu_type *types;
unsigned int num_types;
struct intel_vgpu *idle_vgpu;
@@ -436,19 +449,8 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* ring context size i.e. the first 0x50 dwords*/
#define RING_CTX_SIZE 320
-struct intel_vgpu_creation_params {
- __u64 low_gm_sz; /* in MB */
- __u64 high_gm_sz; /* in MB */
- __u64 fence_sz;
- __u64 resolution;
- __s32 primary;
- __u64 vgpu_id;
-
- __u32 weight;
-};
-
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param);
+ const struct intel_vgpu_config *conf);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
@@ -494,8 +496,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_type *type);
+int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
+ const struct intel_vgpu_config *conf);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
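The header change above embeds a struct mdev_type inside struct intel_vgpu_type, so the mdev core hands the embedded member back to callbacks and the driver recovers its wrapper with container_of(). A self-contained userspace sketch of that embedding pattern, using demo types rather than the kernel definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mdev_type_demo { const char *sysfs_name; };

struct vgpu_type_demo {
	struct mdev_type_demo type;	/* embedded generic type */
	char name[16];
};

int main(void)
{
	struct vgpu_type_demo t = { .type = { "GVTg_V5_4" }, .name = "GVTg_V5_4" };
	/* The core only ever sees &t.type; container_of() recovers the wrapper. */
	struct mdev_type_demo *mtype = &t.type;
	struct vgpu_type_demo *back = container_of(mtype, struct vgpu_type_demo, type);

	printf("recovered: %s\n", back->name);
	return 0;
}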
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index beea5895e499..daac2050d77d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -498,7 +498,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
switch (wrpll_ctl & WRPLL_REF_MASK) {
case WRPLL_REF_PCH_SSC:
- refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc;
+ refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc;
break;
case WRPLL_REF_LCPLL:
refclk = 2700000;
@@ -529,7 +529,7 @@ out:
static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
{
u32 dp_br = 0;
- int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc;
+ int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
struct dpll clock = {0};
@@ -905,7 +905,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
- gvt_vgpu_err("Unsupport registers %x\n", offset);
+ gvt_vgpu_err("Unsupported registers %x\n", offset);
return -EINVAL;
}
@@ -3052,7 +3052,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
- * intel_t_default_mmio_write - default MMIO write handler
+ * intel_vgpu_default_mmio_write() - default MMIO write handler
* @vgpu: a vGPU
* @offset: access offset
* @p_data: write data buffer
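The first two hunks here track the ongoing relocation of display state under a nested i915->display substruct, turning i915->dpll.ref_clks into i915->display.dpll.ref_clks. A toy sketch of the nesting; every name below is a stand-in:

#include <stdio.h>

struct ref_clks_demo { int nssc, ssc; };
struct dpll_demo { struct ref_clks_demo ref_clks; };
struct display_demo { struct dpll_demo dpll; };
struct i915_demo { struct display_demo display; };

int main(void)
{
	struct i915_demo i915 = {
		.display.dpll.ref_clks = { .nssc = 100, .ssc = 120 },
	};

	/* Flat i915.dpll.ref_clks.ssc becomes a nested access after the move. */
	printf("ssc refclk: %d\n", i915.display.dpll.ref_clks.ssc);
	return 0;
}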
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e3cd58946477..7a45e5360caf 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -34,7 +34,6 @@
*/
#include <linux/init.h>
-#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
@@ -43,7 +42,6 @@
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
-#include <linux/uuid.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>
@@ -115,117 +113,18 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node);
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
+static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf)
{
- struct intel_vgpu_type *type;
- unsigned int num = 0;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- num = 0;
- else
- num = type->avail_instance;
-
- return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- return 0;
+ struct intel_vgpu_type *type =
+ container_of(mtype, struct intel_vgpu_type, type);
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
"fence: %d\nresolution: %s\n"
"weight: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution),
- type->weight);
-}
-
-static ssize_t name_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = &gvt->types[mtype_get_type_group_id(mtype)];
- if (!type)
- return 0;
-
- return sprintf(buf, "%s\n", type->name);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-static MDEV_TYPE_ATTR_RO(name);
-
-static struct attribute *gvt_type_attrs[] = {
- &mdev_type_attr_available_instances.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_description.attr,
- &mdev_type_attr_name.attr,
- NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
- [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i, j;
- struct intel_vgpu_type *type;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- type = &gvt->types[i];
-
- group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
- if (!group)
- goto unwind;
-
- group->name = type->name;
- group->attrs = gvt_type_attrs;
- gvt_vgpu_type_groups[i] = group;
- }
-
- return 0;
-
-unwind:
- for (j = 0; j < i; j++) {
- group = gvt_vgpu_type_groups[j];
- kfree(group);
- }
-
- return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- group = gvt_vgpu_type_groups[i];
- gvt_vgpu_type_groups[i] = NULL;
- kfree(group);
- }
+ BYTES_TO_MB(type->conf->low_mm),
+ BYTES_TO_MB(type->conf->high_mm),
+ type->conf->fence, vgpu_edid_str(type->conf->edid),
+ type->conf->weight);
}
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
@@ -1546,7 +1445,28 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
+static int intel_vgpu_init_dev(struct vfio_device *vfio_dev)
+{
+ struct mdev_device *mdev = to_mdev_device(vfio_dev->dev);
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+ struct intel_vgpu_type *type =
+ container_of(mdev->type, struct intel_vgpu_type, type);
+
+ vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt;
+ return intel_gvt_create_vgpu(vgpu, type->conf);
+}
+
+static void intel_vgpu_release_dev(struct vfio_device *vfio_dev)
+{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+
+ intel_gvt_destroy_vgpu(vgpu);
+ vfio_free_device(vfio_dev);
+}
+
static const struct vfio_device_ops intel_vgpu_dev_ops = {
+ .init = intel_vgpu_init_dev,
+ .release = intel_vgpu_release_dev,
.open_device = intel_vgpu_open_device,
.close_device = intel_vgpu_close_device,
.read = intel_vgpu_read,
@@ -1558,35 +1478,28 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
static int intel_vgpu_probe(struct mdev_device *mdev)
{
- struct device *pdev = mdev_parent_dev(mdev);
- struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt;
- struct intel_vgpu_type *type;
struct intel_vgpu *vgpu;
int ret;
- type = &gvt->types[mdev_get_type_group_id(mdev)];
- if (!type)
- return -EINVAL;
-
- vgpu = intel_gvt_create_vgpu(gvt, type);
+ vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev,
+ &intel_vgpu_dev_ops);
if (IS_ERR(vgpu)) {
gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
return PTR_ERR(vgpu);
}
- vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
- &intel_vgpu_dev_ops);
-
dev_set_drvdata(&mdev->dev, vgpu);
ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
- if (ret) {
- intel_gvt_destroy_vgpu(vgpu);
- return ret;
- }
+ if (ret)
+ goto out_put_vdev;
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
return 0;
+
+out_put_vdev:
+ vfio_put_device(&vgpu->vfio_device);
+ return ret;
}
static void intel_vgpu_remove(struct mdev_device *mdev)
@@ -1595,18 +1508,43 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
if (WARN_ON_ONCE(vgpu->attached))
return;
- intel_gvt_destroy_vgpu(vgpu);
+
+ vfio_unregister_group_dev(&vgpu->vfio_device);
+ vfio_put_device(&vgpu->vfio_device);
+}
+
+static unsigned int intel_vgpu_get_available(struct mdev_type *mtype)
+{
+ struct intel_vgpu_type *type =
+ container_of(mtype, struct intel_vgpu_type, type);
+ struct intel_gvt *gvt = kdev_to_i915(mtype->parent->dev)->gvt;
+ unsigned int low_gm_avail, high_gm_avail, fence_avail;
+
+ mutex_lock(&gvt->lock);
+ low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
+ gvt->gm.vgpu_allocated_low_gm_size;
+ high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
+ gvt->gm.vgpu_allocated_high_gm_size;
+ fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
+ gvt->fence.vgpu_allocated_fence_num;
+ mutex_unlock(&gvt->lock);
+
+ return min3(low_gm_avail / type->conf->low_mm,
+ high_gm_avail / type->conf->high_mm,
+ fence_avail / type->conf->fence);
}
static struct mdev_driver intel_vgpu_mdev_driver = {
+ .device_api = VFIO_DEVICE_API_PCI_STRING,
.driver = {
.name = "intel_vgpu_mdev",
.owner = THIS_MODULE,
.dev_groups = intel_vgpu_groups,
},
- .probe = intel_vgpu_probe,
- .remove = intel_vgpu_remove,
- .supported_type_groups = gvt_vgpu_type_groups,
+ .probe = intel_vgpu_probe,
+ .remove = intel_vgpu_remove,
+ .get_available = intel_vgpu_get_available,
+ .show_description = intel_vgpu_show_description,
};
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
@@ -1904,8 +1842,7 @@ static void intel_gvt_clean_device(struct drm_i915_private *i915)
if (drm_WARN_ON(&i915->drm, !gvt))
return;
- mdev_unregister_device(i915->drm.dev);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
+ mdev_unregister_parent(&gvt->parent);
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_clean_vgpu_types(gvt);
@@ -2005,19 +1942,15 @@ static int intel_gvt_init_device(struct drm_i915_private *i915)
intel_gvt_debugfs_init(gvt);
- ret = intel_gvt_init_vgpu_type_groups(gvt);
+ ret = mdev_register_parent(&gvt->parent, i915->drm.dev,
+ &intel_vgpu_mdev_driver,
+ gvt->mdev_types, gvt->num_types);
if (ret)
goto out_destroy_idle_vgpu;
- ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver);
- if (ret)
- goto out_cleanup_vgpu_type_groups;
-
gvt_dbg_core("gvt device initialization is done\n");
return 0;
-out_cleanup_vgpu_type_groups:
- intel_gvt_cleanup_vgpu_type_groups(gvt);
out_destroy_idle_vgpu:
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
intel_gvt_debugfs_clean(gvt);
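The new intel_vgpu_get_available() above computes how many more instances of a type still fit: take the free headroom in each of the three pools (low GM, high GM, fences), divide by the per-instance cost, and return the minimum. A runnable sketch of that arithmetic with made-up numbers:

#include <stdio.h>

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	/* Illustrative headroom left after host reservation and existing vGPUs. */
	unsigned int low_gm_avail = 448, high_gm_avail = 3584, fence_avail = 28;
	/* Illustrative per-instance cost of one vGPU type. */
	unsigned int low_mm = 64, high_mm = 384, fence = 4;

	printf("available instances: %u\n",
	       min3u(low_gm_avail / low_mm, high_gm_avail / high_mm,
		     fence_avail / fence));
	return 0;
}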
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index c85bafe7539e..1c6e941c9666 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -546,7 +546,7 @@ static void switch_mmio(struct intel_vgpu *pre,
}
/**
- * intel_gvt_switch_render_mmio - switch mmio context of specific engine
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
* @pre: the last vGPU that own the engine
* @next: the vGPU to switch to
* @engine: the engine
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 46da19b3225d..56c71474008a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -73,24 +73,21 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
+/*
+ * vGPU type name is defined as GVTg_Vx_y which contains the physical GPU
+ * generation type (e.g. V4 as a BDW server, V5 as an SKL server).
+ *
+ * Depending on the physical SKU resources, we might see vGPU types like
+ * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. Different types of vGPU can be
+ * created on the same physical GPU depending on the available resources.
+ * Each vGPU type has a different avail_instance count indicating how
+ * many vGPU instances can be created for that type.
+ */
#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num) \
(VGPU_MAX_WEIGHT / (vgpu_num))
-static const struct {
- unsigned int low_mm;
- unsigned int high_mm;
- unsigned int fence;
-
- /* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
- * with a weight of 4 on a contended host, different vGPU type has
- * different weight set. Legal weights range from 1 to 16.
- */
- unsigned int weight;
- enum intel_vgpu_edid edid;
- const char *name;
-} vgpu_types[] = {
-/* Fixed vGPU type table */
+static const struct intel_vgpu_config intel_vgpu_configs[] = {
{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
@@ -106,102 +103,58 @@ static const struct {
*/
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
- unsigned int num_types;
- unsigned int i, low_avail, high_avail;
- unsigned int min_low;
-
- /* vGPU type name is defined as GVTg_Vx_y which contains
- * physical GPU generation type (e.g V4 as BDW server, V5 as
- * SKL server).
- *
- * Depend on physical SKU resource, might see vGPU types like
- * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
- * different types of vGPU on same physical GPU depending on
- * available resource. Each vGPU type will have "avail_instance"
- * to indicate how many vGPU instance can be created for this
- * type.
- *
- */
- low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
- high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
- num_types = ARRAY_SIZE(vgpu_types);
+ unsigned int low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+ unsigned int high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+ unsigned int num_types = ARRAY_SIZE(intel_vgpu_configs);
+ unsigned int i;
gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
GFP_KERNEL);
if (!gvt->types)
return -ENOMEM;
- min_low = MB_TO_BYTES(32);
- for (i = 0; i < num_types; ++i) {
- if (low_avail / vgpu_types[i].low_mm == 0)
- break;
-
- gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
- gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
- gvt->types[i].fence = vgpu_types[i].fence;
+ gvt->mdev_types = kcalloc(num_types, sizeof(*gvt->mdev_types),
+ GFP_KERNEL);
+ if (!gvt->mdev_types)
+ goto out_free_types;
- if (vgpu_types[i].weight < 1 ||
- vgpu_types[i].weight > VGPU_MAX_WEIGHT)
- return -EINVAL;
+ for (i = 0; i < num_types; ++i) {
+ const struct intel_vgpu_config *conf = &intel_vgpu_configs[i];
- gvt->types[i].weight = vgpu_types[i].weight;
- gvt->types[i].resolution = vgpu_types[i].edid;
- gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
- high_avail / vgpu_types[i].high_mm);
+ if (low_avail / conf->low_mm == 0)
+ break;
+ if (conf->weight < 1 || conf->weight > VGPU_MAX_WEIGHT)
+ goto out_free_mdev_types;
- if (GRAPHICS_VER(gvt->gt->i915) == 8)
- sprintf(gvt->types[i].name, "GVTg_V4_%s",
- vgpu_types[i].name);
- else if (GRAPHICS_VER(gvt->gt->i915) == 9)
- sprintf(gvt->types[i].name, "GVTg_V5_%s",
- vgpu_types[i].name);
+ sprintf(gvt->types[i].name, "GVTg_V%u_%s",
+ GRAPHICS_VER(gvt->gt->i915) == 8 ? 4 : 5, conf->name);
+ gvt->types[i].conf = conf;
gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
- gvt->types[i].avail_instance,
- gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence,
- gvt->types[i].weight,
- vgpu_edid_str(gvt->types[i].resolution));
+ min(low_avail / conf->low_mm,
+ high_avail / conf->high_mm),
+ conf->low_mm, conf->high_mm, conf->fence,
+ conf->weight, vgpu_edid_str(conf->edid));
+
+ gvt->mdev_types[i] = &gvt->types[i].type;
+ gvt->mdev_types[i]->sysfs_name = gvt->types[i].name;
}
gvt->num_types = i;
return 0;
-}
-void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
-{
+out_free_mdev_types:
+ kfree(gvt->mdev_types);
+out_free_types:
kfree(gvt->types);
+ return -EINVAL;
}
-static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
- int i;
- unsigned int low_gm_avail, high_gm_avail, fence_avail;
- unsigned int low_gm_min, high_gm_min, fence_min;
-
- /* Need to depend on maxium hw resource size but keep on
- * static config for now.
- */
- low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
- gvt->gm.vgpu_allocated_low_gm_size;
- high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
- gvt->gm.vgpu_allocated_high_gm_size;
- fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
- gvt->fence.vgpu_allocated_fence_num;
-
- for (i = 0; i < gvt->num_types; i++) {
- low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
- high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
- fence_min = fence_avail / gvt->types[i].fence;
- gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
- fence_min);
-
- gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
- i, gvt->types[i].name,
- gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence);
- }
+ kfree(gvt->mdev_types);
+ kfree(gvt->types);
}
/**
@@ -298,12 +251,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&vgpu->vgpu_lock);
-
- mutex_lock(&gvt->lock);
- intel_gvt_update_vgpu_types(gvt);
- mutex_unlock(&gvt->lock);
-
- vfree(vgpu);
}
#define IDLE_VGPU_IDR 0
@@ -363,42 +310,38 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
vfree(vgpu);
}
-static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_creation_params *param)
+int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
+ const struct intel_vgpu_config *conf)
{
+ struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->gt->i915;
- struct intel_vgpu *vgpu;
int ret;
- gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
- param->low_gm_sz, param->high_gm_sz,
- param->fence_sz);
-
- vgpu = vzalloc(sizeof(*vgpu));
- if (!vgpu)
- return ERR_PTR(-ENOMEM);
+ gvt_dbg_core("low %u MB high %u MB fence %u\n",
+ BYTES_TO_MB(conf->low_mm), BYTES_TO_MB(conf->high_mm),
+ conf->fence);
+ mutex_lock(&gvt->lock);
ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
GFP_KERNEL);
if (ret < 0)
- goto out_free_vgpu;
+ goto out_unlock;
vgpu->id = ret;
- vgpu->gvt = gvt;
- vgpu->sched_ctl.weight = param->weight;
+ vgpu->sched_ctl.weight = conf->weight;
mutex_init(&vgpu->vgpu_lock);
mutex_init(&vgpu->dmabuf_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init_base(&vgpu->object_idr, 1);
- intel_vgpu_init_cfg_space(vgpu, param->primary);
+ intel_vgpu_init_cfg_space(vgpu, 1);
vgpu->d3_entered = false;
ret = intel_vgpu_init_mmio(vgpu);
if (ret)
goto out_clean_idr;
- ret = intel_vgpu_alloc_resource(vgpu, param);
+ ret = intel_vgpu_alloc_resource(vgpu, conf);
if (ret)
goto out_clean_vgpu_mmio;
@@ -412,7 +355,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_gtt;
- ret = intel_vgpu_init_display(vgpu, param->resolution);
+ ret = intel_vgpu_init_display(vgpu, conf->edid);
if (ret)
goto out_clean_opregion;
@@ -437,7 +380,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- return vgpu;
+ intel_gvt_update_reg_whitelist(vgpu);
+ mutex_unlock(&gvt->lock);
+ return 0;
out_clean_sched_policy:
intel_vgpu_clean_sched_policy(vgpu);
@@ -455,48 +400,9 @@ out_clean_vgpu_mmio:
intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
idr_remove(&gvt->vgpu_idr, vgpu->id);
-out_free_vgpu:
- vfree(vgpu);
- return ERR_PTR(ret);
-}
-
-/**
- * intel_gvt_create_vgpu - create a virtual GPU
- * @gvt: GVT device
- * @type: type of the vGPU to create
- *
- * This function is called when user wants to create a virtual GPU.
- *
- * Returns:
- * pointer to intel_vgpu, error pointer if failed.
- */
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
- struct intel_vgpu_type *type)
-{
- struct intel_vgpu_creation_params param;
- struct intel_vgpu *vgpu;
-
- param.primary = 1;
- param.low_gm_sz = type->low_gm_size;
- param.high_gm_sz = type->high_gm_size;
- param.fence_sz = type->fence;
- param.weight = type->weight;
- param.resolution = type->resolution;
-
- /* XXX current param based on MB */
- param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
- param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
-
- mutex_lock(&gvt->lock);
- vgpu = __intel_gvt_create_vgpu(gvt, &param);
- if (!IS_ERR(vgpu)) {
- /* calculate left instance change for types */
- intel_gvt_update_vgpu_types(gvt);
- intel_gvt_update_reg_whitelist(vgpu);
- }
+out_unlock:
mutex_unlock(&gvt->lock);
-
- return vgpu;
+ return ret;
}
/**
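The rewritten intel_gvt_init_vgpu_types() collapses the two per-generation sprintf branches into a single GVTg_V%u_%s format, choosing 4 when the graphics version is 8 (BDW) and 5 otherwise. A standalone sketch of the name construction; the version and suffix list are illustrative:

#include <stdio.h>

int main(void)
{
	const char *suffixes[] = { "8", "4", "2", "1" };
	unsigned int graphics_ver = 9;	/* assumed: an SKL-class part */
	char name[16];
	int i;

	for (i = 0; i < 4; i++) {
		snprintf(name, sizeof(name), "GVTg_V%u_%s",
			 graphics_ver == 8 ? 4u : 5u, suffixes[i]);
		printf("%s\n", name);
	}
	return 0;
}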
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3a8450058548..ae987e92251d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -66,8 +66,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
- intel_device_info_print_static(INTEL_INFO(i915), &p);
- intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
+ intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
@@ -411,7 +410,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y));
- if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+ if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
/* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */
@@ -493,7 +492,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "Runtime power status: %s\n",
- str_enabled_disabled(!dev_priv->power_domains.init_wakeref));
+ str_enabled_disabled(!dev_priv->display.power.domains.init_wakeref));
seq_printf(m, "GPU idle: %s\n", str_yes_no(!to_gt(dev_priv)->awake));
seq_printf(m, "IRQs disabled: %s\n",
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index deb8a8b76965..c459eb362c47 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -61,6 +61,7 @@
#include "display/intel_pps.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
+#include "display/skl_watermark.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_create.h"
@@ -105,6 +106,12 @@ static const char irst_name[] = "INT3392";
static const struct drm_driver i915_drm_driver;
+static void i915_release_bridge_dev(struct drm_device *dev,
+ void *bridge)
+{
+ pci_dev_put(bridge);
+}
+
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
@@ -115,7 +122,9 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
drm_err(&dev_priv->drm, "bridge device not found\n");
return -EIO;
}
- return 0;
+
+ return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev,
+ dev_priv->bridge_dev);
}
/* Allocate space for the MCH regs if needed, return nonzero on error */
@@ -252,8 +261,8 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
if (dev_priv->wq == NULL)
goto out_err;
- dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
- if (dev_priv->hotplug.dp_wq == NULL)
+ dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->display.hotplug.dp_wq == NULL)
goto out_free_wq;
return 0;
@@ -268,7 +277,7 @@ out_err:
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
- destroy_workqueue(dev_priv->hotplug.dp_wq);
+ destroy_workqueue(dev_priv->display.hotplug.dp_wq);
destroy_workqueue(dev_priv->wq);
}
@@ -302,8 +311,13 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
static void sanitize_gpu(struct drm_i915_private *i915)
{
- if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(to_gt(i915), ALL_ENGINES);
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) {
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i)
+ __intel_gt_reset(gt, ALL_ENGINES);
+ }
}
/**
@@ -326,19 +340,19 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_device_info_subplatform_init(dev_priv);
intel_step_init(dev_priv);
- intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
+ intel_uncore_mmio_debug_init_early(dev_priv);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- mutex_init(&dev_priv->backlight_lock);
+ mutex_init(&dev_priv->display.backlight.lock);
mutex_init(&dev_priv->sb_lock);
cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
- mutex_init(&dev_priv->audio.mutex);
- mutex_init(&dev_priv->wm.wm_mutex);
- mutex_init(&dev_priv->pps_mutex);
- mutex_init(&dev_priv->hdcp_comp_mutex);
+ mutex_init(&dev_priv->display.audio.mutex);
+ mutex_init(&dev_priv->display.wm.wm_mutex);
+ mutex_init(&dev_priv->display.pps.mutex);
+ mutex_init(&dev_priv->display.hdcp.comp_mutex);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -357,7 +371,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_wopcm_init_early(&dev_priv->wopcm);
- intel_root_gt_init_early(dev_priv);
+ ret = intel_root_gt_init_early(dev_priv);
+ if (ret < 0)
+ goto err_rootgt;
i915_drm_clients_init(&dev_priv->clients, dev_priv);
@@ -382,6 +398,7 @@ err_gem:
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release_all(dev_priv);
i915_drm_clients_fini(&dev_priv->clients);
+err_rootgt:
intel_region_ttm_device_fini(dev_priv);
err_ttm:
vlv_suspend_cleanup(dev_priv);
@@ -423,7 +440,8 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
*/
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
@@ -432,17 +450,27 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
- ret = intel_uncore_init_mmio(&dev_priv->uncore);
- if (ret)
- return ret;
+ for_each_gt(gt, dev_priv, i) {
+ ret = intel_uncore_init_mmio(gt->uncore);
+ if (ret)
+ return ret;
+
+ ret = drmm_add_action_or_reset(&dev_priv->drm,
+ intel_uncore_fini_mmio,
+ gt->uncore);
+ if (ret)
+ return ret;
+ }
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
intel_device_info_runtime_init(dev_priv);
- ret = intel_gt_init_mmio(to_gt(dev_priv));
- if (ret)
- goto err_uncore;
+ for_each_gt(gt, dev_priv, i) {
+ ret = intel_gt_init_mmio(gt);
+ if (ret)
+ goto err_uncore;
+ }
/* As early as possible, scrub existing GPU state before clobbering */
sanitize_gpu(dev_priv);
@@ -451,8 +479,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
err_uncore:
intel_teardown_mchbar(dev_priv);
- intel_uncore_fini_mmio(&dev_priv->uncore);
- pci_dev_put(dev_priv->bridge_dev);
return ret;
}
@@ -464,8 +490,6 @@ err_uncore:
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
intel_teardown_mchbar(dev_priv);
- intel_uncore_fini_mmio(&dev_priv->uncore);
- pci_dev_put(dev_priv->bridge_dev);
}
/**
@@ -715,6 +739,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
+ struct intel_gt *gt;
+ unsigned int i;
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
@@ -734,7 +760,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Depends on sysfs having been initialized */
i915_perf_register(dev_priv);
- intel_gt_driver_register(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_driver_register(gt);
intel_display_driver_register(dev_priv);
@@ -753,6 +780,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ struct intel_gt *gt;
+ unsigned int i;
+
i915_switcheroo_unregister(dev_priv);
intel_unregister_dsm_handler();
@@ -762,7 +792,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_display_driver_unregister(dev_priv);
- intel_gt_driver_unregister(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_driver_unregister(gt);
i915_perf_unregister(dev_priv);
i915_pmu_unregister(dev_priv);
@@ -784,6 +815,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
if (drm_debug_enabled(DRM_UT_DRIVER)) {
struct drm_printer p = drm_debug_printer("i915 device info:");
+ struct intel_gt *gt;
+ unsigned int i;
drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
INTEL_DEVID(dev_priv),
@@ -793,10 +826,11 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
INTEL_INFO(dev_priv)->platform),
GRAPHICS_VER(dev_priv));
- intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
- intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
+ intel_device_info_print(INTEL_INFO(dev_priv),
+ RUNTIME_INFO(dev_priv), &p);
i915_print_iommu_status(dev_priv, &p);
- intel_gt_info_print(&to_gt(dev_priv)->info, &p);
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_info_print(&gt->info, &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -814,6 +848,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
struct intel_device_info *device_info;
+ struct intel_runtime_info *runtime;
struct drm_i915_private *i915;
i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
@@ -829,7 +864,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
memcpy(device_info, match_info, sizeof(*device_info));
- RUNTIME_INFO(i915)->device_id = pdev->device;
+
+ /* Initialize initial runtime info from static const data and pdev. */
+ runtime = RUNTIME_INFO(i915);
+ memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
+ runtime->device_id = pdev->device;
return i915;
}
@@ -948,7 +987,9 @@ out_fini:
void i915_driver_remove(struct drm_i915_private *i915)
{
- disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
i915_driver_unregister(i915);
@@ -972,18 +1013,19 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_driver_hw_remove(i915);
- enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_driver_release(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ intel_wakeref_t wakeref;
if (!dev_priv->do_release)
return;
- disable_rpm_wakeref_asserts(rpm);
+ wakeref = intel_runtime_pm_get(rpm);
i915_gem_driver_release(dev_priv);
@@ -994,7 +1036,8 @@ static void i915_driver_release(struct drm_device *dev)
i915_driver_mmio_release(dev_priv);
- enable_rpm_wakeref_asserts(rpm);
+ intel_runtime_pm_put(rpm, wakeref);
+
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
@@ -1206,13 +1249,15 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
disable_rpm_wakeref_asserts(rpm);
i915_gem_suspend_late(dev_priv);
- intel_uncore_suspend(&dev_priv->uncore);
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_suspend(gt->uncore);
intel_power_domains_suspend(dev_priv,
get_suspend_mode(dev_priv, hibernation));
@@ -1344,7 +1389,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
/*
* We have a resume ordering issue with the snd-hda driver also
@@ -1398,9 +1444,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
drm_err(&dev_priv->drm,
"Resume prepare failed: %d, continuing anyway\n", ret);
- intel_uncore_resume_early(&dev_priv->uncore);
-
- intel_gt_check_and_clear_faults(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i) {
+ intel_uncore_resume_early(gt->uncore);
+ intel_gt_check_and_clear_faults(gt);
+ }
intel_display_power_resume_early(dev_priv);
@@ -1580,7 +1627,8 @@ static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -1595,11 +1643,13 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_gt_runtime_suspend(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_runtime_suspend(gt);
intel_runtime_pm_disable_interrupts(dev_priv);
- intel_uncore_suspend(&dev_priv->uncore);
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_suspend(gt->uncore);
intel_display_power_suspend(dev_priv);
@@ -1663,7 +1713,8 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -1683,7 +1734,8 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
- intel_uncore_runtime_resume(&dev_priv->uncore);
+ for_each_gt(gt, dev_priv, i)
+ intel_uncore_runtime_resume(gt->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1691,7 +1743,8 @@ static int intel_runtime_resume(struct device *kdev)
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- intel_gt_runtime_resume(to_gt(dev_priv));
+ for_each_gt(gt, dev_priv, i)
+ intel_gt_runtime_resume(gt);
/*
* On VLV/CHV display interrupts are part of the display
@@ -1703,7 +1756,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_hpd_poll_disable(dev_priv);
}
- intel_enable_ipc(dev_priv);
+ skl_watermark_ipc_update(dev_priv);
enable_rpm_wakeref_asserts(rpm);
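Many hunks in this file convert single-GT calls such as intel_uncore_suspend(&dev_priv->uncore) into for_each_gt() loops so that multi-tile parts have every GT suspended, resumed, and registered. A userspace sketch of the iteration pattern; the macro and types are demo stand-ins, not the kernel's for_each_gt():

#include <stdio.h>

#define MAX_GT 2

struct gt_demo { int id; };
struct i915_demo { struct gt_demo *gt[MAX_GT]; };

#define for_each_gt_demo(gt, i915, i) \
	for ((i) = 0; (i) < MAX_GT && ((gt) = (i915)->gt[(i)]) != NULL; (i)++)

static void suspend_gt(struct gt_demo *gt)
{
	printf("suspending gt%d\n", gt->id);
}

int main(void)
{
	struct gt_demo g0 = { 0 }, g1 = { 1 };
	struct i915_demo i915 = { { &g0, &g1 } };
	struct gt_demo *gt;
	unsigned int i;

	for_each_gt_demo(gt, &i915, i)
		suspend_gt(gt);
	return 0;
}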
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7c0a34a33fec..bdc81db76dbd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -34,20 +34,10 @@
#include <linux/pm_qos.h>
-#include <drm/drm_connector.h>
#include <drm/ttm/ttm_device.h>
-#include "display/intel_cdclk.h"
#include "display/intel_display.h"
-#include "display/intel_display_power.h"
-#include "display/intel_dmc.h"
-#include "display/intel_dpll_mgr.h"
-#include "display/intel_dsb.h"
-#include "display/intel_fbc.h"
-#include "display/intel_frontbuffer.h"
-#include "display/intel_global_state.h"
-#include "display/intel_gmbus.h"
-#include "display/intel_opregion.h"
+#include "display/intel_display_core.h"
#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_lmem.h"
@@ -70,80 +60,24 @@
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
-#include "intel_pm_types.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
-struct dpll;
struct drm_i915_clock_gating_funcs;
struct drm_i915_gem_object;
struct drm_i915_private;
-struct intel_atomic_state;
-struct intel_audio_funcs;
-struct intel_cdclk_config;
-struct intel_cdclk_funcs;
-struct intel_cdclk_state;
-struct intel_cdclk_vals;
-struct intel_color_funcs;
struct intel_connector;
-struct intel_crtc;
struct intel_dp;
-struct intel_dpll_funcs;
struct intel_encoder;
-struct intel_fbdev;
-struct intel_fdi_funcs;
-struct intel_gmbus;
-struct intel_hotplug_funcs;
-struct intel_initial_plane_config;
struct intel_limit;
-struct intel_overlay;
struct intel_overlay_error_state;
struct vlv_s0ix_state;
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50
-struct i915_hotplug {
- struct delayed_work hotplug_work;
-
- const u32 *hpd, *pch_hpd;
-
- struct {
- unsigned long last_jiffies;
- int count;
- enum {
- HPD_ENABLED = 0,
- HPD_DISABLED = 1,
- HPD_MARK_DISABLED = 2
- } state;
- } stats[HPD_NUM_PINS];
- u32 event_bits;
- u32 retry_bits;
- struct delayed_work reenable_work;
-
- u32 long_port_mask;
- u32 short_port_mask;
- struct work_struct dig_port_work;
-
- struct work_struct poll_init_work;
- bool poll_enabled;
-
- unsigned int hpd_storm_threshold;
- /* Whether or not to count short HPD IRQs in HPD storms */
- u8 hpd_short_storm_enabled;
-
- /*
- * if we get a HPD irq from DP and a HPD irq from non-DP
- * the non-DP HPD could block the workqueue on a mode config
- * mutex getting, that userspace may have taken. However
- * userspace is waiting on the DP workqueue to run which is
- * blocked behind the non-DP one.
- */
- struct workqueue_struct *dp_wq;
-};
-
#define I915_GEM_GPU_DOMAINS \
(I915_GEM_DOMAIN_RENDER | \
I915_GEM_DOMAIN_SAMPLER | \
@@ -151,55 +85,9 @@ struct i915_hotplug {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-struct sdvo_device_mapping {
- u8 initialized;
- u8 dvo_port;
- u8 slave_addr;
- u8 dvo_wiring;
- u8 i2c_pin;
- u8 ddc_pin;
-};
-
-/* functions used for watermark calcs for display. */
-struct drm_i915_wm_disp_funcs {
- /* update_wm is for legacy wm management */
- void (*update_wm)(struct drm_i915_private *dev_priv);
- int (*compute_pipe_wm)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- int (*compute_intermediate_wm)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*initial_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*atomic_update_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*optimize_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- int (*compute_global_watermarks)(struct intel_atomic_state *state);
-};
-
-struct drm_i915_display_funcs {
- /* Returns the active state of the crtc, and if the crtc is active,
- * fills out the pipe-config with the hw state. */
- bool (*get_pipe_config)(struct intel_crtc *,
- struct intel_crtc_state *);
- void (*get_initial_plane_config)(struct intel_crtc *,
- struct intel_initial_plane_config *);
- void (*crtc_enable)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*crtc_disable)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*commit_modeset_enables)(struct intel_atomic_state *state);
-};
-
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-#define QUIRK_LVDS_SSC_DISABLE (1<<1)
-#define QUIRK_INVERT_BRIGHTNESS (1<<2)
-#define QUIRK_BACKLIGHT_PRESENT (1<<3)
-#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
-#define QUIRK_INCREASE_T12_DELAY (1<<6)
-#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
-#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
+#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
struct i915_suspend_saved_registers {
u32 saveDSPARB;
@@ -289,51 +177,8 @@ i915_fence_timeout(const struct drm_i915_private *i915)
return i915_fence_context_timeout(i915, U64_MAX);
}
-/* Amount of SAGV/QGV points, BSpec precisely defines this */
-#define I915_NUM_QGV_POINTS 8
-
#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
-/* Amount of PSF GV points, BSpec precisely defines this */
-#define I915_NUM_PSF_GV_POINTS 3
-
-struct intel_vbt_data {
- /* bdb version */
- u16 version;
-
- /* Feature bits */
- unsigned int int_tv_support:1;
- unsigned int int_crt_support:1;
- unsigned int lvds_use_ssc:1;
- unsigned int int_lvds_support:1;
- unsigned int display_clock_mode:1;
- unsigned int fdi_rx_polarity_inverted:1;
- int lvds_ssc_freq;
- enum drm_panel_orientation orientation;
-
- bool override_afc_startup;
- u8 override_afc_startup_val;
-
- int crt_ddc_pin;
-
- struct list_head display_devices;
- struct list_head bdb_blocks;
-
- struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
- struct sdvo_device_mapping sdvo_mappings[2];
-};
-
-struct i915_frontbuffer_tracking {
- spinlock_t lock;
-
- /*
- * Tracking bits for delayed frontbuffer flushing du to gpu activity or
- * scheduled flips.
- */
- unsigned busy_bits;
- unsigned flip_bits;
-};
-
struct i915_virtual_gpu {
struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
@@ -348,32 +193,11 @@ struct i915_selftest_stash {
struct ida mock_region_instances;
};
-/* intel_audio.c private */
-struct intel_audio_private {
- /* Display internal audio functions */
- const struct intel_audio_funcs *funcs;
-
- /* hda/i915 audio component */
- struct i915_audio_component *component;
- bool component_registered;
- /* mutex for audio/video sync */
- struct mutex mutex;
- int power_refcount;
- u32 freq_cntrl;
-
- /* Used to save the pipe-to-encoder mapping for audio */
- struct intel_encoder *encoder_map[I915_MAX_PIPES];
-
- /* necessary resource sharing with HDMI LPE audio driver. */
- struct {
- struct platform_device *platdev;
- int irq;
- } lpe;
-};
-
struct drm_i915_private {
struct drm_device drm;
+ struct intel_display display;
+
/* FIXME: Device release actions should all be moved to drmm_ */
bool do_release;
@@ -417,27 +241,6 @@ struct drm_i915_private {
struct intel_wopcm wopcm;
- struct intel_dmc dmc;
-
- struct intel_gmbus *gmbus[GMBUS_NUM_PINS];
-
- /** gmbus_mutex protects against concurrent usage of the single hw gmbus
- * controller on different i2c buses. */
- struct mutex gmbus_mutex;
-
- /**
- * Base address of where the gmbus and gpio blocks are located (either
- * on PCH or on SoC for platforms without PCH).
- */
- u32 gpio_mmio_base;
-
- /* MMIO base address for MIPI regs */
- u32 mipi_mmio_base;
-
- u32 pps_mmio_base;
-
- wait_queue_head_t gmbus_wait_queue;
-
struct pci_dev *bridge_dev;
struct rb_root uabi_engines;
@@ -461,48 +264,15 @@ struct drm_i915_private {
};
u32 pipestat_irq_mask[I915_MAX_PIPES];
- struct i915_hotplug hotplug;
- struct intel_fbc *fbc[I915_MAX_FBCS];
- struct intel_opregion opregion;
- struct intel_vbt_data vbt;
-
bool preserve_bios_swizzle;
- /* overlay */
- struct intel_overlay *overlay;
-
- /* backlight registers and fields in struct intel_panel */
- struct mutex backlight_lock;
-
- /* protects panel power sequencer state */
- struct mutex pps_mutex;
-
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
- unsigned int max_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int hpll_freq;
- unsigned int fdi_pll_freq;
unsigned int czclk_freq;
- struct {
- /* The current hardware cdclk configuration */
- struct intel_cdclk_config hw;
-
- /* cdclk, divider, and ratio table from bspec */
- const struct intel_cdclk_vals *table;
-
- struct intel_global_obj obj;
- } cdclk;
-
- struct {
- /* The current hardware dbuf configuration */
- u8 enabled_slices;
-
- struct intel_global_obj obj;
- } dbuf;
-
/**
* wq - Driver workqueue for GEM.
*
@@ -512,40 +282,14 @@ struct drm_i915_private {
*/
struct workqueue_struct *wq;
- /* ordered wq for modesets */
- struct workqueue_struct *modeset_wq;
- /* unbound hipri wq for page flips/plane updates */
- struct workqueue_struct *flip_wq;
-
/* pm private clock gating functions */
const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
- /* pm display functions */
- const struct drm_i915_wm_disp_funcs *wm_disp;
-
- /* irq display functions */
- const struct intel_hotplug_funcs *hotplug_funcs;
-
- /* fdi display functions */
- const struct intel_fdi_funcs *fdi_funcs;
-
- /* display pll funcs */
- const struct intel_dpll_funcs *dpll_funcs;
-
- /* Display functions */
- const struct drm_i915_display_funcs *display;
-
- /* Display internal color functions */
- const struct intel_color_funcs *color_funcs;
-
- /* Display CDCLK functions */
- const struct intel_cdclk_funcs *cdclk_funcs;
-
/* PCH chipset type */
enum intel_pch pch_type;
unsigned short pch_id;
- unsigned long quirks;
+ unsigned long gem_quirks;
struct drm_atomic_state *modeset_restore_state;
struct drm_modeset_acquire_ctx reset_ctx;
@@ -554,34 +298,8 @@ struct drm_i915_private {
/* Kernel Modesetting */
- /**
- * dpll and cdclk state is protected by connection_mutex
- * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
- * Must be global rather than per dpll, because on some platforms plls
- * share registers.
- */
- struct {
- struct mutex lock;
-
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- const struct intel_dpll_mgr *mgr;
-
- struct {
- int nssc;
- int ssc;
- } ref_clks;
- } dpll;
-
struct list_head global_obj_list;
- struct i915_frontbuffer_tracking fb_tracking;
-
- struct intel_atomic_helper {
- struct llist_head free_list;
- struct work_struct free_work;
- } atomic_helper;
-
bool mchbar_need_disable;
struct intel_l3_parity l3_parity;
@@ -600,21 +318,8 @@ struct drm_i915_private {
*/
u32 edram_size_mb;
- struct i915_power_domains power_domains;
-
struct i915_gpu_error gpu_error;
- /* list of fbdev register on this device */
- struct intel_fbdev *fbdev;
- struct work_struct fbdev_suspend_work;
-
- struct drm_property *broadcast_rgb_property;
- struct drm_property *force_audio_property;
-
- u32 fdi_rx_config;
-
- /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
- u32 chv_phy_control;
/*
* Shadows for CHV DPLL_MD regs to keep the state
* checker somewhat working in the presence hardware
@@ -627,51 +332,6 @@ struct drm_i915_private {
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state *vlv_s0ix_state;
- enum {
- I915_SAGV_UNKNOWN = 0,
- I915_SAGV_DISABLED,
- I915_SAGV_ENABLED,
- I915_SAGV_NOT_CONTROLLED
- } sagv_status;
-
- u32 sagv_block_time_us;
-
- struct {
- /*
- * Raw watermark latency values:
- * in 0.1us units for WM0,
- * in 0.5us units for WM1+.
- */
- /* primary */
- u16 pri_latency[5];
- /* sprite */
- u16 spr_latency[5];
- /* cursor */
- u16 cur_latency[5];
- /*
- * Raw watermark memory latency values
- * for SKL for all 8 levels
- * in 1us units.
- */
- u16 skl_latency[8];
-
- /* current hardware state */
- union {
- struct ilk_wm_values hw;
- struct vlv_wm_values vlv;
- struct g4x_wm_values g4x;
- };
-
- u8 max_level;
-
- /*
- * Should be held around atomic WM register writing; also
- * protects * intel_crtc->wm.active and
- * crtc_state->wm.need_postvbl_update.
- */
- struct mutex wm_mutex;
- } wm;
-
struct dram_info {
bool wm_lv_0_adjust_needed;
u8 num_channels;
@@ -689,18 +349,6 @@ struct drm_i915_private {
u8 num_psf_gv_points;
} dram_info;
- struct intel_bw_info {
- /* for each QGV point */
- unsigned int deratedbw[I915_NUM_QGV_POINTS];
- /* for each PSF GV point */
- unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
- u8 num_qgv_points;
- u8 num_psf_gv_points;
- u8 num_planes;
- } max_bw[6];
-
- struct intel_global_obj bw_obj;
-
struct intel_runtime_pm runtime_pm;
struct i915_perf perf;
@@ -716,6 +364,9 @@ struct drm_i915_private {
struct kobject *sysfs_gt;
+ /* Quick lookup of media GT (current platforms only have one) */
+ struct intel_gt *media_gt;
+
struct {
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
@@ -733,9 +384,6 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
- /* Window2 specifies time required to program DSB (Window2) in number of scan lines */
- u8 window2_delay;
-
u8 pch_ssc_use;
/* For i915gm/i945gm vblank irq workaround */
@@ -743,31 +391,16 @@ struct drm_i915_private {
bool irq_enabled;
- union {
- /* perform PHY state sanity checks? */
- bool chv_phy_assert[2];
-
- /*
- * DG2: Mask of PHYs that were not calibrated by the firmware
- * and should not be used.
- */
- u8 snps_phy_failed_calibration;
- };
-
- bool ipc_enabled;
-
- struct intel_audio_private audio;
+ /*
+ * DG2: Mask of PHYs that were not calibrated by the firmware
+ * and should not be used.
+ */
+ u8 snps_phy_failed_calibration;
struct i915_pmu pmu;
struct i915_drm_clients clients;
- struct i915_hdcp_comp_master *hdcp_master;
- bool hdcp_comp_added;
-
- /* Mutex to protect the above hdcp component related values. */
- struct mutex hdcp_comp_mutex;
-
/* The TTM device structure. */
struct ttm_device bdev;
@@ -826,28 +459,6 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
(engine__) && (engine__)->uabi_class == (class__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-#define I915_GTT_OFFSET_NONE ((u32)-1)
-
-/*
- * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
- * considered to be the frontbuffer for the given plane interface-wise. This
- * doesn't mean that the hw necessarily already scans it out, but that any
- * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
- *
- * We have one bit per pipe and per scanout plane type.
- */
-#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
- BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
- BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
- BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
-})
-#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
- BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
- GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
- INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-
#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
@@ -856,19 +467,19 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
#define IP_VER(ver, rel) ((ver) << 8 | (rel))
-#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver)
-#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \
- INTEL_INFO(i915)->graphics.rel)
+#define GRAPHICS_VER(i915) (RUNTIME_INFO(i915)->graphics.ip.ver)
+#define GRAPHICS_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->graphics.ip.ver, \
+ RUNTIME_INFO(i915)->graphics.ip.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
-#define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver)
-#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.ver, \
- INTEL_INFO(i915)->media.rel)
+#define MEDIA_VER(i915) (RUNTIME_INFO(i915)->media.ip.ver)
+#define MEDIA_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->media.ip.ver, \
+ RUNTIME_INFO(i915)->media.ip.rel)
#define IS_MEDIA_VER(i915, from, until) \
(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
-#define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.ver)
+#define DISPLAY_VER(i915) (RUNTIME_INFO(i915)->display.ip.ver)
#define IS_DISPLAY_VER(i915, from, until) \
(DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
@@ -1210,7 +821,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
-#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
+#define INTEL_PPGTT(dev_priv) (RUNTIME_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
@@ -1218,7 +829,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
- ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
+ ((sizes) & ~RUNTIME_INFO(dev_priv)->page_sizes) == 0; \
})
#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
@@ -1249,13 +860,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
#define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2)
-#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0)
+#define HAS_FBC(dev_priv) (RUNTIME_INFO(dev_priv)->fbc_mask != 0)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
-#define HAS_DP20(dev_priv) (IS_DG2(dev_priv))
+#define HAS_DP20(dev_priv) (IS_DG2(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+
+#define HAS_DOUBLE_BUFFERED_M_N(dev_priv) (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
#define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
@@ -1264,7 +877,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_PSR_HW_TRACKING(dev_priv) \
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12)
-#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0)
+#define HAS_TRANSCODER(dev_priv, trans) ((RUNTIME_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@@ -1272,7 +885,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
-#define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc)
+#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc)
#define HAS_HECI_PXP(dev_priv) \
(INTEL_INFO(dev_priv)->has_heci_pxp)
@@ -1302,9 +915,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
-#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
+#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i))
#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
+#define HAS_EXTRA_GT_LIST(dev_priv) (INTEL_INFO(dev_priv)->extra_gt_list)
+
/*
* Platform has the dedicated compression control state for each lmem surfaces
* stored in lmem to support the 3D and media compression formats.
@@ -1313,7 +928,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv) (RUNTIME_INFO(dev_priv)->has_pooled_eu)
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
@@ -1335,9 +950,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
-#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask))
+#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
-#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0)
+#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11)
@@ -1355,85 +970,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
IS_ALDERLAKE_S(dev_priv))
-#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915))
+#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)
#define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline)
#define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit)
-/* i915_gem.c */
-void i915_gem_init_early(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
-
-static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
-{
- /*
- * A single pass should suffice to release all the freed objects (along
- * most call paths) , but be a little more paranoid in that freeing
- * the objects does take a little amount of time, during which the rcu
- * callbacks could have added new objects into the freed list, and
- * armed the work again.
- */
- while (atomic_read(&i915->mm.free_count)) {
- flush_work(&i915->mm.free_work);
- flush_delayed_work(&i915->bdev.wq);
- rcu_barrier();
- }
-}
-
-static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
-{
- /*
- * Similar to the freed-object draining above (see
- * i915_gem_drain_freed_objects()), in general we have workers that
- * are armed by RCU and then rearm themselves in their callbacks. To
- * be paranoid, we need to drain the workqueue a second time after
- * waiting for the RCU grace period so that we catch work queued via
- * RCU from the first pass. As neither drain_workqueue() nor
- * flush_workqueue() report a result, we assume that no more than
- * 3 passes are required to catch all _recursive_ RCU delayed work.
- */
- int pass = 3;
- do {
- flush_workqueue(i915->wq);
- rcu_barrier();
- i915_gem_drain_freed_objects(i915);
- } while (--pass);
- drain_workqueue(i915->wq);
-}
-
-struct i915_vma * __must_check
-i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
- struct i915_gem_ww_ctx *ww,
- const struct i915_gtt_view *view,
- u64 size, u64 alignment, u64 flags);
-
-struct i915_vma * __must_check
-i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
- const struct i915_gtt_view *view,
- u64 size, u64 alignment, u64 flags);
-
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
- unsigned long flags);
-#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
-#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
-#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
-#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
-#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
-
-void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-
-int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-
-int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
-void i915_gem_driver_register(struct drm_i915_private *i915);
-void i915_gem_driver_unregister(struct drm_i915_private *i915);
-void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
-void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-
-int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
-
/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
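The HAS_*() churn above is one half of a larger split: feature data that probe code may rewrite (pipe masks, DMC presence, memory regions, pooled EU) moves out of the const INTEL_INFO() tables and into RUNTIME_INFO(). A self-contained sketch of the pattern, using stand-in struct and field names rather than the real i915 layout:

/* Illustrative only: a const per-platform table plus a mutable runtime
 * copy that probe code may trim once fuse registers have been read. */
struct platform_info { unsigned int has_ddi:1; };	/* fixed per PCI ID */
struct runtime_info { unsigned char pipe_mask; };	/* may lose bits at probe */

struct device_priv {
	const struct platform_info *info;	/* never written after init */
	struct runtime_info __runtime;		/* starts as a copy of defaults */
};

#define SKETCH_INFO(d)		((d)->info)
#define SKETCH_RUNTIME(d)	(&(d)->__runtime)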
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a3373699835d..2bdddb61ebd7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -842,6 +842,10 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
&to_gt(i915)->ggtt->userfault_list, userfault_link)
__i915_gem_object_release_mmap_gtt(obj);
+ list_for_each_entry_safe(obj, on,
+ &to_gt(i915)->lmem_userfault_list, userfault_link)
+ i915_gem_object_runtime_pm_release_mmap_offset(obj);
+
/*
* The fence will be lost when the device powers down. If any were
* in use by hardware (i.e. they are pinned), we should not be powering
@@ -1035,7 +1039,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
- i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_clear_tiling_quirk(obj);
@@ -1085,14 +1089,50 @@ out:
return err;
}
+/*
+ * A single pass should suffice to release all the freed objects (along most
+ * call paths), but be a little more paranoid in that freeing the objects does
+ * take a small amount of time, during which the RCU callbacks could have
+ * added new objects to the freed list and armed the work again.
+ */
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+ while (atomic_read(&i915->mm.free_count)) {
+ flush_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->bdev.wq);
+ rcu_barrier();
+ }
+}
+
+/*
+ * Similar to the freed-object draining above (see
+ * i915_gem_drain_freed_objects()), in general we have workers that are armed
+ * by RCU and then rearm themselves in their callbacks. To be paranoid, we
+ * need to drain the workqueue a second time after waiting for the RCU grace
+ * period so that we catch work queued via RCU from the first pass. As neither
+ * drain_workqueue() nor flush_workqueue() report a result, we assume that no
+ * more than 3 passes are required to catch all _recursive_ RCU delayed work.
+ */
+void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ flush_workqueue(i915->wq);
+ rcu_barrier();
+ i915_gem_drain_freed_objects(i915);
+ }
+
+ drain_workqueue(i915->wq);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
/* We need to fallback to 4K pages if host doesn't support huge gtt. */
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
- mkwrite_device_info(dev_priv)->page_sizes =
- I915_GTT_PAGE_SIZE_4K;
+ RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
ret = i915_gem_init_userptr(dev_priv);
if (ret)
@@ -1173,7 +1213,7 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
- intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref);
+ intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);
i915_gem_suspend_late(dev_priv);
intel_gt_driver_remove(to_gt(dev_priv));
@@ -1191,7 +1231,8 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
- i915_gem_drain_freed_objects(dev_priv);
+ /* Flush any outstanding work, including i915_gem_context.release_work. */
+ i915_gem_drain_workqueue(dev_priv);
drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
@@ -1213,7 +1254,7 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
- spin_lock_init(&dev_priv->fb_tracking.lock);
+ spin_lock_init(&dev_priv->display.fb_tracking.lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
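The pair of drain helpers now defined in i915_gem.c implement a belt-and-braces pattern for work items that re-arm themselves from RCU callbacks. The same loop, reduced to a standalone sketch that assumes only a generic driver-owned workqueue:

#include <linux/rcupdate.h>
#include <linux/workqueue.h>

/*
 * Each pass flushes work already queued, then waits an RCU grace period so
 * callbacks get a chance to queue more; three passes are assumed sufficient
 * for recursive re-arming, with drain_workqueue() as the final backstop.
 */
static void drain_rcu_armed_work(struct workqueue_struct *wq)
{
	int pass;

	for (pass = 0; pass < 3; pass++) {
		flush_workqueue(wq);
		rcu_barrier();
	}

	drain_workqueue(wq);
}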
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 68d8d52bd541..a5cdf6662d01 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -26,12 +26,55 @@
#define __I915_GEM_H__
#include <linux/bug.h>
+#include <linux/types.h>
#include <drm/drm_drv.h>
#include "i915_utils.h"
+struct drm_file;
+struct drm_i915_gem_object;
struct drm_i915_private;
+struct i915_gem_ww_ctx;
+struct i915_gtt_view;
+struct i915_vma;
+
+void i915_gem_init_early(struct drm_i915_private *i915);
+void i915_gem_cleanup_early(struct drm_i915_private *i915);
+
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915);
+void i915_gem_drain_workqueue(struct drm_i915_private *i915);
+
+struct i915_vma * __must_check
+i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_gtt_view *view,
+ u64 size, u64 alignment, u64 flags);
+
+struct i915_vma * __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+ const struct i915_gtt_view *view,
+ u64 size, u64 alignment, u64 flags);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ unsigned long flags);
+#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
+#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
+#define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
+#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3)
+#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4)
+
+void i915_gem_runtime_suspend(struct drm_i915_private *i915);
+
+int __must_check i915_gem_init(struct drm_i915_private *i915);
+void i915_gem_driver_register(struct drm_i915_private *i915);
+void i915_gem_driver_unregister(struct drm_i915_private *i915);
+void i915_gem_driver_remove(struct drm_i915_private *i915);
+void i915_gem_driver_release(struct drm_i915_private *i915);
+
+int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
+
+/* FIXME: All of the below belong somewhere else. */
#ifdef CONFIG_DRM_I915_DEBUG_GEM
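The header now leans on forward declarations instead of includes; code that merely passes these pointers around compiles without the struct definitions. A minimal sketch of that idiom:

/* A forward declaration suffices for pointer parameters and returns;
 * only code that dereferences struct i915_vma needs its definition. */
struct i915_vma;

static struct i915_vma *pick_vma(struct i915_vma *a, struct i915_vma *b)
{
	return a ? a : b;	/* pointer-only use, no include required */
}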
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 329ff75b80b9..7bd1861ddbdf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -137,12 +137,12 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
range = round_down(end - len, align) - round_up(start, align);
if (range) {
if (sizeof(unsigned long) == sizeof(u64)) {
- addr = get_random_long();
+ addr = get_random_u64();
} else {
- addr = get_random_int();
+ addr = get_random_u32();
if (range > U32_MAX) {
addr <<= 32;
- addr |= get_random_int();
+ addr |= get_random_u32();
}
}
div64_u64_rem(addr, range, &addr);
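random_offset() now draws from the sized get_random_u64()/get_random_u32() helpers. Its width handling, as a standalone sketch:

#include <linux/random.h>

/* On 32-bit kernels, splice two u32 draws together so the full 64-bit
 * range is still reachable; 64-bit kernels take one u64 draw directly. */
static u64 wide_random(void)
{
	if (sizeof(unsigned long) == sizeof(u64))
		return get_random_u64();

	return ((u64)get_random_u32() << 32) | get_random_u32();
}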
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 6fd15b39570c..342c8ca6414e 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -36,7 +36,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = to_gt(i915)->ggtt->num_fences;
break;
case I915_PARAM_HAS_OVERLAY:
- value = !!i915->overlay;
+ value = !!i915->display.overlay;
break;
case I915_PARAM_HAS_BSD:
value = !!intel_engine_lookup_user(i915,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 16d8b7ba65dc..9ea2fe34e7d3 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -646,8 +646,7 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
{
struct drm_printer p = i915_error_printer(m);
- intel_device_info_print_static(&error->device_info, &p);
- intel_device_info_print_runtime(&error->runtime_info, &p);
+ intel_device_info_print(&error->device_info, &error->runtime_info, &p);
intel_driver_caps_print(&error->driver_caps, &p);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 783a6ca41a61..86a42d9e8041 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -185,7 +185,7 @@ static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
- struct i915_hotplug *hpd = &dev_priv->hotplug;
+ struct intel_hotplug *hpd = &dev_priv->display.hotplug;
if (HAS_GMCH(dev_priv)) {
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
@@ -595,7 +595,7 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->opregion.asle)
+ if (!dev_priv->display.opregion.asle)
return false;
return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
@@ -1104,9 +1104,9 @@ static void ivb_parity_work(struct work_struct *work)
out:
drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
@@ -1272,7 +1272,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
u32 enabled_irqs = 0;
for_each_intel_encoder(&dev_priv->drm, encoder)
- if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
+ if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
enabled_irqs |= hpd[encoder->hpd_pin];
return enabled_irqs;
@@ -1304,12 +1304,12 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
- wake_up_all(&dev_priv->gmbus_wait_queue);
+ wake_up_all(&dev_priv->display.gmbus.wait_queue);
}
static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
- wake_up_all(&dev_priv->gmbus_wait_queue);
+ wake_up_all(&dev_priv->display.gmbus.wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
@@ -1637,7 +1637,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (hotplug_trigger) {
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, hotplug_trigger,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
i9xx_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1841,7 +1841,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
pch_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -1986,7 +1986,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
ddi_hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
icp_ddi_port_hotplug_long_detect);
}
@@ -1998,7 +1998,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
tc_hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
icp_tc_port_hotplug_long_detect);
}
@@ -2024,7 +2024,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
spt_port_hotplug_long_detect);
}
@@ -2036,7 +2036,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug2_trigger, dig_hotplug_reg,
- dev_priv->hotplug.pch_hpd,
+ dev_priv->display.hotplug.pch_hpd,
spt_port_hotplug2_long_detect);
}
@@ -2057,7 +2057,7 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
ilk_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -2237,7 +2237,7 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
hotplug_trigger, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
bxt_port_hotplug_long_detect);
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
@@ -2257,7 +2257,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tc, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
gen11_port_hotplug_long_detect);
}
@@ -2269,7 +2269,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
trigger_tbt, dig_hotplug_reg,
- dev_priv->hotplug.hpd,
+ dev_priv->display.hotplug.hpd,
gen11_port_hotplug_long_detect);
}
@@ -2653,9 +2653,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
static u32
-gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
+gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
- void __iomem * const regs = gt->uncore->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -2669,10 +2669,10 @@ gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
}
static void
-gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
{
if (iir & GEN11_GU_MISC_GSE)
- intel_opregion_asle_intr(gt->i915);
+ intel_opregion_asle_intr(i915);
}
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
@@ -2736,11 +2736,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (master_ctl & GEN11_DISPLAY_IRQ)
gen11_display_irq_handler(i915);
- gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
gen11_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(gt, gu_misc_iir);
+ gen11_gu_misc_irq_handler(i915, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -2801,11 +2801,11 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
if (master_ctl & GEN11_DISPLAY_IRQ)
gen11_display_irq_handler(i915);
- gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
dg1_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(gt, gu_misc_iir);
+ gen11_gu_misc_irq_handler(i915, gu_misc_iir);
pmu_irq_stats(i915, IRQ_HANDLED);
@@ -3313,8 +3313,8 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3383,8 +3383,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
@@ -3460,8 +3460,8 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
u32 hotplug_irqs, enabled_irqs;
u32 val;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
val &= ~hotplug_irqs;
@@ -3538,8 +3538,8 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3578,8 +3578,8 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
if (DISPLAY_VER(dev_priv) >= 8)
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3636,8 +3636,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd);
- hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd);
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);
bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -4370,8 +4370,8 @@ HPD_FUNCS(ilk);
void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display_irqs_enabled && i915->hotplug_funcs)
- i915->hotplug_funcs->hpd_irq_setup(i915);
+ if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
+ i915->display.funcs.hotplug->hpd_irq_setup(i915);
}
/**
@@ -4413,33 +4413,33 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display_irqs_enabled = false;
- dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ dev_priv->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
/* If we have MST support, we want to avoid doing short HPD IRQ storm
* detection, as short HPD storms will occur as a natural part of
* sideband messaging with MST.
* On older platforms however, IRQ storms can occur with both long and
* short pulses, as seen on some G4x systems.
*/
- dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
+ dev_priv->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
if (HAS_GMCH(dev_priv)) {
if (I915_HAS_HOTPLUG(dev_priv))
- dev_priv->hotplug_funcs = &i915_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
} else {
if (HAS_PCH_DG2(dev_priv))
- dev_priv->hotplug_funcs = &icp_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
else if (HAS_PCH_DG1(dev_priv))
- dev_priv->hotplug_funcs = &dg1_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
else if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->hotplug_funcs = &gen11_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->hotplug_funcs = &bxt_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- dev_priv->hotplug_funcs = &icp_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- dev_priv->hotplug_funcs = &spt_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
else
- dev_priv->hotplug_funcs = &ilk_hpd_funcs;
+ dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
}
}
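intel_irq_init() now parks the per-platform hotplug ops under display.funcs.hotplug, and intel_hpd_irq_setup() calls through them. The indirection, sketched with simplified names (struct hpd_ops here is a stand-in, not the real i915 type):

#include <linux/types.h>

struct drm_i915_private;

struct hpd_ops {
	void (*hpd_irq_setup)(struct drm_i915_private *i915);
};

/* Mirrors intel_hpd_irq_setup(): call through the vtable only when display
 * IRQs are enabled and a platform handler was installed. */
static void call_hpd_setup(struct drm_i915_private *i915,
			   const struct hpd_ops *ops, bool irqs_enabled)
{
	if (irqs_enabled && ops)
		ops->hpd_irq_setup(i915);
}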
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 6fc475a5db61..d1e4d528cb17 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -29,6 +29,18 @@
#include "i915_params.h"
#include "i915_drv.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
#define i915_param_named(name, T, perm, desc) \
module_param_named(name, i915_modparams.name, T, perm); \
MODULE_PARM_DESC(name, desc)
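DECLARE_DYNDBG_CLASSMAP() above registers the drm.debug categories as dynamic-debug classes, so each category can be toggled at runtime instead of only through the module-wide drm.debug bitmask. A hedged sketch of the consumer side; the control-file syntax in the comment is an assumption about class-aware dynamic debug:

#include <drm/drm_print.h>

/* drm_dbg_kms() logs in the DRM_UT_KMS class listed in the map above; with
 * CONFIG_DYNAMIC_DEBUG it could then be enabled per class, e.g.:
 *   echo "class DRM_UT_KMS +p" > /sys/kernel/debug/dynamic_debug/control
 */
static void report_modeset(struct drm_device *drm, int pipe)
{
	drm_dbg_kms(drm, "modeset committed on pipe %d\n", pipe);
}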
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 59a579ed03bb..cd4487a1d3be 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,16 +26,22 @@
#include <drm/drm_drv.h>
#include <drm/i915_pciids.h>
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_sa_media.h"
+
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_reg.h"
+#include "intel_pci_config.h"
#define PLATFORM(x) .platform = (x)
#define GEN(x) \
- .graphics.ver = (x), \
- .media.ver = (x), \
- .display.ver = (x)
+ .__runtime.graphics.ip.ver = (x), \
+ .__runtime.media.ip.ver = (x), \
+ .__runtime.display.ip.ver = (x)
+
+#define NO_DISPLAY .__runtime.pipe_mask = 0
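GEN() and NO_DISPLAY are designated-initializer fragments meant to be pasted into a single struct initializer; when a field repeats, the last initializer wins (the property the removed "legal, last one wins" comment relied on). A self-contained sketch with stand-in fields:

/* Illustrative only: composing initializer-fragment macros the way the
 * device-info tables do; struct info is a stand-in, not intel_device_info. */
struct info { int graphics_ver; unsigned int pipe_mask; };

#define GEN_SKETCH(x)		.graphics_ver = (x)
#define NO_DISPLAY_SKETCH	.pipe_mask = 0

static const struct info example_info = {
	GEN_SKETCH(12),
	.pipe_mask = 0xf,
	NO_DISPLAY_SKETCH,	/* later initializer wins: pipe_mask is 0 */
};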
#define I845_PIPE_OFFSETS \
.display.pipe_offsets = { \
@@ -159,16 +165,16 @@
/* Keep in gen based order, and chronological order within a gen */
#define GEN_DEFAULT_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K
#define GEN_DEFAULT_REGIONS \
- .memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
+ .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_SMEM
#define I830_FEATURES \
GEN(2), \
.is_mobile = 1, \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
@@ -177,7 +183,7 @@
.has_3d_pipeline = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -189,8 +195,8 @@
#define I845_FEATURES \
GEN(2), \
- .display.pipe_mask = BIT(PIPE_A), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \
+ .__runtime.pipe_mask = BIT(PIPE_A), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
@@ -198,7 +204,7 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.dma_mask_size = 32, \
@@ -221,22 +227,22 @@ static const struct intel_device_info i845g_info = {
static const struct intel_device_info i85x_info = {
I830_FEATURES,
PLATFORM(INTEL_I85X),
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
};
static const struct intel_device_info i865g_info = {
I845_FEATURES,
PLATFORM(INTEL_I865G),
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
};
#define GEN3_FEATURES \
GEN(3), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -266,7 +272,7 @@ static const struct intel_device_info i915gm_info = {
.display.has_overlay = 1,
.display.overlay_needs_physical = 1,
.display.supports_tv = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -291,7 +297,7 @@ static const struct intel_device_info i945gm_info = {
.display.has_overlay = 1,
.display.overlay_needs_physical = 1,
.display.supports_tv = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -323,12 +329,12 @@ static const struct intel_device_info pnv_m_info = {
#define GEN4_FEATURES \
GEN(4), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .platform_engine_mask = BIT(RCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -351,7 +357,7 @@ static const struct intel_device_info i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
.is_mobile = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.display.has_overlay = 1,
.display.supports_tv = 1,
.hws_needs_physical = 1,
@@ -361,7 +367,7 @@ static const struct intel_device_info i965gm_info = {
static const struct intel_device_info g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -369,18 +375,18 @@ static const struct intel_device_info gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
.is_mobile = 1,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
.display.supports_tv = 1,
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
#define GEN5_FEATURES \
GEN(5), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_3d_pipeline = 1, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -403,16 +409,16 @@ static const struct intel_device_info ilk_m_info = {
PLATFORM(INTEL_IRONLAKE),
.is_mobile = 1,
.has_rps = true,
- .display.fbc_mask = BIT(INTEL_FBC_A),
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A),
};
#define GEN6_FEATURES \
GEN(6), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -420,8 +426,8 @@ static const struct intel_device_info ilk_m_info = {
.has_rc6p = 1, \
.has_rps = true, \
.dma_mask_size = 40, \
- .ppgtt_type = INTEL_PPGTT_ALIASING, \
- .ppgtt_size = 31, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .__runtime.ppgtt_size = 31, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
ILK_COLORS, \
@@ -460,11 +466,11 @@ static const struct intel_device_info snb_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
.display.has_hotplug = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_3d_pipeline = 1, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
@@ -473,8 +479,8 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_reset_engine = true, \
.has_rps = true, \
.dma_mask_size = 40, \
- .ppgtt_type = INTEL_PPGTT_ALIASING, \
- .ppgtt_size = 31, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .__runtime.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
IVB_COLORS, \
@@ -515,9 +521,8 @@ static const struct intel_device_info ivb_m_gt2_info = {
static const struct intel_device_info ivb_q_info = {
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
+ NO_DISPLAY,
.gt = 2,
- .display.pipe_mask = 0, /* legal, last one wins */
- .display.cpu_transcoder_mask = 0,
.has_l3_dpf = 1,
};
@@ -525,8 +530,8 @@ static const struct intel_device_info vlv_info = {
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_reset_engine = true,
@@ -534,11 +539,11 @@ static const struct intel_device_info vlv_info = {
.display.has_gmch = 1,
.display.has_hotplug = 1,
.dma_mask_size = 40,
- .ppgtt_type = INTEL_PPGTT_ALIASING,
- .ppgtt_size = 31,
+ .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING,
+ .__runtime.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
.display.mmio_offset = VLV_DISPLAY_BASE,
I9XX_PIPE_OFFSETS,
I9XX_CURSOR_OFFSETS,
@@ -549,8 +554,8 @@ static const struct intel_device_info vlv_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
@@ -584,8 +589,8 @@ static const struct intel_device_info hsw_gt3_info = {
GEN(8), \
.has_logical_ring_contexts = 1, \
.dma_mask_size = 39, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
- .ppgtt_size = 48, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \
+ .__runtime.ppgtt_size = 48, \
.has_64bit_reloc = 1
#define BDW_PLATFORM \
@@ -613,18 +618,18 @@ static const struct intel_device_info bdw_rsvd_info = {
static const struct intel_device_info bdw_gt3_info = {
BDW_PLATFORM,
.gt = 3,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
.display.has_hotplug = 1,
.is_lp = 1,
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
@@ -632,8 +637,8 @@ static const struct intel_device_info chv_info = {
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
.dma_mask_size = 39,
- .ppgtt_type = INTEL_PPGTT_FULL,
- .ppgtt_size = 32,
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL,
+ .__runtime.ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -646,16 +651,16 @@ static const struct intel_device_info chv_info = {
};
#define GEN9_DEFAULT_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K
#define GEN9_FEATURES \
GEN8_FEATURES, \
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
- .display.has_dmc = 1, \
+ .__runtime.has_dmc = 1, \
.has_gt_uc = 1, \
- .display.has_hdcp = 1, \
+ .__runtime.has_hdcp = 1, \
.display.has_ipc = 1, \
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
@@ -678,7 +683,7 @@ static const struct intel_device_info skl_gt2_info = {
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
- .platform_engine_mask = \
+ .__runtime.platform_engine_mask = \
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
@@ -697,29 +702,29 @@ static const struct intel_device_info skl_gt4_info = {
.is_lp = 1, \
.display.dbuf.slice_mask = BIT(DBUF_S1), \
.display.has_hotplug = 1, \
- .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
- .display.has_hdcp = 1, \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.has_hdcp = 1, \
.display.has_psr = 1, \
.display.has_psr_hw_tracking = 1, \
.has_runtime_pm = 1, \
- .display.has_dmc = 1, \
+ .__runtime.has_dmc = 1, \
.has_rc6 = 1, \
.has_rps = true, \
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_gt_uc = 1, \
.dma_mask_size = 39, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
- .ppgtt_size = 48, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \
+ .__runtime.ppgtt_size = 48, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
@@ -739,7 +744,7 @@ static const struct intel_device_info bxt_info = {
static const struct intel_device_info glk_info = {
GEN9_LP_FEATURES,
PLATFORM(INTEL_GEMINILAKE),
- .display.ver = 10,
+ .__runtime.display.ip.ver = 10,
.display.dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */
GLK_COLORS,
};
@@ -761,7 +766,7 @@ static const struct intel_device_info kbl_gt2_info = {
static const struct intel_device_info kbl_gt3_info = {
KBL_PLATFORM,
.gt = 3,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -782,7 +787,7 @@ static const struct intel_device_info cfl_gt2_info = {
static const struct intel_device_info cfl_gt3_info = {
CFL_PLATFORM,
.gt = 3,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
@@ -801,15 +806,15 @@ static const struct intel_device_info cml_gt2_info = {
};
#define GEN11_DEFAULT_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K | \
- I915_GTT_PAGE_SIZE_2M
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
#define GEN11_FEATURES \
GEN9_FEATURES, \
GEN11_DEFAULT_PAGE_SIZES, \
.display.abox_mask = BIT(0), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.display.pipe_offsets = { \
@@ -832,37 +837,37 @@ static const struct intel_device_info cml_gt2_info = {
ICL_COLORS, \
.display.dbuf.size = 2048, \
.display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
- .display.has_dsc = 1, \
+ .__runtime.has_dsc = 1, \
.has_coherent_ggtt = false, \
.has_logical_ring_elsq = 1
static const struct intel_device_info icl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
static const struct intel_device_info ehl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
- .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
- .ppgtt_size = 36,
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+ .__runtime.ppgtt_size = 36,
};
static const struct intel_device_info jsl_info = {
GEN11_FEATURES,
PLATFORM(INTEL_JASPERLAKE),
- .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
- .ppgtt_size = 36,
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
+ .__runtime.ppgtt_size = 36,
};
#define GEN12_FEATURES \
GEN11_FEATURES, \
GEN(12), \
.display.abox_mask = GENMASK(2, 1), \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.display.pipe_offsets = { \
@@ -890,7 +895,7 @@ static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.display.has_modular_fia = 1,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
};
@@ -898,17 +903,17 @@ static const struct intel_device_info rkl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ROCKETLAKE),
.display.abox_mask = BIT(0),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C),
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0),
};
#define DGFX_FEATURES \
- .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
+ .__runtime.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
.has_llc = 0, \
.has_pxp = 0, \
.has_snoop = 1, \
@@ -918,24 +923,24 @@ static const struct intel_device_info rkl_info = {
static const struct intel_device_info dg1_info = {
GEN12_FEATURES,
DGFX_FEATURES,
- .graphics.rel = 10,
+ .__runtime.graphics.ip.rel = 10,
PLATFORM(INTEL_DG1),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
BIT(VCS0) | BIT(VCS2),
/* Wa_16011227922 */
- .ppgtt_size = 47,
+ .__runtime.ppgtt_size = 47,
};
static const struct intel_device_info adl_s_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ALDERLAKE_S),
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
.dma_mask_size = 39,
};
@@ -951,18 +956,18 @@ static const struct intel_device_info adl_s_info = {
.display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
BIT(DBUF_S4), \
.display.has_ddi = 1, \
- .display.has_dmc = 1, \
+ .__runtime.has_dmc = 1, \
.display.has_dp_mst = 1, \
.display.has_dsb = 1, \
- .display.has_dsc = 1, \
- .display.fbc_mask = BIT(INTEL_FBC_A), \
+ .__runtime.has_dsc = 1, \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A), \
.display.has_fpga_dbg = 1, \
- .display.has_hdcp = 1, \
+ .__runtime.has_hdcp = 1, \
.display.has_hotplug = 1, \
.display.has_ipc = 1, \
.display.has_psr = 1, \
- .display.ver = 13, \
- .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .__runtime.display.ip.ver = 13, \
+ .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
.display.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -985,28 +990,28 @@ static const struct intel_device_info adl_p_info = {
GEN12_FEATURES,
XE_LPD_FEATURES,
PLATFORM(INTEL_ALDERLAKE_P),
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) |
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
.display.has_cdclk_crawl = 1,
.display.has_modular_fia = 1,
.display.has_psr_hw_tracking = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
- .ppgtt_size = 48,
+ .__runtime.ppgtt_size = 48,
.dma_mask_size = 39,
};
#undef GEN
#define XE_HP_PAGE_SIZES \
- .page_sizes = I915_GTT_PAGE_SIZE_4K | \
- I915_GTT_PAGE_SIZE_64K | \
- I915_GTT_PAGE_SIZE_2M
+ .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \
+ I915_GTT_PAGE_SIZE_64K | \
+ I915_GTT_PAGE_SIZE_2M
#define XE_HP_FEATURES \
- .graphics.ver = 12, \
- .graphics.rel = 50, \
+ .__runtime.graphics.ip.ver = 12, \
+ .__runtime.graphics.ip.rel = 50, \
XE_HP_PAGE_SIZES, \
.dma_mask_size = 46, \
.has_3d_pipeline = 1, \
@@ -1022,12 +1027,12 @@ static const struct intel_device_info adl_p_info = {
.has_reset_engine = 1, \
.has_rps = 1, \
.has_runtime_pm = 1, \
- .ppgtt_size = 48, \
- .ppgtt_type = INTEL_PPGTT_FULL
+ .__runtime.ppgtt_size = 48, \
+ .__runtime.ppgtt_type = INTEL_PPGTT_FULL
#define XE_HPM_FEATURES \
- .media.ver = 12, \
- .media.rel = 50
+ .__runtime.media.ip.ver = 12, \
+ .__runtime.media.ip.rel = 50
__maybe_unused
static const struct intel_device_info xehpsdv_info = {
@@ -1035,11 +1040,11 @@ static const struct intel_device_info xehpsdv_info = {
XE_HPM_FEATURES,
DGFX_FEATURES,
PLATFORM(INTEL_XEHPSDV),
- .display = { },
+ NO_DISPLAY,
.has_64k_pages = 1,
.needs_compact_pt = 1,
.has_media_ratio_mode = 1,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
@@ -1052,8 +1057,8 @@ static const struct intel_device_info xehpsdv_info = {
XE_HP_FEATURES, \
XE_HPM_FEATURES, \
DGFX_FEATURES, \
- .graphics.rel = 55, \
- .media.rel = 55, \
+ .__runtime.graphics.ip.rel = 55, \
+ .__runtime.media.ip.rel = 55, \
PLATFORM(INTEL_DG2), \
.has_4tile = 1, \
.has_64k_pages = 1, \
@@ -1061,7 +1066,7 @@ static const struct intel_device_info xehpsdv_info = {
.has_heci_pxp = 1, \
.needs_compact_pt = 1, \
.has_media_ratio_mode = 1, \
- .platform_engine_mask = \
+ .__runtime.platform_engine_mask = \
BIT(RCS0) | BIT(BCS0) | \
BIT(VECS0) | BIT(VECS1) | \
BIT(VCS0) | BIT(VCS2) | \
@@ -1070,14 +1075,14 @@ static const struct intel_device_info xehpsdv_info = {
static const struct intel_device_info dg2_info = {
DG2_FEATURES,
XE_LPD_FEATURES,
- .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
.require_force_probe = 1,
};
static const struct intel_device_info ats_m_info = {
DG2_FEATURES,
- .display = { 0 },
+ NO_DISPLAY,
.require_force_probe = 1,
.tuning_thread_rr_after_dep = 1,
};
@@ -1096,12 +1101,12 @@ static const struct intel_device_info pvc_info = {
XE_HPC_FEATURES,
XE_HPM_FEATURES,
DGFX_FEATURES,
- .graphics.rel = 60,
- .media.rel = 60,
+ .__runtime.graphics.ip.rel = 60,
+ .__runtime.media.ip.rel = 60,
PLATFORM(INTEL_PONTEVECCHIO),
- .display = { 0 },
+ NO_DISPLAY,
.has_flat_ccs = 0,
- .platform_engine_mask =
+ .__runtime.platform_engine_mask =
BIT(BCS0) |
BIT(VCS0) |
BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3),
@@ -1110,8 +1115,19 @@ static const struct intel_device_info pvc_info = {
#define XE_LPDP_FEATURES \
XE_LPD_FEATURES, \
- .display.ver = 14, \
- .display.has_cdclk_crawl = 1
+ .__runtime.display.ip.ver = 14, \
+ .display.has_cdclk_crawl = 1, \
+ .__runtime.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B)
+
+static const struct intel_gt_definition xelpmp_extra_gt[] = {
+ {
+ .type = GT_MEDIA,
+ .name = "Standalone Media GT",
+ .gsi_offset = MTL_MEDIA_GSI_BASE,
+ .engine_mask = BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ },
+ {}
+};
__maybe_unused
static const struct intel_device_info mtl_info = {
@@ -1121,15 +1137,16 @@ static const struct intel_device_info mtl_info = {
* Real graphics IP version will be obtained from hardware GMD_ID
* register. Value provided here is just for sanity checking.
*/
- .graphics.ver = 12,
- .graphics.rel = 70,
- .media.ver = 13,
+ .__runtime.graphics.ip.ver = 12,
+ .__runtime.graphics.ip.rel = 70,
+ .__runtime.media.ip.ver = 13,
PLATFORM(INTEL_METEORLAKE),
.display.has_modular_fia = 1,
+ .extra_gt_list = xelpmp_extra_gt,
.has_flat_ccs = 0,
.has_snoop = 1,
- .memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
- .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
+ .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
+ .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0),
.require_force_probe = 1,
};
@@ -1263,6 +1280,27 @@ static bool force_probe(u16 device_id, const char *devices)
return ret;
}
+bool i915_pci_resource_valid(struct pci_dev *pdev, int bar)
+{
+ if (!pci_resource_flags(pdev, bar))
+ return false;
+
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_UNSET)
+ return false;
+
+ if (!pci_resource_len(pdev, bar))
+ return false;
+
+ return true;
+}
+
+static bool intel_mmio_bar_valid(struct pci_dev *pdev, struct intel_device_info *intel_info)
+{
+ int gttmmaddr_bar = intel_info->__runtime.graphics.ip.ver == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
+
+ return i915_pci_resource_valid(pdev, gttmmaddr_bar);
+}
+
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
@@ -1288,6 +1326,9 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
+ if (!intel_mmio_bar_valid(pdev, intel_info))
+ return -ENXIO;
+
/* Detect if we need to wait for other drivers early on */
if (intel_modeset_probe_defer(pdev))
return -EPROBE_DEFER;
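xelpmp_extra_gt above ends with an empty {} entry, so consumers can walk the table without a separate length field. The sentinel idiom, reduced to a self-contained sketch (struct gt_def is a stand-in for intel_gt_definition):

struct gt_def {
	int type;
	const char *name;
};

/* Count entries up to the {} terminator, whose name is NULL. */
static int count_extra_gts(const struct gt_def *defs)
{
	int n = 0;

	while (defs && defs[n].name)
		n++;

	return n;
}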
diff --git a/drivers/gpu/drm/i915/i915_pci.h b/drivers/gpu/drm/i915/i915_pci.h
index ee048c238174..8dfe19f9a775 100644
--- a/drivers/gpu/drm/i915/i915_pci.h
+++ b/drivers/gpu/drm/i915/i915_pci.h
@@ -6,7 +6,13 @@
#ifndef __I915_PCI_H__
#define __I915_PCI_H__
+#include <linux/types.h>
+
+struct pci_dev;
+
int i915_pci_register_driver(void);
void i915_pci_unregister_driver(void);
+bool i915_pci_resource_valid(struct pci_dev *pdev, int bar);
+
#endif /* __I915_PCI_H__ */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index f3c23fe9ad9c..0defbb43ceea 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1376,7 +1376,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
struct i915_perf *perf = stream->perf;
- BUG_ON(stream != perf->exclusive_stream);
+ if (WARN_ON(stream != perf->exclusive_stream))
+ return;
/*
* Unset exclusive_stream first, it will be checked while disabling
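The i915_perf change above trades a fatal BUG_ON() for WARN_ON() plus an early return: the mismatch still screams in the log, but the machine survives. The general shape, as a sketch:

#include <linux/bug.h>

/* WARN_ON() returns the condition it logged, so it doubles as the test;
 * bailing out leaves state untouched instead of crashing the kernel. */
static void teardown_stream(void *stream, void *exclusive_stream)
{
	if (WARN_ON(stream != exclusive_stream))
		return;

	/* ... normal teardown ... */
}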
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3168d7007e10..0b287a59dc2f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1125,8 +1125,12 @@
#define MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN REG_BIT(16) /* tgl+ */
#define MBUS_DBOX_BW_CREDIT_MASK REG_GENMASK(15, 14)
#define MBUS_DBOX_BW_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, x)
+#define MBUS_DBOX_BW_4CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x2)
+#define MBUS_DBOX_BW_8CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x3)
#define MBUS_DBOX_B_CREDIT_MASK REG_GENMASK(12, 8)
#define MBUS_DBOX_B_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_B_CREDIT_MASK, x)
+#define MBUS_DBOX_I_CREDIT_MASK REG_GENMASK(7, 5)
+#define MBUS_DBOX_I_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_I_CREDIT_MASK, x)
#define MBUS_DBOX_A_CREDIT_MASK REG_GENMASK(3, 0)
#define MBUS_DBOX_A_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_A_CREDIT_MASK, x)
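The new MTL credit fields follow the usual REG_GENMASK()/REG_FIELD_PREP() conventions, and a register value is OR-composed from them. An illustrative sketch; the credit counts are made up, not tuned platform settings:

static u32 example_dbox_ctl(void)
{
	return MBUS_DBOX_BW_8CREDITS_MTL |	/* BW credit field, MTL encoding */
	       MBUS_DBOX_I_CREDIT(2) |
	       MBUS_DBOX_B_CREDIT(1) |
	       MBUS_DBOX_A_CREDIT(8);
}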
@@ -1462,69 +1466,6 @@
#define FBC_REND_CACHE_CLEAN REG_BIT(1)
/*
- * GPIO regs
- */
-#define GPIO(gpio) _MMIO(dev_priv->gpio_mmio_base + 0x5010 + \
- 4 * (gpio))
-
-# define GPIO_CLOCK_DIR_MASK (1 << 0)
-# define GPIO_CLOCK_DIR_IN (0 << 1)
-# define GPIO_CLOCK_DIR_OUT (1 << 1)
-# define GPIO_CLOCK_VAL_MASK (1 << 2)
-# define GPIO_CLOCK_VAL_OUT (1 << 3)
-# define GPIO_CLOCK_VAL_IN (1 << 4)
-# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
-# define GPIO_DATA_DIR_MASK (1 << 8)
-# define GPIO_DATA_DIR_IN (0 << 9)
-# define GPIO_DATA_DIR_OUT (1 << 9)
-# define GPIO_DATA_VAL_MASK (1 << 10)
-# define GPIO_DATA_VAL_OUT (1 << 11)
-# define GPIO_DATA_VAL_IN (1 << 12)
-# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
-
-#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
-#define GMBUS_AKSV_SELECT (1 << 11)
-#define GMBUS_RATE_100KHZ (0 << 8)
-#define GMBUS_RATE_50KHZ (1 << 8)
-#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */
-#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */
-#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */
-#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
-
-#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
-#define GMBUS_SW_CLR_INT (1 << 31)
-#define GMBUS_SW_RDY (1 << 30)
-#define GMBUS_ENT (1 << 29) /* enable timeout */
-#define GMBUS_CYCLE_NONE (0 << 25)
-#define GMBUS_CYCLE_WAIT (1 << 25)
-#define GMBUS_CYCLE_INDEX (2 << 25)
-#define GMBUS_CYCLE_STOP (4 << 25)
-#define GMBUS_BYTE_COUNT_SHIFT 16
-#define GMBUS_BYTE_COUNT_MAX 256U
-#define GEN9_GMBUS_BYTE_COUNT_MAX 511U
-#define GMBUS_SLAVE_INDEX_SHIFT 8
-#define GMBUS_SLAVE_ADDR_SHIFT 1
-#define GMBUS_SLAVE_READ (1 << 0)
-#define GMBUS_SLAVE_WRITE (0 << 0)
-#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
-#define GMBUS_INUSE (1 << 15)
-#define GMBUS_HW_WAIT_PHASE (1 << 14)
-#define GMBUS_STALL_TIMEOUT (1 << 13)
-#define GMBUS_INT (1 << 12)
-#define GMBUS_HW_RDY (1 << 11)
-#define GMBUS_SATOER (1 << 10)
-#define GMBUS_ACTIVE (1 << 9)
-#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
-#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
-#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
-#define GMBUS_NAK_EN (1 << 3)
-#define GMBUS_IDLE_EN (1 << 2)
-#define GMBUS_HW_WAIT_EN (1 << 1)
-#define GMBUS_HW_RDY_EN (1 << 0)
-#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
-#define GMBUS_2BYTE_INDEX_EN (1 << 31)
-
-/*
* Clock control & power management
*/
#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
@@ -1700,7 +1641,7 @@
#define DSTATE_PLL_D3_OFF (1 << 3)
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
-#define DSPCLK_GATE_D _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200)
+#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -1857,14 +1798,14 @@
#define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
-#define PROCHOT_MASK REG_BIT(1)
-#define THERMAL_LIMIT_MASK REG_BIT(2)
-#define RATL_MASK REG_BIT(6)
-#define VR_THERMALERT_MASK REG_BIT(7)
-#define VR_TDC_MASK REG_BIT(8)
-#define POWER_LIMIT_4_MASK REG_BIT(9)
-#define POWER_LIMIT_1_MASK REG_BIT(11)
-#define POWER_LIMIT_2_MASK REG_BIT(12)
+#define PROCHOT_MASK REG_BIT(0)
+#define THERMAL_LIMIT_MASK REG_BIT(1)
+#define RATL_MASK REG_BIT(5)
+#define VR_THERMALERT_MASK REG_BIT(6)
+#define VR_TDC_MASK REG_BIT(7)
+#define POWER_LIMIT_4_MASK REG_BIT(8)
+#define POWER_LIMIT_1_MASK REG_BIT(10)
+#define POWER_LIMIT_2_MASK REG_BIT(11)
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
@@ -1916,6 +1857,13 @@
#define CLKGATE_DIS_PSL(pipe) \
_MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B)
+#define _CLKGATE_DIS_PSL_EXT_A 0x4654C
+#define _CLKGATE_DIS_PSL_EXT_B 0x46550
+#define PIPEDMC_GATING_DIS REG_BIT(12)
+
+#define CLKGATE_DIS_PSL_EXT(pipe) \
+ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
+
/*
* Display engine regs
*/
@@ -2209,10 +2157,18 @@
#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A)
#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \
0 : ((trans) - TRANSCODER_A + 1) * 8)
-#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+#define TGL_PSR_MASK REG_GENMASK(2, 0)
+#define TGL_PSR_ERROR REG_BIT(2)
+#define TGL_PSR_POST_EXIT REG_BIT(1)
+#define TGL_PSR_PRE_ENTRY REG_BIT(0)
+#define EDP_PSR_MASK(trans) (TGL_PSR_MASK << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans) (TGL_PSR_ERROR << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans) (TGL_PSR_POST_EXIT << \
+ _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans) (TGL_PSR_PRE_ENTRY << \
+ _EDP_PSR_TRANS_SHIFT(trans))
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
@@ -2822,7 +2778,7 @@
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
#define PCH_PPS_BASE 0xC7200
-#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->pps_mmio_base - \
+#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->display.pps.mmio_base - \
PPS_BASE + (reg) + \
(pps_idx) * 0x100)
@@ -2918,118 +2874,6 @@
#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
-#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
-#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
- _VLV_BLC_PWM_CTL2_B)
-
-#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
-#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
- _VLV_BLC_PWM_CTL_B)
-
-#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
-#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
- _VLV_BLC_HIST_CTL_B)
-
-/* Backlight control */
-#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
-#define BLM_PWM_ENABLE (1 << 31)
-#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
-#define BLM_PIPE_SELECT (1 << 29)
-#define BLM_PIPE_SELECT_IVB (3 << 29)
-#define BLM_PIPE_A (0 << 29)
-#define BLM_PIPE_B (1 << 29)
-#define BLM_PIPE_C (2 << 29) /* ivb + */
-#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */
-#define BLM_TRANSCODER_B BLM_PIPE_B
-#define BLM_TRANSCODER_C BLM_PIPE_C
-#define BLM_TRANSCODER_EDP (3 << 29)
-#define BLM_PIPE(pipe) ((pipe) << 29)
-#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
-#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
-#define BLM_PHASE_IN_ENABLE (1 << 25)
-#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
-#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
-#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
-#define BLM_PHASE_IN_COUNT_SHIFT (8)
-#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
-#define BLM_PHASE_IN_INCR_SHIFT (0)
-#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
-#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
-/*
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
- *
- * The actual value is this field multiplied by two.
- */
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
-/*
- * This is the number of cycles out of the backlight modulation cycle for which
- * the backlight is on.
- *
- * This field must be no greater than the number of cycles in the complete
- * backlight modulation cycle.
- */
-#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
-#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
-#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
-#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
-
-#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
-#define BLM_HISTOGRAM_ENABLE (1 << 31)
-
-/* New registers for PCH-split platforms. Safe where new bits show up, the
- * register layout matches with gen4 BLC_PWM_CTL[12]. */
-#define BLC_PWM_CPU_CTL2 _MMIO(0x48250)
-#define BLC_PWM_CPU_CTL _MMIO(0x48254)
-
-#define HSW_BLC_PWM2_CTL _MMIO(0x48350)
-
-/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
- * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
-#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250)
-#define BLM_PCH_PWM_ENABLE (1 << 31)
-#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
-#define BLM_PCH_POLARITY (1 << 29)
-#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254)
-
-#define UTIL_PIN_CTL _MMIO(0x48400)
-#define UTIL_PIN_ENABLE (1 << 31)
-#define UTIL_PIN_PIPE_MASK (3 << 29)
-#define UTIL_PIN_PIPE(x) ((x) << 29)
-#define UTIL_PIN_MODE_MASK (0xf << 24)
-#define UTIL_PIN_MODE_DATA (0 << 24)
-#define UTIL_PIN_MODE_PWM (1 << 24)
-#define UTIL_PIN_MODE_VBLANK (4 << 24)
-#define UTIL_PIN_MODE_VSYNC (5 << 24)
-#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24)
-#define UTIL_PIN_OUTPUT_DATA (1 << 23)
-#define UTIL_PIN_POLARITY (1 << 22)
-#define UTIL_PIN_DIRECTION_INPUT (1 << 19)
-#define UTIL_PIN_INPUT_DATA (1 << 16)
-
-/* BXT backlight register definition. */
-#define _BXT_BLC_PWM_CTL1 0xC8250
-#define BXT_BLC_PWM_ENABLE (1 << 31)
-#define BXT_BLC_PWM_POLARITY (1 << 29)
-#define _BXT_BLC_PWM_FREQ1 0xC8254
-#define _BXT_BLC_PWM_DUTY1 0xC8258
-
-#define _BXT_BLC_PWM_CTL2 0xC8350
-#define _BXT_BLC_PWM_FREQ2 0xC8354
-#define _BXT_BLC_PWM_DUTY2 0xC8358
-
-#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \
- _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
-#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \
- _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
-#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \
- _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
-
#define PCH_GTC_CTL _MMIO(0xe7000)
#define PCH_GTC_ENABLE (1 << 31)
@@ -3619,6 +3463,34 @@
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define _XELPDP_USBC1_AUX_CH_CTL 0x16F210
+#define _XELPDP_USBC2_AUX_CH_CTL 0x16F410
+#define _XELPDP_USBC3_AUX_CH_CTL 0x16F610
+#define _XELPDP_USBC4_AUX_CH_CTL 0x16F810
+
+#define XELPDP_DP_AUX_CH_CTL(aux_ch) _MMIO(_PICK(aux_ch, \
+ _DPA_AUX_CH_CTL, \
+ _DPB_AUX_CH_CTL, \
+ 0, /* port/aux_ch C is non-existent */ \
+ _XELPDP_USBC1_AUX_CH_CTL, \
+ _XELPDP_USBC2_AUX_CH_CTL, \
+ _XELPDP_USBC3_AUX_CH_CTL, \
+ _XELPDP_USBC4_AUX_CH_CTL))
+
+#define _XELPDP_USBC1_AUX_CH_DATA1 0x16F214
+#define _XELPDP_USBC2_AUX_CH_DATA1 0x16F414
+#define _XELPDP_USBC3_AUX_CH_DATA1 0x16F614
+#define _XELPDP_USBC4_AUX_CH_DATA1 0x16F814
+
+#define XELPDP_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PICK(aux_ch, \
+ _DPA_AUX_CH_DATA1, \
+ _DPB_AUX_CH_DATA1, \
+ 0, /* port/aux_ch C is non-existent */ \
+ _XELPDP_USBC1_AUX_CH_DATA1, \
+ _XELPDP_USBC2_AUX_CH_DATA1, \
+ _XELPDP_USBC3_AUX_CH_DATA1, \
+ _XELPDP_USBC4_AUX_CH_DATA1) + (i) * 4)
+
#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
#define DP_AUX_CH_CTL_DONE (1 << 30)
#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
@@ -3631,6 +3503,8 @@
#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
+#define XELPDP_DP_AUX_CH_CTL_POWER_REQUEST REG_BIT(19)
+#define XELPDP_DP_AUX_CH_CTL_POWER_STATUS REG_BIT(18)
#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
@@ -5862,6 +5736,13 @@
[TRANSCODER_B] = _CHICKEN_TRANS_B, \
[TRANSCODER_C] = _CHICKEN_TRANS_C, \
[TRANSCODER_D] = _CHICKEN_TRANS_D))
+
+#define _MTL_CHICKEN_TRANS_A 0x604e0
+#define _MTL_CHICKEN_TRANS_B 0x614e0
+#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
+ _MTL_CHICKEN_TRANS_A, \
+ _MTL_CHICKEN_TRANS_B)
+
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
@@ -5926,7 +5807,8 @@
_BW_BUDDY1_PAGE_MASK))
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
-#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
+#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6)
+#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
@@ -6718,10 +6600,10 @@
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
-#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
-#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
-#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
-#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
+#define GEN9_MEM_LATENCY_LEVEL_3_7_MASK REG_GENMASK(31, 24)
+#define GEN9_MEM_LATENCY_LEVEL_2_6_MASK REG_GENMASK(23, 16)
+#define GEN9_MEM_LATENCY_LEVEL_1_5_MASK REG_GENMASK(15, 8)
+#define GEN9_MEM_LATENCY_LEVEL_0_4_MASK REG_GENMASK(7, 0)
#define SKL_PCODE_LOAD_HDCP_KEYS 0x5
#define SKL_PCODE_CDCLK_CONTROL 0x7
#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
@@ -6937,265 +6819,6 @@ enum skl_power_gate {
#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
-/* HDCP Key Registers */
-#define HDCP_KEY_CONF _MMIO(0x66c00)
-#define HDCP_AKSV_SEND_TRIGGER BIT(31)
-#define HDCP_CLEAR_KEYS_TRIGGER BIT(30)
-#define HDCP_KEY_LOAD_TRIGGER BIT(8)
-#define HDCP_KEY_STATUS _MMIO(0x66c04)
-#define HDCP_FUSE_IN_PROGRESS BIT(7)
-#define HDCP_FUSE_ERROR BIT(6)
-#define HDCP_FUSE_DONE BIT(5)
-#define HDCP_KEY_LOAD_STATUS BIT(1)
-#define HDCP_KEY_LOAD_DONE BIT(0)
-#define HDCP_AKSV_LO _MMIO(0x66c10)
-#define HDCP_AKSV_HI _MMIO(0x66c14)
-
-/* HDCP Repeater Registers */
-#define HDCP_REP_CTL _MMIO(0x66d00)
-#define HDCP_TRANSA_REP_PRESENT BIT(31)
-#define HDCP_TRANSB_REP_PRESENT BIT(30)
-#define HDCP_TRANSC_REP_PRESENT BIT(29)
-#define HDCP_TRANSD_REP_PRESENT BIT(28)
-#define HDCP_DDIB_REP_PRESENT BIT(30)
-#define HDCP_DDIA_REP_PRESENT BIT(29)
-#define HDCP_DDIC_REP_PRESENT BIT(28)
-#define HDCP_DDID_REP_PRESENT BIT(27)
-#define HDCP_DDIF_REP_PRESENT BIT(26)
-#define HDCP_DDIE_REP_PRESENT BIT(25)
-#define HDCP_TRANSA_SHA1_M0 (1 << 20)
-#define HDCP_TRANSB_SHA1_M0 (2 << 20)
-#define HDCP_TRANSC_SHA1_M0 (3 << 20)
-#define HDCP_TRANSD_SHA1_M0 (4 << 20)
-#define HDCP_DDIB_SHA1_M0 (1 << 20)
-#define HDCP_DDIA_SHA1_M0 (2 << 20)
-#define HDCP_DDIC_SHA1_M0 (3 << 20)
-#define HDCP_DDID_SHA1_M0 (4 << 20)
-#define HDCP_DDIF_SHA1_M0 (5 << 20)
-#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */
-#define HDCP_SHA1_BUSY BIT(16)
-#define HDCP_SHA1_READY BIT(17)
-#define HDCP_SHA1_COMPLETE BIT(18)
-#define HDCP_SHA1_V_MATCH BIT(19)
-#define HDCP_SHA1_TEXT_32 (1 << 1)
-#define HDCP_SHA1_COMPLETE_HASH (2 << 1)
-#define HDCP_SHA1_TEXT_24 (4 << 1)
-#define HDCP_SHA1_TEXT_16 (5 << 1)
-#define HDCP_SHA1_TEXT_8 (6 << 1)
-#define HDCP_SHA1_TEXT_0 (7 << 1)
-#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04)
-#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08)
-#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C)
-#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10)
-#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14)
-#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4))
-#define HDCP_SHA_TEXT _MMIO(0x66d18)
-
-/* HDCP Auth Registers */
-#define _PORTA_HDCP_AUTHENC 0x66800
-#define _PORTB_HDCP_AUTHENC 0x66500
-#define _PORTC_HDCP_AUTHENC 0x66600
-#define _PORTD_HDCP_AUTHENC 0x66700
-#define _PORTE_HDCP_AUTHENC 0x66A00
-#define _PORTF_HDCP_AUTHENC 0x66900
-#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \
- _PORTA_HDCP_AUTHENC, \
- _PORTB_HDCP_AUTHENC, \
- _PORTC_HDCP_AUTHENC, \
- _PORTD_HDCP_AUTHENC, \
- _PORTE_HDCP_AUTHENC, \
- _PORTF_HDCP_AUTHENC) + (x))
-#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0)
-#define _TRANSA_HDCP_CONF 0x66400
-#define _TRANSB_HDCP_CONF 0x66500
-#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \
- _TRANSB_HDCP_CONF)
-#define HDCP_CONF(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_CONF(trans) : \
- PORT_HDCP_CONF(port))
-
-#define HDCP_CONF_CAPTURE_AN BIT(0)
-#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0))
-#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4)
-#define _TRANSA_HDCP_ANINIT 0x66404
-#define _TRANSB_HDCP_ANINIT 0x66504
-#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_ANINIT, \
- _TRANSB_HDCP_ANINIT)
-#define HDCP_ANINIT(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_ANINIT(trans) : \
- PORT_HDCP_ANINIT(port))
-
-#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8)
-#define _TRANSA_HDCP_ANLO 0x66408
-#define _TRANSB_HDCP_ANLO 0x66508
-#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \
- _TRANSB_HDCP_ANLO)
-#define HDCP_ANLO(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_ANLO(trans) : \
- PORT_HDCP_ANLO(port))
-
-#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC)
-#define _TRANSA_HDCP_ANHI 0x6640C
-#define _TRANSB_HDCP_ANHI 0x6650C
-#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \
- _TRANSB_HDCP_ANHI)
-#define HDCP_ANHI(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_ANHI(trans) : \
- PORT_HDCP_ANHI(port))
-
-#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10)
-#define _TRANSA_HDCP_BKSVLO 0x66410
-#define _TRANSB_HDCP_BKSVLO 0x66510
-#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_BKSVLO, \
- _TRANSB_HDCP_BKSVLO)
-#define HDCP_BKSVLO(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_BKSVLO(trans) : \
- PORT_HDCP_BKSVLO(port))
-
-#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14)
-#define _TRANSA_HDCP_BKSVHI 0x66414
-#define _TRANSB_HDCP_BKSVHI 0x66514
-#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_BKSVHI, \
- _TRANSB_HDCP_BKSVHI)
-#define HDCP_BKSVHI(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_BKSVHI(trans) : \
- PORT_HDCP_BKSVHI(port))
-
-#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18)
-#define _TRANSA_HDCP_RPRIME 0x66418
-#define _TRANSB_HDCP_RPRIME 0x66518
-#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_RPRIME, \
- _TRANSB_HDCP_RPRIME)
-#define HDCP_RPRIME(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_RPRIME(trans) : \
- PORT_HDCP_RPRIME(port))
-
-#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C)
-#define _TRANSA_HDCP_STATUS 0x6641C
-#define _TRANSB_HDCP_STATUS 0x6651C
-#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP_STATUS, \
- _TRANSB_HDCP_STATUS)
-#define HDCP_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP_STATUS(trans) : \
- PORT_HDCP_STATUS(port))
-
-#define HDCP_STATUS_STREAM_A_ENC BIT(31)
-#define HDCP_STATUS_STREAM_B_ENC BIT(30)
-#define HDCP_STATUS_STREAM_C_ENC BIT(29)
-#define HDCP_STATUS_STREAM_D_ENC BIT(28)
-#define HDCP_STATUS_AUTH BIT(21)
-#define HDCP_STATUS_ENC BIT(20)
-#define HDCP_STATUS_RI_MATCH BIT(19)
-#define HDCP_STATUS_R0_READY BIT(18)
-#define HDCP_STATUS_AN_READY BIT(17)
-#define HDCP_STATUS_CIPHER BIT(16)
-#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
-
-/* HDCP2.2 Registers */
-#define _PORTA_HDCP2_BASE 0x66800
-#define _PORTB_HDCP2_BASE 0x66500
-#define _PORTC_HDCP2_BASE 0x66600
-#define _PORTD_HDCP2_BASE 0x66700
-#define _PORTE_HDCP2_BASE 0x66A00
-#define _PORTF_HDCP2_BASE 0x66900
-#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
- _PORTA_HDCP2_BASE, \
- _PORTB_HDCP2_BASE, \
- _PORTC_HDCP2_BASE, \
- _PORTD_HDCP2_BASE, \
- _PORTE_HDCP2_BASE, \
- _PORTF_HDCP2_BASE) + (x))
-
-#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98)
-#define _TRANSA_HDCP2_AUTH 0x66498
-#define _TRANSB_HDCP2_AUTH 0x66598
-#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \
- _TRANSB_HDCP2_AUTH)
-#define AUTH_LINK_AUTHENTICATED BIT(31)
-#define AUTH_LINK_TYPE BIT(30)
-#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
-#define AUTH_CLR_KEYS BIT(18)
-#define HDCP2_AUTH(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_AUTH(trans) : \
- PORT_HDCP2_AUTH(port))
-
-#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0)
-#define _TRANSA_HDCP2_CTL 0x664B0
-#define _TRANSB_HDCP2_CTL 0x665B0
-#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \
- _TRANSB_HDCP2_CTL)
-#define CTL_LINK_ENCRYPTION_REQ BIT(31)
-#define HDCP2_CTL(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_CTL(trans) : \
- PORT_HDCP2_CTL(port))
-
-#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4)
-#define _TRANSA_HDCP2_STATUS 0x664B4
-#define _TRANSB_HDCP2_STATUS 0x665B4
-#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP2_STATUS, \
- _TRANSB_HDCP2_STATUS)
-#define LINK_TYPE_STATUS BIT(22)
-#define LINK_AUTH_STATUS BIT(21)
-#define LINK_ENCRYPTION_STATUS BIT(20)
-#define HDCP2_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_STATUS(trans) : \
- PORT_HDCP2_STATUS(port))
-
-#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0
-#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0
-#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0
-#define _PIPED_HDCP2_STREAM_STATUS 0x667C0
-#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \
- _PIPEA_HDCP2_STREAM_STATUS, \
- _PIPEB_HDCP2_STREAM_STATUS, \
- _PIPEC_HDCP2_STREAM_STATUS, \
- _PIPED_HDCP2_STREAM_STATUS))
-
-#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0
-#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0
-#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP2_STREAM_STATUS, \
- _TRANSB_HDCP2_STREAM_STATUS)
-#define STREAM_ENCRYPTION_STATUS BIT(31)
-#define STREAM_TYPE_STATUS BIT(30)
-#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_STREAM_STATUS(trans) : \
- PIPE_HDCP2_STREAM_STATUS(pipe))
-
-#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
-#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
-#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \
- _PORTA_HDCP2_AUTH_STREAM, \
- _PORTB_HDCP2_AUTH_STREAM)
-#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00
-#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04
-#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \
- _TRANSA_HDCP2_AUTH_STREAM, \
- _TRANSB_HDCP2_AUTH_STREAM)
-#define AUTH_STREAM_TYPE BIT(31)
-#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \
- (GRAPHICS_VER(dev_priv) >= 12 ? \
- TRANS_HDCP2_AUTH_STREAM(trans) : \
- PORT_HDCP2_AUTH_STREAM(port))
-
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
@@ -7503,16 +7126,16 @@ enum skl_power_gate {
/* CDCLK_CTL */
#define CDCLK_CTL _MMIO(0x46000)
-#define CDCLK_FREQ_SEL_MASK (3 << 26)
-#define CDCLK_FREQ_450_432 (0 << 26)
-#define CDCLK_FREQ_540 (1 << 26)
-#define CDCLK_FREQ_337_308 (2 << 26)
-#define CDCLK_FREQ_675_617 (3 << 26)
-#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1 (0 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_2 (2 << 22)
-#define BXT_CDCLK_CD2X_DIV_SEL_4 (3 << 22)
+#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
+#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0)
+#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)
+#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2)
+#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3)
+#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0)
+#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1)
+#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2)
+#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3)
#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20)
#define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19)
#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
@@ -8367,6 +7990,7 @@ enum skl_power_gate {
#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define DSC_ALT_ICH_SEL (1 << 20)
#define DSC_VBR_ENABLE (1 << 19)
#define DSC_422_ENABLE (1 << 18)
#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
@@ -8717,4 +8341,27 @@ enum skl_power_gate {
#define GEN12_CULLBIT2 _MMIO(0x7030)
#define GEN12_STATE_ACK_DEBUG _MMIO(0x20BC)
+#define MTL_LATENCY_LP0_LP1 _MMIO(0x45780)
+#define MTL_LATENCY_LP2_LP3 _MMIO(0x45784)
+#define MTL_LATENCY_LP4_LP5 _MMIO(0x45788)
+#define MTL_LATENCY_LEVEL_EVEN_MASK REG_GENMASK(12, 0)
+#define MTL_LATENCY_LEVEL_ODD_MASK REG_GENMASK(28, 16)
+
+#define MTL_LATENCY_SAGV _MMIO(0x4578b)
+#define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0)
+
+#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
+#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
+#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
+#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(0x45710 + (point) * 2)
+#define MTL_TRCD_MASK REG_GENMASK(31, 24)
+#define MTL_TRP_MASK REG_GENMASK(23, 16)
+#define MTL_DCLK_MASK REG_GENMASK(15, 0)
+
+#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(0x45714 + (point) * 2)
+#define MTL_TRAS_MASK REG_GENMASK(16, 8)
+#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
+
#endif /* _I915_REG_H_ */
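
Several of the i915_reg.h hunks above (GT0_PERF_LIMIT_REASONS, CDCLK_CTL, the GEN9 memory-latency fields) replace open-coded shift/mask arithmetic with the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP()/REG_FIELD_GET() helpers. A minimal user-space sketch of what those helpers compute, using simplified stand-in macro bodies (the kernel's real definitions in i915_reg_defs.h add compile-time checking):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's REG_* helpers; GCC/Clang only
 * (__builtin_ctz). Not the kernel's exact macro bodies. */
#define REG_BIT(b)                ((uint32_t)1 << (b))
#define REG_GENMASK(h, l)         ((~(uint32_t)0 << (l)) & (~(uint32_t)0 >> (31 - (h))))
#define REG_FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define REG_FIELD_GET(mask, val)  (((val) & (mask)) >> __builtin_ctz(mask))

#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26)
#define CDCLK_FREQ_540      REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1)

int main(void)
{
        uint32_t reg = CDCLK_FREQ_540;

        /* REG_FIELD_PREP(REG_GENMASK(27, 26), 1) encodes the same value
         * as the old open-coded (1 << 26). */
        printf("CDCLK_FREQ_540 = 0x%08x, decoded field = %u\n",
               (unsigned int)reg,
               (unsigned int)REG_FIELD_GET(CDCLK_FREQ_SEL_MASK, reg));
        return 0;
}
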
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index ae984c66c48a..6fc0d1b89690 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -241,8 +241,6 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
const char *name,
struct lock_class_key *key)
{
- BUG_ON(!fn);
-
__init_waitqueue_head(&fence->wait, name, key);
fence->fn = fn;
#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index a7c603bc1b01..619fc5a22f0c 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -48,11 +48,15 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
do { \
static struct lock_class_key __key; \
\
+ BUILD_BUG_ON((fn) == NULL); \
__i915_sw_fence_init((fence), (fn), #fence, &__key); \
} while (0)
#else
#define i915_sw_fence_init(fence, fn) \
- __i915_sw_fence_init((fence), (fn), NULL, NULL)
+do { \
+ BUILD_BUG_ON((fn) == NULL); \
+ __i915_sw_fence_init((fence), (fn), NULL, NULL); \
+} while (0)
#endif
void i915_sw_fence_reinit(struct i915_sw_fence *fence);
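
The i915_sw_fence change above trades the runtime BUG_ON(!fn) in __i915_sw_fence_init() for a BUILD_BUG_ON((fn) == NULL) in both variants of the i915_sw_fence_init() wrapper macro, so passing a literal NULL callback now fails at compile time rather than oopsing at init. A freestanding sketch of the negative-array-size trick that such compile-time assertions are commonly built on (the kernel's real BUILD_BUG_ON() is more elaborate):

/* False condition: char[1], compiles. True condition: char[-1], build error. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
        MY_BUILD_BUG_ON(sizeof(char) != 1); /* always false, so this builds */
        /* MY_BUILD_BUG_ON(1); */           /* uncommenting breaks the build */
        return 0;
}
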
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index c10d68cdc3ca..6c14d13364bf 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -360,10 +360,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
-#define KBps(x) (1000 * (x))
-#define MBps(x) KBps(1000 * (x))
-#define GBps(x) ((u64)1000 * MBps((x)))
-
void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
static inline void __add_taint_for_CI(unsigned int taint)
{
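
The KBps()/MBps()/GBps() throughput helpers dropped from i915_utils.h above are trivial, but the (u64) cast in GBps() is worth noting: it widens the final multiply so gigabyte rates don't overflow 32 bits, although the inner MBps() math is still done in int and would itself overflow for arguments above roughly 2147. A quick illustration with the same definitions:

#include <stdint.h>
#include <stdio.h>

#define KBps(x) (1000 * (x))
#define MBps(x) KBps(1000 * (x))
#define GBps(x) ((uint64_t)1000 * MBps((x)))

int main(void)
{
        /* 8 GB/s does not fit in 32 bits; the u64 cast keeps it exact. */
        printf("GBps(8) = %llu bytes/s\n", (unsigned long long)GBps(8));
        return 0;
}
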
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index d98fbbd589aa..20575eb77ea7 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -88,46 +88,57 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-void intel_device_info_print_static(const struct intel_device_info *info,
- struct drm_printer *p)
+void intel_device_info_print(const struct intel_device_info *info,
+ const struct intel_runtime_info *runtime,
+ struct drm_printer *p)
{
- if (info->graphics.rel)
- drm_printf(p, "graphics version: %u.%02u\n", info->graphics.ver,
- info->graphics.rel);
+ if (runtime->graphics.ip.rel)
+ drm_printf(p, "graphics version: %u.%02u\n",
+ runtime->graphics.ip.ver,
+ runtime->graphics.ip.rel);
else
- drm_printf(p, "graphics version: %u\n", info->graphics.ver);
+ drm_printf(p, "graphics version: %u\n",
+ runtime->graphics.ip.ver);
- if (info->media.rel)
- drm_printf(p, "media version: %u.%02u\n", info->media.ver, info->media.rel);
+ if (runtime->media.ip.rel)
+ drm_printf(p, "media version: %u.%02u\n",
+ runtime->media.ip.ver,
+ runtime->media.ip.rel);
else
- drm_printf(p, "media version: %u\n", info->media.ver);
+ drm_printf(p, "media version: %u\n",
+ runtime->media.ip.ver);
- if (info->display.rel)
- drm_printf(p, "display version: %u.%02u\n", info->display.ver, info->display.rel);
+ if (runtime->display.ip.rel)
+ drm_printf(p, "display version: %u.%02u\n",
+ runtime->display.ip.ver,
+ runtime->display.ip.rel);
else
- drm_printf(p, "display version: %u\n", info->display.ver);
+ drm_printf(p, "display version: %u\n",
+ runtime->display.ip.ver);
drm_printf(p, "gt: %d\n", info->gt);
- drm_printf(p, "memory-regions: %x\n", info->memory_regions);
- drm_printf(p, "page-sizes: %x\n", info->page_sizes);
+ drm_printf(p, "memory-regions: %x\n", runtime->memory_regions);
+ drm_printf(p, "page-sizes: %x\n", runtime->page_sizes);
drm_printf(p, "platform: %s\n", intel_platform_name(info->platform));
- drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size);
- drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type);
+ drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size);
+ drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type);
drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size);
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
+ drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu));
+
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display.name))
DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
-}
-void intel_device_info_print_runtime(const struct intel_runtime_info *info,
- struct drm_printer *p)
-{
- drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
+ drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp));
+ drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc));
+ drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc));
+
+ drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
#undef INTEL_VGA_DEVICE
@@ -364,55 +375,55 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
drm_info(&dev_priv->drm,
"Display fused off, disabling\n");
- info->display.pipe_mask = 0;
- info->display.cpu_transcoder_mask = 0;
- info->display.fbc_mask = 0;
+ runtime->pipe_mask = 0;
+ runtime->cpu_transcoder_mask = 0;
+ runtime->fbc_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
drm_info(&dev_priv->drm, "PipeC fused off\n");
- info->display.pipe_mask &= ~BIT(PIPE_C);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ runtime->pipe_mask &= ~BIT(PIPE_C);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
} else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
- info->display.pipe_mask &= ~BIT(PIPE_A);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
- info->display.fbc_mask &= ~BIT(INTEL_FBC_A);
+ runtime->pipe_mask &= ~BIT(PIPE_A);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A);
+ runtime->fbc_mask &= ~BIT(INTEL_FBC_A);
}
if (dfsm & SKL_DFSM_PIPE_B_DISABLE) {
- info->display.pipe_mask &= ~BIT(PIPE_B);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
+ runtime->pipe_mask &= ~BIT(PIPE_B);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B);
}
if (dfsm & SKL_DFSM_PIPE_C_DISABLE) {
- info->display.pipe_mask &= ~BIT(PIPE_C);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
+ runtime->pipe_mask &= ~BIT(PIPE_C);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
}
if (DISPLAY_VER(dev_priv) >= 12 &&
(dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
- info->display.pipe_mask &= ~BIT(PIPE_D);
- info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
+ runtime->pipe_mask &= ~BIT(PIPE_D);
+ runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
}
if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE)
- info->display.has_hdcp = 0;
+ runtime->has_hdcp = 0;
if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
- info->display.fbc_mask = 0;
+ runtime->fbc_mask = 0;
if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
- info->display.has_dmc = 0;
+ runtime->has_dmc = 0;
if (DISPLAY_VER(dev_priv) >= 10 &&
(dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
- info->display.has_dsc = 0;
+ runtime->has_dsc = 0;
}
if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) {
drm_info(&dev_priv->drm,
"Disabling ppGTT for VT-d support\n");
- info->ppgtt_type = INTEL_PPGTT_NONE;
+ runtime->ppgtt_type = INTEL_PPGTT_NONE;
}
runtime->rawclk_freq = intel_read_rawclk(dev_priv);
@@ -422,8 +433,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
dev_priv->drm.driver_features &= ~(DRIVER_MODESET |
DRIVER_ATOMIC);
memset(&info->display, 0, sizeof(info->display));
+
+ runtime->cpu_transcoder_mask = 0;
memset(runtime->num_sprites, 0, sizeof(runtime->num_sprites));
memset(runtime->num_scalers, 0, sizeof(runtime->num_scalers));
+ runtime->fbc_mask = 0;
+ runtime->has_hdcp = false;
+ runtime->has_dmc = false;
+ runtime->has_dsc = false;
}
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index e681bc6ed8e9..d638235e1d26 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -37,6 +37,7 @@
struct drm_printer;
struct drm_i915_private;
+struct intel_gt_definition;
/* Keep in gen based order, and chronological order within a gen */
enum intel_platform {
@@ -164,7 +165,6 @@ enum intel_ppgtt_type {
func(has_media_ratio_mode); \
func(has_mslice_steering); \
func(has_one_eu_per_fuse_bit); \
- func(has_pooled_eu); \
func(has_pxp); \
func(has_rc6); \
func(has_rc6p); \
@@ -180,14 +180,11 @@ enum intel_ppgtt_type {
/* Keep in alphabetical order */ \
func(cursor_needs_physical); \
func(has_cdclk_crawl); \
- func(has_dmc); \
func(has_ddi); \
func(has_dp_mst); \
func(has_dsb); \
- func(has_dsc); \
func(has_fpga_dbg); \
func(has_gmch); \
- func(has_hdcp); \
func(has_hotplug); \
func(has_hti); \
func(has_ipc); \
@@ -203,23 +200,67 @@ struct ip_version {
u8 rel;
};
-struct intel_device_info {
- struct ip_version graphics;
- struct ip_version media;
+struct intel_runtime_info {
+ struct {
+ struct ip_version ip;
+ } graphics;
+ struct {
+ struct ip_version ip;
+ } media;
+ struct {
+ struct ip_version ip;
+ } display;
+
+ /*
+ * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
+ * single runtime conditionals, and also to provide groundwork for
+ * future per platform, or per SKU build optimizations.
+ *
+ * Array can be extended when necessary if the corresponding
+ * BUILD_BUG_ON is hit.
+ */
+ u32 platform_mask[2];
+
+ u16 device_id;
intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
- enum intel_platform platform;
+ u32 rawclk_freq;
- unsigned int dma_mask_size; /* available DMA address bits */
+ struct intel_step_info step;
+
+ unsigned int page_sizes; /* page sizes supported by the HW */
enum intel_ppgtt_type ppgtt_type;
unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
- unsigned int page_sizes; /* page sizes supported by the HW */
-
u32 memory_regions; /* regions supported by the HW */
+ bool has_pooled_eu;
+
+ /* display */
+ struct {
+ u8 pipe_mask;
+ u8 cpu_transcoder_mask;
+
+ u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
+
+ u8 fbc_mask;
+
+ bool has_hdcp;
+ bool has_dmc;
+ bool has_dsc;
+ };
+};
+
+struct intel_device_info {
+ enum intel_platform platform;
+
+ unsigned int dma_mask_size; /* available DMA address bits */
+
+ const struct intel_gt_definition *extra_gt_list;
+
u8 gt; /* GT number, 0 if undefined */
#define DEFINE_FLAG(name) u8 name:1
@@ -227,12 +268,6 @@ struct intel_device_info {
#undef DEFINE_FLAG
struct {
- u8 ver;
- u8 rel;
-
- u8 pipe_mask;
- u8 cpu_transcoder_mask;
- u8 fbc_mask;
u8 abox_mask;
struct {
@@ -259,27 +294,11 @@ struct intel_device_info {
u32 gamma_lut_tests;
} color;
} display;
-};
-struct intel_runtime_info {
/*
- * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
- * into single runtime conditionals, and also to provide groundwork
- * for future per platform, or per SKU build optimizations.
- *
- * Array can be extended when necessary if the corresponding
- * BUILD_BUG_ON is hit.
+ * Initial runtime info. Do not access outside of i915_driver_create().
*/
- u32 platform_mask[2];
-
- u16 device_id;
-
- u8 num_sprites[I915_MAX_PIPES];
- u8 num_scalers[I915_MAX_PIPES];
-
- u32 rawclk_freq;
-
- struct intel_step_info step;
+ const struct intel_runtime_info __runtime;
};
struct intel_driver_caps {
@@ -292,10 +311,9 @@ const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_print_static(const struct intel_device_info *info,
- struct drm_printer *p);
-void intel_device_info_print_runtime(const struct intel_runtime_info *info,
- struct drm_printer *p);
+void intel_device_info_print(const struct intel_device_info *info,
+ const struct intel_runtime_info *runtime,
+ struct drm_printer *p);
void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p);
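
Structurally, this intel_device_info rework turns the per-platform info into a const template that embeds the initial runtime state (__runtime), which the driver copies into mutable storage at probe so fuse readout can clear bits without writing to rodata. A minimal sketch of that template-plus-copy pattern, with made-up field names:

#include <stdio.h>
#include <string.h>

struct runtime_info {
        unsigned char pipe_mask;
        unsigned char has_dsc;
};

/* Const per-platform descriptor embedding the initial runtime state,
 * loosely analogous to intel_device_info::__runtime. */
struct device_info {
        const char *name;
        struct runtime_info runtime;
};

static const struct device_info example_info = {
        .name = "hypothetical-platform",
        .runtime = { .pipe_mask = 0x0f, .has_dsc = 1 },
};

int main(void)
{
        struct runtime_info rt;

        /* Probe: copy the const template into mutable state... */
        memcpy(&rt, &example_info.runtime, sizeof(rt));

        /* ...then let fuse readout strike features, cf. the
         * intel_device_info_runtime_init() hunks above. */
        rt.pipe_mask &= (unsigned char)~0x08;
        printf("%s: pipe_mask=0x%02x has_dsc=%u\n",
               example_info.name, rt.pipe_mask, rt.has_dsc);
        return 0;
}
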
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 437447119770..2403ccd52c74 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -466,6 +466,43 @@ static int gen12_get_dram_info(struct drm_i915_private *i915)
return icl_pcode_read_mem_global_info(i915);
}
+static int xelpdp_get_dram_info(struct drm_i915_private *i915)
+{
+ u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
+ struct dram_info *dram_info = &i915->dram_info;
+
+ /* don't clobber val: the channel/QGV fields below decode the same readout */
+ switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
+ case 0:
+ dram_info->type = INTEL_DRAM_DDR4;
+ break;
+ case 1:
+ dram_info->type = INTEL_DRAM_DDR5;
+ break;
+ case 2:
+ dram_info->type = INTEL_DRAM_LPDDR5;
+ break;
+ case 3:
+ dram_info->type = INTEL_DRAM_LPDDR4;
+ break;
+ case 4:
+ dram_info->type = INTEL_DRAM_DDR3;
+ break;
+ case 5:
+ dram_info->type = INTEL_DRAM_LPDDR3;
+ break;
+ default:
+ MISSING_CASE(REG_FIELD_GET(MTL_DDR_TYPE_MASK, val));
+ return -EINVAL;
+ }
+
+ dram_info->num_channels = REG_FIELD_GET(MTL_N_OF_POPULATED_CH_MASK, val);
+ dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val);
+ /* PSF GV points not supported in D14+ */
+
+ return 0;
+}
+
void intel_dram_detect(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
@@ -480,7 +517,9 @@ void intel_dram_detect(struct drm_i915_private *i915)
*/
dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
- if (GRAPHICS_VER(i915) >= 12)
+ if (DISPLAY_VER(i915) >= 14)
+ ret = xelpdp_get_dram_info(i915);
+ else if (GRAPHICS_VER(i915) >= 12)
ret = gen12_get_dram_info(i915);
else if (GRAPHICS_VER(i915) >= 11)
ret = gen11_get_dram_info(i915);
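
For reference, here is how the MTL_MEM_SS_INFO_GLOBAL fields added in the i915_reg.h hunk decode, using the same field helpers in a standalone form; the sample register value is made up:

#include <stdint.h>
#include <stdio.h>

#define REG_GENMASK(h, l)        ((~(uint32_t)0 << (l)) & (~(uint32_t)0 >> (31 - (h))))
#define REG_FIELD_GET(mask, val) (((val) & (mask)) >> __builtin_ctz(mask))

#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
#define MTL_N_OF_POPULATED_CH_MASK       REG_GENMASK(7, 4)
#define MTL_DDR_TYPE_MASK                REG_GENMASK(3, 0)

int main(void)
{
        uint32_t val = 0x00000422; /* made-up readout */

        /* Type 2 decodes as LPDDR5 in xelpdp_get_dram_info() above. */
        printf("ddr_type=%u channels=%u qgv_points=%u\n",
               (unsigned int)REG_FIELD_GET(MTL_DDR_TYPE_MASK, val),
               (unsigned int)REG_FIELD_GET(MTL_N_OF_POPULATED_CH_MASK, val),
               (unsigned int)REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val));
        return 0;
}
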
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 157e166672d7..8279dc580a3e 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -4,6 +4,7 @@
*/
#include "display/intel_audio_regs.h"
+#include "display/intel_backlight_regs.h"
#include "display/intel_dmc_regs.h"
#include "display/vlv_dsi_pll_regs.h"
#include "gt/intel_gt_regs.h"
@@ -1076,7 +1077,8 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GEN8_HDC_CHICKEN1);
MMIO_D(GEN9_WM_CHICKEN3);
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
MMIO_D(GAMT_CHKN_BIT_REG);
if (!IS_BROXTON(dev_priv))
MMIO_D(GEN9_CTX_PREEMPT_REG);
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 0fec25be146a..ba9843cb1b13 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -138,6 +138,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) &&
!IS_ALDERLAKE_P(dev_priv));
return PCH_ADP;
+ case INTEL_PCH_MTP_DEVICE_ID_TYPE:
+ case INTEL_PCH_MTP2_DEVICE_ID_TYPE:
+ drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n");
+ drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv));
+ return PCH_MTP;
default:
return PCH_NONE;
}
@@ -166,7 +171,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
* make an educated guess as to which PCH is really there.
*/
- if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+ if (IS_METEORLAKE(dev_priv))
+ id = INTEL_PCH_MTP_DEVICE_ID_TYPE;
+ else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
id = INTEL_PCH_ADP_DEVICE_ID_TYPE;
else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv))
id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 7c8ce9781d1a..32aff5a70d04 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -25,6 +25,7 @@ enum intel_pch {
PCH_ICP, /* Ice Lake/Jasper Lake PCH */
PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */
PCH_ADP, /* Alder Lake PCH */
+ PCH_MTP, /* Meteor Lake PCH */
/* Fake PCHs, functionality handled on the same PCI dev */
PCH_DG1 = 1024,
@@ -57,12 +58,15 @@ enum intel_pch {
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
#define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480
+#define INTEL_PCH_MTP_DEVICE_ID_TYPE 0x7E00
+#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
+#define HAS_PCH_MTP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MTP)
#define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
#define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
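
PCH detection matches the ISA bridge's PCI device ID against these family values, with the low bits masked off first so every SKU of a family hits one case (the 0xff80 mask below mirrors i915's INTEL_PCH_DEVICE_ID_MASK and is stated here as an assumption, since it sits outside this hunk). A standalone sketch of the dispatch for the new Meteor Lake IDs:

#include <stdio.h>

#define INTEL_PCH_MTP_DEVICE_ID_TYPE  0x7E00
#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00
#define PCH_DEVICE_ID_MASK            0xff80 /* assumed, see lead-in */

enum pch_type { PCH_NONE, PCH_MTP };

static enum pch_type pch_type_from_id(unsigned short device)
{
        switch (device & PCH_DEVICE_ID_MASK) {
        case INTEL_PCH_MTP_DEVICE_ID_TYPE:
        case INTEL_PCH_MTP2_DEVICE_ID_TYPE:
                return PCH_MTP;
        default:
                return PCH_NONE;
        }
}

int main(void)
{
        /* A made-up SKU id inside the MTP range resolves to the family. */
        printf("0x7e05 -> %s\n",
               pch_type_from_id(0x7e05) == PCH_MTP ? "PCH_MTP" : "PCH_NONE");
        return 0;
}
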
diff --git a/drivers/gpu/drm/i915/intel_pci_config.h b/drivers/gpu/drm/i915/intel_pci_config.h
index 12cd9d4f23de..4977a524ce6f 100644
--- a/drivers/gpu/drm/i915/intel_pci_config.h
+++ b/drivers/gpu/drm/i915/intel_pci_config.h
@@ -6,6 +6,13 @@
#ifndef __INTEL_PCI_CONFIG_H__
#define __INTEL_PCI_CONFIG_H__
+/* PCI BARs */
+#define GTTMMADR_BAR 0
+#define GEN2_GTTMMADR_BAR 1
+#define GFXMEM_BAR 2
+#define GTT_APERTURE_BAR GFXMEM_BAR
+#define GEN12_LMEM_BAR GFXMEM_BAR
+
/* BSM in include/drm/i915_drm.h */
#define MCHBAR_I915 0x44
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ef7553b494ea..8f86f56e7ca4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -25,60 +25,22 @@
*
*/
-#include <linux/module.h>
-#include <linux/string_helpers.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_blend.h>
-#include <drm/drm_fourcc.h>
-
-#include "display/intel_atomic.h"
-#include "display/intel_atomic_plane.h"
-#include "display/intel_bw.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
-#include "display/intel_display_types.h"
-#include "display/intel_fb.h"
-#include "display/intel_fbc.h"
-#include "display/intel_sprite.h"
-#include "display/skl_universal_plane.h"
+#include "display/skl_watermark.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
-#include "gt/intel_llc.h"
#include "i915_drv.h"
-#include "i915_fixed.h"
-#include "i915_irq.h"
#include "intel_mchbar_regs.h"
-#include "intel_pcode.h"
#include "intel_pm.h"
#include "vlv_sideband.h"
-#include "../../../platform/x86/intel_ips.h"
-
-static void skl_sagv_disable(struct drm_i915_private *dev_priv);
struct drm_i915_clock_gating_funcs {
void (*init_clock_gating)(struct drm_i915_private *i915);
};
-/* Stores plane specific WM parameters */
-struct skl_wm_params {
- bool x_tiled, y_tiled;
- bool rc_surface;
- bool is_planar;
- u32 width;
- u8 cpp;
- u32 plane_pixel_rate;
- u32 y_min_scanlines;
- u32 plane_bytes_per_line;
- uint_fixed_16_16_t plane_blocks_per_line;
- uint_fixed_16_16_t y_tile_minimum;
- u32 linetime_us;
- u32 dbuf_block_size;
-};
-
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
@@ -468,13 +430,13 @@ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
bool ret;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
ret = _intel_set_memory_cxsr(dev_priv, enable);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->wm.vlv.cxsr = enable;
+ dev_priv->display.wm.vlv.cxsr = enable;
else if (IS_G4X(dev_priv))
- dev_priv->wm.g4x.cxsr = enable;
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ dev_priv->display.wm.g4x.cxsr = enable;
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
return ret;
}
@@ -834,11 +796,11 @@ static bool is_enabling(int old, int new, int threshold)
static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
- return dev_priv->wm.max_level + 1;
+ return dev_priv->display.wm.max_level + 1;
}
-static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -1093,11 +1055,11 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
/* all latencies in usec */
- dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
- dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
- dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
+ dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
- dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
+ dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL;
}
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -1150,7 +1112,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_display_mode *pipe_mode =
&crtc_state->hw.pipe_mode;
- unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
+ unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
unsigned int pixel_rate, htotal, cpp, width, wm;
if (latency == 0)
@@ -1324,7 +1286,7 @@ static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (level > dev_priv->wm.max_level)
+ if (level > dev_priv->display.wm.max_level)
return false;
return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1583,7 +1545,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv,
static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
+ struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
struct g4x_wm_values new_wm = {};
g4x_merge_wm(dev_priv, &new_wm);
@@ -1609,10 +1571,10 @@ static void g4x_initial_watermarks(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
@@ -1625,10 +1587,10 @@ static void g4x_optimize_watermarks(struct intel_atomic_state *state,
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
/* latency must be in 0.1us units. */
@@ -1650,15 +1612,15 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
/* all latencies in usec */
- dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
- dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+ dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
- dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
+ dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
- dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
+ dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
}
}
@@ -1672,7 +1634,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
&crtc_state->hw.pipe_mode;
unsigned int pixel_rate, htotal, cpp, width, wm;
- if (dev_priv->wm.pri_latency[level] == 0)
+ if (dev_priv->display.wm.pri_latency[level] == 0)
return USHRT_MAX;
if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1693,7 +1655,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
wm = 63;
} else {
wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
- dev_priv->wm.pri_latency[level] * 10);
+ dev_priv->display.wm.pri_latency[level] * 10);
}
return min_t(unsigned int, wm, USHRT_MAX);
@@ -2158,7 +2120,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct intel_crtc *crtc;
int num_active_pipes = 0;
- wm->level = dev_priv->wm.max_level;
+ wm->level = dev_priv->display.wm.max_level;
wm->cxsr = true;
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -2197,7 +2159,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
+ struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
struct vlv_wm_values new_wm = {};
vlv_merge_wm(dev_priv, &new_wm);
@@ -2235,10 +2197,10 @@ static void vlv_initial_watermarks(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
@@ -2251,10 +2213,10 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state,
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void i965_update_wm(struct drm_i915_private *dev_priv)
@@ -2835,9 +2797,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- u16 pri_latency = dev_priv->wm.pri_latency[level];
- u16 spr_latency = dev_priv->wm.spr_latency[level];
- u16 cur_latency = dev_priv->wm.cur_latency[level];
+ u16 pri_latency = dev_priv->display.wm.pri_latency[level];
+ u16 spr_latency = dev_priv->display.wm.spr_latency[level];
+ u16 cur_latency = dev_priv->display.wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2861,119 +2823,43 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
- u16 wm[])
+static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- if (DISPLAY_VER(dev_priv) >= 9) {
- u32 val;
- int ret, i;
- int level, max_level = ilk_wm_max_level(dev_priv);
- int mult = IS_DG2(dev_priv) ? 2 : 1;
-
- /* read the first set of memory latencies[0:3] */
- val = 0; /* data0 to be programmed to 0 for first set */
- ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
- &val, NULL);
+ u64 sskpd;
- if (ret) {
- drm_err(&dev_priv->drm,
- "SKL Mailbox read error = %d\n", ret);
- return;
- }
-
- wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
-
- /* read the second set of memory latencies[4:7] */
- val = 1; /* data0 to be programmed to 1 for second set */
- ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
- &val, NULL);
- if (ret) {
- drm_err(&dev_priv->drm,
- "SKL Mailbox read error = %d\n", ret);
- return;
- }
+ sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
- wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
- wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
+ if (wm[0] == 0)
+ wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
+ wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
+ wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
+ wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
+ wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
+}
- /*
- * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
- * need to be disabled. We make sure to sanitize the values out
- * of the punit to satisfy this requirement.
- */
- for (level = 1; level <= max_level; level++) {
- if (wm[level] == 0) {
- for (i = level + 1; i <= max_level; i++)
- wm[i] = 0;
+static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 sskpd;
- max_level = level - 1;
+ sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
- break;
- }
- }
+ wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
+ wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
+ wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
+ wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
+}
- /*
- * WaWmMemoryReadLatency
- *
- * punit doesn't take into account the read latency so we need
- * to add proper adjustment to each valid level we retrieve
- * from the punit when level 0 response data is 0us.
- */
- if (wm[0] == 0) {
- u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2;
+static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
+{
+ u32 mltr;
- for (level = 0; level <= max_level; level++)
- wm[level] += adjust;
- }
+ mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
- /*
- * WA Level-0 adjustment for 16GB DIMMs: SKL+
- * If we could not get dimm info enable this WA to prevent from
- * any underrun. If not able to get Dimm info assume 16GB dimm
- * to avoid any underrun.
- */
- if (dev_priv->dram_info.wm_lv_0_adjust_needed)
- wm[0] += 1;
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
- if (wm[0] == 0)
- wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
- wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
- wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
- wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
- wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
- } else if (DISPLAY_VER(dev_priv) >= 6) {
- u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
-
- wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
- wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
- wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
- wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
- } else if (DISPLAY_VER(dev_priv) >= 5) {
- u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
-
- /* ILK primary LP0 latency is 700 ns */
- wm[0] = 7;
- wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
- wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
- } else {
- MISSING_CASE(INTEL_DEVID(dev_priv));
- }
+ /* ILK primary LP0 latency is 700 ns */
+ wm[0] = 7;
+ wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
+ wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
@@ -3007,9 +2893,8 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
return 2;
}
-static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
- const char *name,
- const u16 wm[])
+void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ const char *name, const u16 wm[])
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -3061,18 +2946,18 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
* The BIOS provided WM memory latency values are often
* inadequate for high resolution displays. Adjust them.
*/
- changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12);
- changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+ changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
+ changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
if (!changed)
return;
drm_dbg_kms(&dev_priv->drm,
"WM latency values increased to avoid potential underruns\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
}
static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
@@ -3088,37 +2973,42 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
* interrupts only. To play it safe we disable LP3
* watermarks entirely.
*/
- if (dev_priv->wm.pri_latency[3] == 0 &&
- dev_priv->wm.spr_latency[3] == 0 &&
- dev_priv->wm.cur_latency[3] == 0)
+ if (dev_priv->display.wm.pri_latency[3] == 0 &&
+ dev_priv->display.wm.spr_latency[3] == 0 &&
+ dev_priv->display.wm.cur_latency[3] == 0)
return;
- dev_priv->wm.pri_latency[3] = 0;
- dev_priv->wm.spr_latency[3] = 0;
- dev_priv->wm.cur_latency[3] = 0;
+ dev_priv->display.wm.pri_latency[3] = 0;
+ dev_priv->display.wm.spr_latency[3] = 0;
+ dev_priv->display.wm.cur_latency[3] = 0;
drm_dbg_kms(&dev_priv->drm,
"LP3 watermarks disabled due to potential for lost interrupts\n");
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
}
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
- intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else if (DISPLAY_VER(dev_priv) >= 6)
+ snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
+ else
+ ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
- memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
- sizeof(dev_priv->wm.pri_latency));
- memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
- sizeof(dev_priv->wm.pri_latency));
+ memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
+ memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
+ sizeof(dev_priv->display.wm.pri_latency));
- intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
- intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
+ intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
- intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
- intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
- intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+ intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
+ intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
+ intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
if (DISPLAY_VER(dev_priv) == 6) {
snb_wm_latency_quirk(dev_priv);
@@ -3126,12 +3016,6 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
-static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
-{
- intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
- intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
-}
-
static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
struct intel_pipe_wm *pipe_wm)
{
@@ -3386,7 +3270,7 @@ static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 2 * level;
else
- return dev_priv->wm.pri_latency[level];
+ return dev_priv->display.wm.pri_latency[level];
}
static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
@@ -3538,7 +3422,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
unsigned int dirty)
{
- struct ilk_wm_values *previous = &dev_priv->wm.hw;
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
bool changed = false;
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
@@ -3572,7 +3456,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
struct ilk_wm_values *results)
{
- struct ilk_wm_values *previous = &dev_priv->wm.hw;
+ struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
unsigned int dirty;
u32 val;
@@ -3634,7 +3518,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
- dev_priv->wm.hw = *results;
+ dev_priv->display.wm.hw = *results;
}
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
@@ -3642,2765 +3526,6 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
-u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
-{
- u8 enabled_slices = 0;
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(dev_priv, slice) {
- if (intel_uncore_read(&dev_priv->uncore,
- DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
- enabled_slices |= BIT(slice);
- }
-
- return enabled_slices;
-}
-
-/*
- * FIXME: We still don't have the proper code to detect if we need to apply the WA,
- * so assume we'll always need it in order to avoid underruns.
- */
-static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv)
-{
- return DISPLAY_VER(dev_priv) == 9;
-}
-
-static bool
-intel_has_sagv(struct drm_i915_private *dev_priv)
-{
- return DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv) &&
- dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
-}
-
-static u32
-intel_sagv_block_time(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 12) {
- u32 val = 0;
- int ret;
-
- ret = snb_pcode_read(&dev_priv->uncore,
- GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
- &val, NULL);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm, "Couldn't read SAGV block time!\n");
- return 0;
- }
-
- return val;
- } else if (DISPLAY_VER(dev_priv) == 11) {
- return 10;
- } else if (DISPLAY_VER(dev_priv) == 9 && !IS_LP(dev_priv)) {
- return 30;
- } else {
- return 0;
- }
-}
-
-static void intel_sagv_init(struct drm_i915_private *i915)
-{
- if (!intel_has_sagv(i915))
- i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
-
- /*
- * Probe to see if we have working SAGV control.
- * For icl+ this was already determined by intel_bw_init_hw().
- */
- if (DISPLAY_VER(i915) < 11)
- skl_sagv_disable(i915);
-
- drm_WARN_ON(&i915->drm, i915->sagv_status == I915_SAGV_UNKNOWN);
-
- i915->sagv_block_time_us = intel_sagv_block_time(i915);
-
- drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n",
- str_yes_no(intel_has_sagv(i915)), i915->sagv_block_time_us);
-
- /* avoid overflow when adding with wm0 latency/etc. */
- if (drm_WARN(&i915->drm, i915->sagv_block_time_us > U16_MAX,
- "Excessive SAGV block time %u, ignoring\n",
- i915->sagv_block_time_us))
- i915->sagv_block_time_us = 0;
-
- if (!intel_has_sagv(i915))
- i915->sagv_block_time_us = 0;
-}
-
-/*
- * SAGV dynamically adjusts the system agent voltage and clock frequencies
- * depending on power and performance requirements. The display engine access
- * to system memory is blocked during the adjustment time. Because of the
- * blocking time, having this enabled can cause full system hangs and/or pipe
- * underruns if we don't meet all of the following requirements:
- *
- * - <= 1 pipe enabled
- * - All planes can enable watermarks for latencies >= SAGV engine block time
- * - We're not using an interlaced display configuration
- */
-static void skl_sagv_enable(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- if (!intel_has_sagv(dev_priv))
- return;
-
- if (dev_priv->sagv_status == I915_SAGV_ENABLED)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
- ret = snb_pcode_write(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_ENABLE);
-
- /* We don't need to wait for SAGV when enabling */
-
- /*
- * Some skl systems, pre-release machines in particular,
- * don't actually have SAGV.
- */
- if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
- return;
- } else if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to enable SAGV\n");
- return;
- }
-
- dev_priv->sagv_status = I915_SAGV_ENABLED;
-}
-
-static void skl_sagv_disable(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- if (!intel_has_sagv(dev_priv))
- return;
-
- if (dev_priv->sagv_status == I915_SAGV_DISABLED)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
- /* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_DISABLE,
- GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
- 1);
- /*
- * Some skl systems, pre-release machines in particular,
- * don't actually have SAGV.
- */
- if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
- drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n");
- dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
- return;
- } else if (ret < 0) {
- drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret);
- return;
- }
-
- dev_priv->sagv_status = I915_SAGV_DISABLED;
-}
-
-static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
-
- if (!new_bw_state)
- return;
-
- if (!intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_disable(i915);
-}
-
-static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
-
- if (!new_bw_state)
- return;
-
- if (intel_can_enable_sagv(i915, new_bw_state))
- skl_sagv_enable(i915);
-}
-
-static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask;
- new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(&dev_priv->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Restrict required qgv points before updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(dev_priv, new_mask);
-}
-
-static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_bw_state *old_bw_state =
- intel_atomic_get_old_bw_state(state);
- const struct intel_bw_state *new_bw_state =
- intel_atomic_get_new_bw_state(state);
- u16 old_mask, new_mask;
-
- if (!new_bw_state)
- return;
-
- old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
- new_mask = new_bw_state->qgv_points_mask;
-
- if (old_mask == new_mask)
- return;
-
- WARN_ON(!new_bw_state->base.changed);
-
- drm_dbg_kms(&dev_priv->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
- old_mask, new_mask);
-
- /*
- * Allow required qgv points after updating the configuration.
- * According to BSpec we can't mask and unmask qgv points at the same
- * time. Also masking should be done before updating the configuration
- * and unmasking afterwards.
- */
- icl_pcode_restrict_qgv_points(dev_priv, new_mask);
-}
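
/*
 * A minimal standalone sketch of the QGV mask ordering above, assuming
 * hypothetical 16-bit masks: the pre-plane-update step may only add
 * restrictions (the union of the old and new masks), and the
 * post-plane-update step then relaxes the mask to exactly the new one,
 * once the configuration has been updated.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t qgv_pre_mask(uint16_t old_mask, uint16_t new_mask)
{
	return old_mask | new_mask;	/* mask (restrict) before the update */
}

static uint16_t qgv_post_mask(uint16_t new_mask)
{
	return new_mask;		/* unmask (relax) after the update */
}

int main(void)
{
	/* e.g. the old config restricted points 0x3, the new one needs 0x5 */
	assert(qgv_pre_mask(0x3, 0x5) == 0x7);	/* both stay restricted */
	assert(qgv_post_mask(0x5) == 0x5);	/* old-only bits released */
	return 0;
}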
-
-void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
-
- /*
- * Just return if we can't control SAGV or don't have it.
- * This is different from the situation where we have SAGV but just
- * can't afford it due to DBuf limitations: if SAGV is completely
- * disabled in the BIOS, we are not even allowed to send a PCode
- * request, as it would throw an error. So we have to check it here.
- */
- if (!intel_has_sagv(i915))
- return;
-
- if (DISPLAY_VER(i915) >= 11)
- icl_sagv_pre_plane_update(state);
- else
- skl_sagv_pre_plane_update(state);
-}
-
-void intel_sagv_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
-
- /*
- * Just return if we can't control SAGV or don't have it.
- * This is different from the situation where we have SAGV but just
- * can't afford it due to DBuf limitations: if SAGV is completely
- * disabled in the BIOS, we are not even allowed to send a PCode
- * request, as it would throw an error. So we have to check it here.
- */
- if (!intel_has_sagv(i915))
- return;
-
- if (DISPLAY_VER(i915) >= 11)
- icl_sagv_post_plane_update(state);
- else
- skl_sagv_post_plane_update(state);
-}
-
-static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum plane_id plane_id;
- int max_level = INT_MAX;
-
- if (!intel_has_sagv(dev_priv))
- return false;
-
- if (!crtc_state->hw.active)
- return true;
-
- if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE)
- return false;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
- int level;
-
- /* Skip this plane if it's not enabled */
- if (!wm->wm[0].enable)
- continue;
-
- /* Find the highest enabled wm level for this plane */
- for (level = ilk_wm_max_level(dev_priv);
- !wm->wm[level].enable; --level)
- { }
-
- /* Highest common enabled wm level for all planes */
- max_level = min(level, max_level);
- }
-
- /* No enabled planes? */
- if (max_level == INT_MAX)
- return true;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- /*
- * All enabled planes must have enabled a common wm level that
- * can tolerate memory latencies higher than sagv_block_time_us
- */
- if (wm->wm[0].enable && !wm->wm[max_level].can_sagv)
- return false;
- }
-
- return true;
-}
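
/*
 * A standalone sketch of the level selection above, assuming a toy
 * two-plane setup: per plane, find the highest enabled watermark level,
 * take the minimum across planes, and require that this common level
 * can tolerate the SAGV block time on every enabled plane.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_LEVELS 8
#define TOY_PLANES 2

struct toy_wm_level { bool enable; bool can_sagv; };

static bool toy_can_enable_sagv(struct toy_wm_level wm[TOY_PLANES][TOY_LEVELS])
{
	int plane, level, max_level = TOY_LEVELS;

	for (plane = 0; plane < TOY_PLANES; plane++) {
		if (!wm[plane][0].enable)
			continue;	/* plane disabled, skip it */
		for (level = TOY_LEVELS - 1; !wm[plane][level].enable; level--)
			;
		if (level < max_level)
			max_level = level;	/* highest common level */
	}
	if (max_level == TOY_LEVELS)
		return true;	/* no enabled planes */
	for (plane = 0; plane < TOY_PLANES; plane++)
		if (wm[plane][0].enable && !wm[plane][max_level].can_sagv)
			return false;
	return true;
}

int main(void)
{
	struct toy_wm_level wm[TOY_PLANES][TOY_LEVELS] = {0};

	/* plane 0 enabled up to level 3, plane 1 up to level 5 */
	for (int l = 0; l <= 3; l++)
		wm[0][l] = (struct toy_wm_level){ true, l >= 2 };
	for (int l = 0; l <= 5; l++)
		wm[1][l] = (struct toy_wm_level){ true, l >= 1 };

	/* common level is 3, both planes can_sagv there -> prints 1 */
	printf("%d\n", toy_can_enable_sagv(wm));
	return 0;
}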
-
-static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum plane_id plane_id;
-
- if (!crtc_state->hw.active)
- return true;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (wm->wm[0].enable && !wm->sagv.wm0.enable)
- return false;
- }
-
- return true;
-}
-
-static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(dev_priv) >= 12)
- return tgl_crtc_can_enable_sagv(crtc_state);
- else
- return skl_crtc_can_enable_sagv(crtc_state);
-}
-
-bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
- const struct intel_bw_state *bw_state)
-{
- if (DISPLAY_VER(dev_priv) < 11 &&
- bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
- return false;
-
- return bw_state->pipe_sagv_reject == 0;
-}
-
-static int intel_compute_sagv_mask(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- int ret;
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- if (intel_crtc_can_enable_sagv(new_crtc_state))
- new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
- else
- new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
- }
-
- if (!new_bw_state)
- return 0;
-
- new_bw_state->active_pipes =
- intel_calc_active_pipes(state, old_bw_state->active_pipes);
-
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
- intel_can_enable_sagv(dev_priv, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- /*
- * We store use_sagv_wm in the crtc state rather than relying on
- * that bw state since we have no convenient way to get at the
- * latter from the plane commit hooks (especially in the legacy
- * cursor case)
- */
- pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(dev_priv) &&
- DISPLAY_VER(dev_priv) >= 12 &&
- intel_can_enable_sagv(dev_priv, new_bw_state);
- }
-
- return 0;
-}
-
-static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
- u16 start, u16 end)
-{
- entry->start = start;
- entry->end = end;
-
- return end;
-}
-
-static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
-{
- return INTEL_INFO(dev_priv)->display.dbuf.size /
- hweight8(INTEL_INFO(dev_priv)->display.dbuf.slice_mask);
-}
-
-static void
-skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask,
- struct skl_ddb_entry *ddb)
-{
- int slice_size = intel_dbuf_slice_size(dev_priv);
-
- if (!slice_mask) {
- ddb->start = 0;
- ddb->end = 0;
- return;
- }
-
- ddb->start = (ffs(slice_mask) - 1) * slice_size;
- ddb->end = fls(slice_mask) * slice_size;
-
- WARN_ON(ddb->start >= ddb->end);
- WARN_ON(ddb->end > INTEL_INFO(dev_priv)->display.dbuf.size);
-}
-
-static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
-{
- struct skl_ddb_entry ddb;
-
- if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
- slice_mask = BIT(DBUF_S1);
- else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
- slice_mask = BIT(DBUF_S3);
-
- skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
-
- return ddb.start;
-}
-
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry *entry)
-{
- int slice_size = intel_dbuf_slice_size(dev_priv);
- enum dbuf_slice start_slice, end_slice;
- u8 slice_mask = 0;
-
- if (!skl_ddb_entry_size(entry))
- return 0;
-
- start_slice = entry->start / slice_size;
- end_slice = (entry->end - 1) / slice_size;
-
- /*
- * A per-plane DDB entry can, in the worst case, span multiple slices,
- * but a single entry is always contiguous.
- */
- while (start_slice <= end_slice) {
- slice_mask |= BIT(start_slice);
- start_slice++;
- }
-
- return slice_mask;
-}
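
/*
 * A standalone sketch of the slice/DDB mapping above, assuming four
 * slices of 512 blocks each: skl_ddb_entry_for_slices() maps a slice
 * mask to a block range via ffs()/fls(), and skl_ddb_dbuf_slice_mask()
 * is its inverse (a DDB entry is contiguous, so it covers every slice
 * between its first and last block).
 */
#include <assert.h>

#define SLICE_SIZE 512

static int toy_ffs(unsigned int v) { return __builtin_ffs(v); }
static int toy_fls(unsigned int v) { return v ? 32 - __builtin_clz(v) : 0; }

int main(void)
{
	unsigned int mask = 0x6;	/* slices 1 and 2 */
	int start = (toy_ffs(mask) - 1) * SLICE_SIZE;	/* 512 */
	int end = toy_fls(mask) * SLICE_SIZE;		/* 1536 */
	unsigned int out = 0;

	assert(start == 512 && end == 1536);

	/* inverse: block range back to slice mask */
	for (int s = start / SLICE_SIZE; s <= (end - 1) / SLICE_SIZE; s++)
		out |= 1u << s;
	assert(out == mask);
	return 0;
}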
-
-static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
-{
- const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
- int hdisplay, vdisplay;
-
- if (!crtc_state->hw.active)
- return 0;
-
- /*
- * The watermark/DDB requirement depends heavily on the width of the
- * framebuffer, so instead of allocating DDB equally among pipes,
- * distribute it based on the resolution/width of the display.
- */
- drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
-
- return hdisplay;
-}
-
-static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
- enum pipe for_pipe,
- unsigned int *weight_start,
- unsigned int *weight_end,
- unsigned int *weight_total)
-{
- struct drm_i915_private *dev_priv =
- to_i915(dbuf_state->base.state->base.dev);
- enum pipe pipe;
-
- *weight_start = 0;
- *weight_end = 0;
- *weight_total = 0;
-
- for_each_pipe(dev_priv, pipe) {
- int weight = dbuf_state->weight[pipe];
-
- /*
- * Do not account for pipes using other slice sets. Luckily,
- * as of the current BSpec, slice sets do not partially
- * intersect (pipes share either the same slice or the same
- * slice set, i.e. no partial intersection), so it is enough
- * to check for equality for now.
- */
- if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe])
- continue;
-
- *weight_total += weight;
- if (pipe < for_pipe) {
- *weight_start += weight;
- *weight_end += weight;
- } else if (pipe == for_pipe) {
- *weight_end += weight;
- }
- }
-}
-
-static int
-skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- unsigned int weight_total, weight_start, weight_end;
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
- struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- struct intel_crtc_state *crtc_state;
- struct skl_ddb_entry ddb_slices;
- enum pipe pipe = crtc->pipe;
- unsigned int mbus_offset = 0;
- u32 ddb_range_size;
- u32 dbuf_slice_mask;
- u32 start, end;
- int ret;
-
- if (new_dbuf_state->weight[pipe] == 0) {
- skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
- goto out;
- }
-
- dbuf_slice_mask = new_dbuf_state->slices[pipe];
-
- skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices);
- mbus_offset = mbus_ddb_offset(dev_priv, dbuf_slice_mask);
- ddb_range_size = skl_ddb_entry_size(&ddb_slices);
-
- intel_crtc_dbuf_weights(new_dbuf_state, pipe,
- &weight_start, &weight_end, &weight_total);
-
- start = ddb_range_size * weight_start / weight_total;
- end = ddb_range_size * weight_end / weight_total;
-
- skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
- ddb_slices.start - mbus_offset + start,
- ddb_slices.start - mbus_offset + end);
-
-out:
- if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
- skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
- &new_dbuf_state->ddb[pipe]))
- return 0;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
-
- crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- /*
- * Used for checking overlaps, so we need absolute
- * offsets instead of MBUS relative offsets.
- */
- crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start;
- crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end;
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n",
- crtc->base.base.id, crtc->base.name,
- old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe],
- old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end,
- new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end,
- old_dbuf_state->active_pipes, new_dbuf_state->active_pipes);
-
- return 0;
-}
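
/*
 * A worked standalone sketch of the width-proportional DDB split above,
 * assuming two pipes sharing one 1024-block slice set with hdisplay
 * weights 1920 and 3840: each pipe gets the [start, end) range given by
 * its cumulative weight over the total weight.
 */
#include <assert.h>
#include <stdint.h>

static void toy_ddb_range(uint32_t range, uint32_t w_start, uint32_t w_end,
			  uint32_t w_total, uint32_t *start, uint32_t *end)
{
	*start = range * w_start / w_total;
	*end = range * w_end / w_total;
}

int main(void)
{
	uint32_t s, e;

	/* pipe A: weight before it 0, through it 1920, total 5760 */
	toy_ddb_range(1024, 0, 1920, 5760, &s, &e);
	assert(s == 0 && e == 341);

	/* pipe B: weight before it 1920, through it 5760 */
	toy_ddb_range(1024, 1920, 5760, 5760, &s, &e);
	assert(s == 341 && e == 1024);
	return 0;
}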
-
-static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
- int width, const struct drm_format_info *format,
- u64 modifier, unsigned int rotation,
- u32 plane_pixel_rate, struct skl_wm_params *wp,
- int color_plane);
-
-static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- int level,
- unsigned int latency,
- const struct skl_wm_params *wp,
- const struct skl_wm_level *result_prev,
- struct skl_wm_level *result /* out */);
-
-static unsigned int
-skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
- int num_active)
-{
- struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct skl_wm_level wm = {};
- int ret, min_ddb_alloc = 0;
- struct skl_wm_params wp;
-
- ret = skl_compute_wm_params(crtc_state, 256,
- drm_format_info(DRM_FORMAT_ARGB8888),
- DRM_FORMAT_MOD_LINEAR,
- DRM_MODE_ROTATE_0,
- crtc_state->pixel_rate, &wp, 0);
- drm_WARN_ON(&dev_priv->drm, ret);
-
- for (level = 0; level <= max_level; level++) {
- unsigned int latency = dev_priv->wm.skl_latency[level];
-
- skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
- if (wm.min_ddb_alloc == U16_MAX)
- break;
-
- min_ddb_alloc = wm.min_ddb_alloc;
- }
-
- return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
-}
-
-static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
-{
- skl_ddb_entry_init(entry,
- REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
- REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
- if (entry->end)
- entry->end++;
-}
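
/*
 * A standalone sketch of the DDB register convention above, assuming
 * 12-bit start/end fields with the end field at bit 16: the hardware
 * stores an inclusive end block, so decoding adds one to a non-zero end
 * (see also skl_ddb_entry_write(), which subtracts one on the way back).
 */
#include <assert.h>
#include <stdint.h>

#define TOY_START(reg)	((reg) & 0xfff)
#define TOY_END(reg)	(((reg) >> 16) & 0xfff)

int main(void)
{
	uint32_t reg = (159u << 16) | 0;	/* start 0, inclusive end 159 */
	uint16_t start = TOY_START(reg);
	uint16_t end = TOY_END(reg);

	if (end)
		end++;				/* exclusive end, as in the driver */
	assert(start == 0 && end == 160);
	return 0;
}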
-
-static void
-skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
- const enum pipe pipe,
- const enum plane_id plane_id,
- struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
-{
- u32 val;
-
- /* Cursor doesn't support NV12/planar, so no extra calculation needed */
- if (plane_id == PLANE_CURSOR) {
- val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(ddb, val);
- return;
- }
-
- val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(ddb, val);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- return;
-
- val = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
- skl_ddb_entry_init_from_hw(ddb_y, val);
-}
-
-static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb,
- struct skl_ddb_entry *ddb_y)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_display_power_domain power_domain;
- enum pipe pipe = crtc->pipe;
- intel_wakeref_t wakeref;
- enum plane_id plane_id;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return;
-
- for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(dev_priv, pipe,
- plane_id,
- &ddb[plane_id],
- &ddb_y[plane_id]);
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-}
-
-struct dbuf_slice_conf_entry {
- u8 active_pipes;
- u8 dbuf_mask[I915_MAX_PIPES];
- bool join_mbus;
-};
-
-/*
- * Table taken from Bspec 12716.
- * Pipes have some preferred DBuf slice affinity,
- * plus there are some hardcoded requirements on how
- * those should be distributed for multipipe scenarios.
- * With more DBuf slices the algorithm can get even messier
- * and less readable, so we use a table almost as-is from
- * the BSpec itself - that way it is at least easier to
- * compare, change and check.
- */
-static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
-/* Autogenerated with igt/tools/intel_dbuf_map tool: */
-{
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {}
-};
-
-/*
- * Table taken from Bspec 49255.
- * Pipes have some preferred DBuf slice affinity,
- * plus there are some hardcoded requirements on how
- * those should be distributed for multipipe scenarios.
- * With more DBuf slices the algorithm can get even messier
- * and less readable, so we use a table almost as-is from
- * the BSpec itself - that way it is at least easier to
- * compare, change and check.
- */
-static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
-/* Autogenerated with igt/tools/intel_dbuf_map tool: */
-{
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S1),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S1),
- [PIPE_C] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S2),
- },
- },
- {}
-};
-
-static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1),
- [PIPE_B] = BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3),
- [PIPE_D] = BIT(DBUF_S4),
- },
- },
- {}
-};
-
-static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
- /*
- * Keep the join_mbus cases first so check_mbus_joined()
- * will prefer them over the !join_mbus cases.
- */
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- .join_mbus = true,
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- .join_mbus = true,
- },
- {
- .active_pipes = BIT(PIPE_A),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- .join_mbus = false,
- },
- {
- .active_pipes = BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- .join_mbus = false,
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- },
- },
- {
- .active_pipes = BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {
- .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .dbuf_mask = {
- [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
- [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
- [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
- },
- },
- {}
-};
-
-static bool check_mbus_joined(u8 active_pipes,
- const struct dbuf_slice_conf_entry *dbuf_slices)
-{
- int i;
-
- for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
- if (dbuf_slices[i].active_pipes == active_pipes)
- return dbuf_slices[i].join_mbus;
- }
- return false;
-}
-
-static bool adlp_check_mbus_joined(u8 active_pipes)
-{
- return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
-}
-
-static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
- const struct dbuf_slice_conf_entry *dbuf_slices)
-{
- int i;
-
- for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
- if (dbuf_slices[i].active_pipes == active_pipes &&
- dbuf_slices[i].join_mbus == join_mbus)
- return dbuf_slices[i].dbuf_mask[pipe];
- }
- return 0;
-}
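
/*
 * A standalone sketch of the table lookup above, assuming a two-entry
 * toy table: entries are matched on the exact active-pipe mask plus the
 * join_mbus flag, and a zero active_pipes entry terminates the table.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_PIPES 2

struct toy_conf {
	uint8_t active_pipes;
	uint8_t dbuf_mask[TOY_PIPES];
	bool join_mbus;
};

static const struct toy_conf toy_dbufs[] = {
	{ .active_pipes = 0x1, .dbuf_mask = { [0] = 0x3 }, .join_mbus = true },
	{ .active_pipes = 0x3, .dbuf_mask = { [0] = 0x1, [1] = 0x2 } },
	{}
};

static uint8_t toy_slices(int pipe, uint8_t active_pipes, bool join_mbus)
{
	for (int i = 0; toy_dbufs[i].active_pipes; i++)
		if (toy_dbufs[i].active_pipes == active_pipes &&
		    toy_dbufs[i].join_mbus == join_mbus)
			return toy_dbufs[i].dbuf_mask[pipe];
	return 0;
}

int main(void)
{
	assert(toy_slices(0, 0x1, true) == 0x3);	/* single pipe, joined */
	assert(toy_slices(1, 0x3, false) == 0x2);	/* two pipes, split */
	assert(toy_slices(0, 0x2, false) == 0);		/* no matching entry */
	return 0;
}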
-
-/*
- * This function finds an entry with the same enabled pipe configuration and
- * returns the corresponding DBuf slice mask as stated in the BSpec for the
- * particular platform.
- */
-static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- /*
- * FIXME: For ICL this is still a bit unclear, as a previous BSpec
- * revision required calculating a "pipe ratio" to determine whether
- * one or two slices can be used for single pipe configurations, as an
- * additional constraint on top of the existing table.
- * However, based on recent info, it should not be "pipe ratio" but
- * rather the ratio between pixel_rate and cdclk with some additional
- * constants, so for now we use only the table until this is
- * clarified. This is also why the crtc_state param is still around -
- * we will need it once those additional constraints pop up.
- */
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- icl_allowed_dbufs);
-}
-
-static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- tgl_allowed_dbufs);
-}
-
-static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- adlp_allowed_dbufs);
-}
-
-static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
-{
- return compute_dbuf_slices(pipe, active_pipes, join_mbus,
- dg2_allowed_dbufs);
-}
-
-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- if (IS_DG2(dev_priv))
- return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (IS_ALDERLAKE_P(dev_priv))
- return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(dev_priv) == 12)
- return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- else if (DISPLAY_VER(dev_priv) == 11)
- return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
- /*
- * For anything else just return one slice for now.
- * Should be extended for other platforms.
- */
- return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0;
-}
-
-static bool
-use_minimal_wm0_only(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
-
- return DISPLAY_VER(i915) >= 13 &&
- crtc_state->uapi.async_flip &&
- plane->async_flip;
-}
-
-static u64
-skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum plane_id plane_id;
- u64 data_rate = 0;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
- continue;
-
- data_rate += crtc_state->rel_data_rate[plane_id];
-
- if (DISPLAY_VER(i915) < 11)
- data_rate += crtc_state->rel_data_rate_y[plane_id];
- }
-
- return data_rate;
-}
-
-static const struct skl_wm_level *
-skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id,
- int level)
-{
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- if (level == 0 && pipe_wm->use_sagv_wm)
- return &wm->sagv.wm0;
-
- return &wm->wm[level];
-}
-
-static const struct skl_wm_level *
-skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm,
- enum plane_id plane_id)
-{
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- if (pipe_wm->use_sagv_wm)
- return &wm->sagv.trans_wm;
-
- return &wm->trans_wm;
-}
-
-/*
- * We only disable the watermarks for each plane if
- * they exceed the ddb allocation of said plane. This
- * is done so that we don't end up touching cursor
- * watermarks needlessly when some other plane reduces
- * our max possible watermark level.
- *
- * Bspec has this to say about the PLANE_WM enable bit:
- * "All the watermarks at this level for all enabled
- * planes must be enabled before the level will be used."
- * So this is actually safe to do.
- */
-static void
-skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb)
-{
- if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb))
- memset(wm, 0, sizeof(*wm));
-}
-
-static void
-skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm,
- const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb)
-{
- if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) ||
- uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- memset(wm, 0, sizeof(*wm));
- memset(uv_wm, 0, sizeof(*uv_wm));
- }
-}
-
-static bool icl_need_wm1_wa(struct drm_i915_private *i915,
- enum plane_id plane_id)
-{
- /*
- * Wa_1408961008:icl, ehl
- * Wa_14012656716:tgl, adl
- * Underruns with WM1+ disabled
- */
- return DISPLAY_VER(i915) == 11 ||
- (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
-}
-
-struct skl_plane_ddb_iter {
- u64 data_rate;
- u16 start, size;
-};
-
-static void
-skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
- struct skl_ddb_entry *ddb,
- const struct skl_wm_level *wm,
- u64 data_rate)
-{
- u16 size, extra = 0;
-
- if (data_rate) {
- extra = min_t(u16, iter->size,
- DIV64_U64_ROUND_UP(iter->size * data_rate,
- iter->data_rate));
- iter->size -= extra;
- iter->data_rate -= data_rate;
- }
-
- /*
- * Keep ddb entry of all disabled planes explicitly zeroed
- * to avoid skl_ddb_add_affected_planes() adding them to
- * the state when other planes change their allocations.
- */
- size = wm->min_ddb_alloc + extra;
- if (size)
- iter->start = skl_ddb_entry_init(ddb, iter->start,
- iter->start + size);
-}
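
/*
 * A worked standalone sketch of the leftover-block distribution above,
 * assuming 100 spare blocks and a total relative data rate of 1000: a
 * plane with data rate 400 gets ceil(100 * 400 / 1000) = 40 extra
 * blocks, and both the spare pool and the remaining data rate shrink so
 * later planes split what is left.
 */
#include <assert.h>
#include <stdint.h>

struct toy_iter { uint64_t data_rate; uint16_t size; };

static uint16_t toy_extra(struct toy_iter *it, uint64_t data_rate)
{
	uint16_t extra = 0;

	if (data_rate) {
		uint64_t want = (it->size * data_rate + it->data_rate - 1) /
				it->data_rate;	/* DIV_ROUND_UP */
		extra = want < it->size ? want : it->size;
		it->size -= extra;
		it->data_rate -= data_rate;
	}
	return extra;
}

int main(void)
{
	struct toy_iter it = { .data_rate = 1000, .size = 100 };

	assert(toy_extra(&it, 400) == 40);	/* 40% of the spare pool */
	assert(toy_extra(&it, 600) == 60);	/* rest goes to the last plane */
	assert(it.size == 0 && it.data_rate == 0);
	return 0;
}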
-
-static int
-skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
- int num_active = hweight8(dbuf_state->active_pipes);
- struct skl_plane_ddb_iter iter;
- enum plane_id plane_id;
- u16 cursor_size;
- u32 blocks;
- int level;
-
- /* Clear the partitioning for disabled planes. */
- memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb));
- memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
-
- if (!crtc_state->hw.active)
- return 0;
-
- iter.start = alloc->start;
- iter.size = skl_ddb_entry_size(alloc);
- if (iter.size == 0)
- return 0;
-
- /* Allocate a fixed number of blocks for the cursor. */
- cursor_size = skl_cursor_allocation(crtc_state, num_active);
- iter.size -= cursor_size;
- skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
- alloc->end - cursor_size, alloc->end);
-
- iter.data_rate = skl_total_relative_data_rate(crtc_state);
-
- /*
- * Find the highest watermark level for which we can satisfy the block
- * requirement of active planes.
- */
- for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
- blocks = 0;
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (plane_id == PLANE_CURSOR) {
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
-
- if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) {
- drm_WARN_ON(&dev_priv->drm,
- wm->wm[level].min_ddb_alloc != U16_MAX);
- blocks = U32_MAX;
- break;
- }
- continue;
- }
-
- blocks += wm->wm[level].min_ddb_alloc;
- blocks += wm->uv_wm[level].min_ddb_alloc;
- }
-
- if (blocks <= iter.size) {
- iter.size -= blocks;
- break;
- }
- }
-
- if (level < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Requested display configuration exceeds system DDB limitations");
- drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
- blocks, iter.size);
- return -EINVAL;
- }
-
- /* avoid the WARN later when we don't allocate any extra DDB */
- if (iter.data_rate == 0)
- iter.size = 0;
-
- /*
- * Grant each plane the blocks it requires at the highest achievable
- * watermark level, plus an extra share of the leftover blocks
- * proportional to its relative data rate.
- */
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- const struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (plane_id == PLANE_CURSOR)
- continue;
-
- if (DISPLAY_VER(dev_priv) < 11 &&
- crtc_state->nv12_planes & BIT(plane_id)) {
- skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level],
- crtc_state->rel_data_rate_y[plane_id]);
- skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level],
- crtc_state->rel_data_rate[plane_id]);
- } else {
- skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level],
- crtc_state->rel_data_rate[plane_id]);
- }
- }
- drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
-
- /*
- * When we calculated watermark values we didn't know how high
- * of a level we'd actually be able to hit, so we just marked
- * all levels as "enabled." Go back now and disable the ones
- * that aren't actually possible.
- */
- for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (DISPLAY_VER(dev_priv) < 11 &&
- crtc_state->nv12_planes & BIT(plane_id))
- skl_check_nv12_wm_level(&wm->wm[level],
- &wm->uv_wm[level],
- ddb_y, ddb);
- else
- skl_check_wm_level(&wm->wm[level], ddb);
-
- if (icl_need_wm1_wa(dev_priv, plane_id) &&
- level == 1 && wm->wm[0].enable) {
- wm->wm[level].blocks = wm->wm[0].blocks;
- wm->wm[level].lines = wm->wm[0].lines;
- wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
- }
- }
- }
-
- /*
- * Go back and disable the transition and SAGV watermarks
- * if it turns out we don't have enough DDB blocks for them.
- */
- for_each_plane_id_on_crtc(crtc, plane_id) {
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
- struct skl_plane_wm *wm =
- &crtc_state->wm.skl.optimal.planes[plane_id];
-
- if (DISPLAY_VER(dev_priv) < 11 &&
- crtc_state->nv12_planes & BIT(plane_id)) {
- skl_check_wm_level(&wm->trans_wm, ddb_y);
- } else {
- WARN_ON(skl_ddb_entry_size(ddb_y));
-
- skl_check_wm_level(&wm->trans_wm, ddb);
- }
-
- skl_check_wm_level(&wm->sagv.wm0, ddb);
- skl_check_wm_level(&wm->sagv.trans_wm, ddb);
- }
-
- return 0;
-}
-
-/*
- * The max latency should be 257 (the max the punit can code is 255 and we add
- * 2us for the read latency) and cpp should always be <= 8, which should allow
- * a pixel_rate of up to ~2 GHz. That seems sufficient, since the max 2xcdclk
- * is 1350 MHz and the pixel rate should never exceed that.
- */
-static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
- u8 cpp, u32 latency, u32 dbuf_block_size)
-{
- u32 wm_intermediate_val;
- uint_fixed_16_16_t ret;
-
- if (latency == 0)
- return FP_16_16_MAX;
-
- wm_intermediate_val = latency * pixel_rate * cpp;
- ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
-
- if (DISPLAY_VER(dev_priv) >= 10)
- ret = add_fixed16_u32(ret, 1);
-
- return ret;
-}
-
-static uint_fixed_16_16_t
-skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
- uint_fixed_16_16_t plane_blocks_per_line)
-{
- u32 wm_intermediate_val;
- uint_fixed_16_16_t ret;
-
- if (latency == 0)
- return FP_16_16_MAX;
-
- wm_intermediate_val = latency * pixel_rate;
- wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
- pipe_htotal * 1000);
- ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
- return ret;
-}
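
/*
 * A worked standalone sketch of the two watermark methods above, in
 * plain floating point rather than the driver's 16.16 fixed point,
 * assuming pixel_rate in kHz, latency in us, a 1920-wide plane with
 * cpp 4, htotal 2200 and a 512-byte DDB block: method 1 counts blocks
 * fetched during the memory latency, method 2 counts whole lines
 * crossed during that latency.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double pixel_rate = 300000;	/* kHz, i.e. 300 MHz */
	double latency = 10;		/* us */
	double width = 1920, cpp = 4, block = 512, htotal = 2200;
	double blocks_per_line = ceil(width * cpp / block);	/* 15 */

	double m1 = latency * pixel_rate * cpp / (1000 * block);
	double m2 = ceil(latency * pixel_rate / (htotal * 1000)) *
		    blocks_per_line;
	double linetime = htotal * 1000 / pixel_rate;	/* ~7.33 us */

	printf("method1 = %.2f blocks\n", m1);	/* 23.44 */
	printf("method2 = %.2f blocks\n", m2);	/* 2 lines * 15 = 30.00 */
	/* latency (10 us) >= linetime, so method 2 would win here */
	printf("linetime = %.2f us\n", linetime);
	return 0;
}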
-
-static uint_fixed_16_16_t
-intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- u32 pixel_rate;
- u32 crtc_htotal;
- uint_fixed_16_16_t linetime_us;
-
- if (!crtc_state->hw.active)
- return u32_to_fixed16(0);
-
- pixel_rate = crtc_state->pixel_rate;
-
- if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0))
- return u32_to_fixed16(0);
-
- crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
- linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
-
- return linetime_us;
-}
-
-static int
-skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
- int width, const struct drm_format_info *format,
- u64 modifier, unsigned int rotation,
- u32 plane_pixel_rate, struct skl_wm_params *wp,
- int color_plane)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 interm_pbpl;
-
- /* only planar format has two planes */
- if (color_plane == 1 &&
- !intel_format_info_is_yuv_semiplanar(format, modifier)) {
- drm_dbg_kms(&dev_priv->drm,
- "Non planar format have single plane\n");
- return -EINVAL;
- }
-
- wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
- modifier == I915_FORMAT_MOD_4_TILED ||
- modifier == I915_FORMAT_MOD_Yf_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
- wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
-
- wp->width = width;
- if (color_plane == 1 && wp->is_planar)
- wp->width /= 2;
-
- wp->cpp = format->cpp[color_plane];
- wp->plane_pixel_rate = plane_pixel_rate;
-
- if (DISPLAY_VER(dev_priv) >= 11 &&
- modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
- wp->dbuf_block_size = 256;
- else
- wp->dbuf_block_size = 512;
-
- if (drm_rotation_90_or_270(rotation)) {
- switch (wp->cpp) {
- case 1:
- wp->y_min_scanlines = 16;
- break;
- case 2:
- wp->y_min_scanlines = 8;
- break;
- case 4:
- wp->y_min_scanlines = 4;
- break;
- default:
- MISSING_CASE(wp->cpp);
- return -EINVAL;
- }
- } else {
- wp->y_min_scanlines = 4;
- }
-
- if (skl_needs_memory_bw_wa(dev_priv))
- wp->y_min_scanlines *= 2;
-
- wp->plane_bytes_per_line = wp->width * wp->cpp;
- if (wp->y_tiled) {
- interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
- wp->y_min_scanlines,
- wp->dbuf_block_size);
-
- if (DISPLAY_VER(dev_priv) >= 10)
- interm_pbpl++;
-
- wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
- wp->y_min_scanlines);
- } else {
- interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
- wp->dbuf_block_size);
-
- if (!wp->x_tiled || DISPLAY_VER(dev_priv) >= 10)
- interm_pbpl++;
-
- wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
- }
-
- wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
- wp->plane_blocks_per_line);
-
- wp->linetime_us = fixed16_to_u32_round_up(
- intel_get_linetime_us(crtc_state));
-
- return 0;
-}
-
-static int
-skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- struct skl_wm_params *wp, int color_plane)
-{
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int width;
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- width = drm_rect_width(&plane_state->uapi.src) >> 16;
-
- return skl_compute_wm_params(crtc_state, width,
- fb->format, fb->modifier,
- plane_state->hw.rotation,
- intel_plane_pixel_rate(crtc_state, plane_state),
- wp, color_plane);
-}
-
-static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
-{
- if (DISPLAY_VER(dev_priv) >= 10)
- return true;
-
- /* The number of lines is ignored for the level 0 watermark. */
- return level > 0;
-}
-
-static int skl_wm_max_lines(struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 13)
- return 255;
- else
- return 31;
-}
-
-static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- int level,
- unsigned int latency,
- const struct skl_wm_params *wp,
- const struct skl_wm_level *result_prev,
- struct skl_wm_level *result /* out */)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- uint_fixed_16_16_t method1, method2;
- uint_fixed_16_16_t selected_result;
- u32 blocks, lines, min_ddb_alloc = 0;
-
- if (latency == 0 ||
- (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
- /* reject it */
- result->min_ddb_alloc = U16_MAX;
- return;
- }
-
- /*
- * WaIncreaseLatencyIPCEnabled: kbl,cfl
- * Display WA #1141: kbl,cfl
- */
- if ((IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv)) &&
- dev_priv->ipc_enabled)
- latency += 4;
-
- if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled)
- latency += 15;
-
- method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
- wp->cpp, latency, wp->dbuf_block_size);
- method2 = skl_wm_method2(wp->plane_pixel_rate,
- crtc_state->hw.pipe_mode.crtc_htotal,
- latency,
- wp->plane_blocks_per_line);
-
- if (wp->y_tiled) {
- selected_result = max_fixed16(method2, wp->y_tile_minimum);
- } else {
- if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
- wp->dbuf_block_size < 1) &&
- (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
- selected_result = method2;
- } else if (latency >= wp->linetime_us) {
- if (DISPLAY_VER(dev_priv) == 9)
- selected_result = min_fixed16(method1, method2);
- else
- selected_result = method2;
- } else {
- selected_result = method1;
- }
- }
-
- blocks = fixed16_to_u32_round_up(selected_result) + 1;
- /*
- * Let's have blocks at minimum equivalent to plane_blocks_per_line,
- * as there will be at least one line in the lines configuration. This
- * is a workaround for FIFO underruns observed with resolutions like
- * 4k 60 Hz in single channel DRAM configurations.
- *
- * As per Bspec 49325, if the ddb allocation can hold at least
- * one plane_blocks_per_line, we should have selected method2 in
- * the above logic. Assuming that modern versions have enough dbuf
- * and method2 guarantees blocks equivalent to at least 1 line,
- * select the blocks as plane_blocks_per_line.
- *
- * TODO: Revisit the logic when we have a better understanding of DRAM
- * channels' impact on the level 0 memory latency and the relevant
- * wm calculations.
- */
- if (skl_wm_has_lines(dev_priv, level))
- blocks = max(blocks,
- fixed16_to_u32_round_up(wp->plane_blocks_per_line));
- lines = div_round_up_fixed16(selected_result,
- wp->plane_blocks_per_line);
-
- if (DISPLAY_VER(dev_priv) == 9) {
- /* Display WA #1125: skl,bxt,kbl */
- if (level == 0 && wp->rc_surface)
- blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
-
- /* Display WA #1126: skl,bxt,kbl */
- if (level >= 1 && level <= 7) {
- if (wp->y_tiled) {
- blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
- lines += wp->y_min_scanlines;
- } else {
- blocks++;
- }
-
- /*
- * Make sure the result blocks for higher latency levels are
- * at least as high as the level below the current level.
- * This is an assumption in the DDB algorithm optimization for
- * special cases. Also covers Display WA #1125 for RC.
- */
- if (result_prev->blocks > blocks)
- blocks = result_prev->blocks;
- }
- }
-
- if (DISPLAY_VER(dev_priv) >= 11) {
- if (wp->y_tiled) {
- int extra_lines;
-
- if (lines % wp->y_min_scanlines == 0)
- extra_lines = wp->y_min_scanlines;
- else
- extra_lines = wp->y_min_scanlines * 2 -
- lines % wp->y_min_scanlines;
-
- min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
- wp->plane_blocks_per_line);
- } else {
- min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
- }
- }
-
- if (!skl_wm_has_lines(dev_priv, level))
- lines = 0;
-
- if (lines > skl_wm_max_lines(dev_priv)) {
- /* reject it */
- result->min_ddb_alloc = U16_MAX;
- return;
- }
-
- /*
- * If lines is valid, assume we can use this watermark level
- * for now. We'll come back and disable it after we calculate the
- * DDB allocation if it turns out we don't actually have enough
- * blocks to satisfy it.
- */
- result->blocks = blocks;
- result->lines = lines;
- /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
- result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
- result->enable = true;
-
- if (DISPLAY_VER(dev_priv) < 12 && dev_priv->sagv_block_time_us)
- result->can_sagv = latency >= dev_priv->sagv_block_time_us;
-}
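
/*
 * A worked standalone sketch of the gen11+ minimum-DDB estimate above
 * for a Y-tiled plane: lines are padded up to the next-plus-one
 * multiple of y_min_scanlines, e.g. 10 lines with y_min_scanlines 4
 * pad to 16, and the result is that many lines' worth of blocks
 * (assuming 15 blocks per line here).
 */
#include <assert.h>

int main(void)
{
	int lines = 10, y_min_scanlines = 4, blocks_per_line = 15;
	int extra;

	if (lines % y_min_scanlines == 0)
		extra = y_min_scanlines;
	else
		extra = y_min_scanlines * 2 - lines % y_min_scanlines;

	assert(extra == 6);	/* 10 % 4 == 2, so 8 - 2 */
	assert((lines + extra) * blocks_per_line == 240);
	return 0;
}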
-
-static void
-skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- const struct skl_wm_params *wm_params,
- struct skl_wm_level *levels)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct skl_wm_level *result_prev = &levels[0];
-
- for (level = 0; level <= max_level; level++) {
- struct skl_wm_level *result = &levels[level];
- unsigned int latency = dev_priv->wm.skl_latency[level];
-
- skl_compute_plane_wm(crtc_state, plane, level, latency,
- wm_params, result_prev, result);
-
- result_prev = result;
- }
-}
-
-static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
- struct intel_plane *plane,
- const struct skl_wm_params *wm_params,
- struct skl_plane_wm *plane_wm)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
- struct skl_wm_level *levels = plane_wm->wm;
- unsigned int latency = 0;
-
- if (dev_priv->sagv_block_time_us)
- latency = dev_priv->sagv_block_time_us + dev_priv->wm.skl_latency[0];
-
- skl_compute_plane_wm(crtc_state, plane, 0, latency,
- wm_params, &levels[0],
- sagv_wm);
-}
-
-static void skl_compute_transition_wm(struct drm_i915_private *dev_priv,
- struct skl_wm_level *trans_wm,
- const struct skl_wm_level *wm0,
- const struct skl_wm_params *wp)
-{
- u16 trans_min, trans_amount, trans_y_tile_min;
- u16 wm0_blocks, trans_offset, blocks;
-
- /* Transition WMs don't make any sense if IPC is disabled */
- if (!dev_priv->ipc_enabled)
- return;
-
- /*
- * WaDisableTWM:skl,kbl,cfl,bxt
- * Transition WMs are not recommended by the HW team for GEN9
- */
- if (DISPLAY_VER(dev_priv) == 9)
- return;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- trans_min = 4;
- else
- trans_min = 14;
-
- /* Display WA #1140: glk,cnl */
- if (DISPLAY_VER(dev_priv) == 10)
- trans_amount = 0;
- else
- trans_amount = 10; /* This is a configurable amount */
-
- trans_offset = trans_min + trans_amount;
-
- /*
- * The spec asks for Selected Result Blocks for wm0 (the real value),
- * not Result Blocks (the integer value). Pay attention to the capital
- * letters. The value wm_l0->blocks is actually Result Blocks, but
- * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
- * and since we later will have to get the ceiling of the sum in the
- * transition watermarks calculation, we can just pretend Selected
- * Result Blocks is Result Blocks minus 1 and it should work for the
- * current platforms.
- */
- wm0_blocks = wm0->blocks - 1;
-
- if (wp->y_tiled) {
- trans_y_tile_min =
- (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
- blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
- } else {
- blocks = wm0_blocks + trans_offset;
- }
- blocks++;
-
- /*
- * Just assume we can enable the transition watermark. After
- * computing the DDB we'll come back and disable it if that
- * assumption turns out to be false.
- */
- trans_wm->blocks = blocks;
- trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1);
- trans_wm->enable = true;
-}
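
/*
 * A worked standalone sketch of the transition watermark sizing above,
 * assuming a gen11+ part (trans_min 4, trans_amount 10) with wm0 at 24
 * result blocks: Selected Result Blocks is approximated as wm0 - 1, the
 * Y-tile floor is 2 * y_tile_minimum (assumed to be 18 here), and one
 * block is added back at the end.
 */
#include <assert.h>

int main(void)
{
	int trans_offset = 4 + 10;	/* trans_min + trans_amount */
	int wm0_blocks = 24 - 1;	/* Selected Result Blocks */
	int trans_y_tile_min = 2 * 18;	/* 2 * y_tile_minimum */
	int y_tiled_blocks, linear_blocks;

	y_tiled_blocks = (wm0_blocks > trans_y_tile_min ?
			  wm0_blocks : trans_y_tile_min) + trans_offset + 1;
	linear_blocks = wm0_blocks + trans_offset + 1;

	assert(y_tiled_blocks == 51);	/* max(23, 36) + 14 + 1 */
	assert(linear_blocks == 38);	/* 23 + 14 + 1 */
	return 0;
}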
-
-static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- struct intel_plane *plane, int color_plane)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
- struct skl_wm_params wm_params;
- int ret;
-
- ret = skl_compute_plane_wm_params(crtc_state, plane_state,
- &wm_params, color_plane);
- if (ret)
- return ret;
-
- skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm);
-
- skl_compute_transition_wm(dev_priv, &wm->trans_wm,
- &wm->wm[0], &wm_params);
-
- if (DISPLAY_VER(dev_priv) >= 12) {
- tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm);
-
- skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm,
- &wm->sagv.wm0, &wm_params);
- }
-
- return 0;
-}
-
-static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- struct intel_plane *plane)
-{
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id];
- struct skl_wm_params wm_params;
- int ret;
-
- wm->is_planar = true;
-
-	/* UV plane watermarks must also be validated for NV12/planar formats */
- ret = skl_compute_plane_wm_params(crtc_state, plane_state,
- &wm_params, 1);
- if (ret)
- return ret;
-
- skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm);
-
- return 0;
-}
-
-static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- enum plane_id plane_id = plane->id;
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- int ret;
-
- memset(wm, 0, sizeof(*wm));
-
- if (!intel_wm_plane_visible(crtc_state, plane_state))
- return 0;
-
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane, 0);
- if (ret)
- return ret;
-
- if (fb->format->is_yuv && fb->format->num_planes > 1) {
- ret = skl_build_plane_wm_uv(crtc_state, plane_state,
- plane);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum plane_id plane_id = plane->id;
- struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
- int ret;
-
-	/* Watermarks are calculated by the master plane */
- if (plane_state->planar_slave)
- return 0;
-
- memset(wm, 0, sizeof(*wm));
-
- if (plane_state->planar_linked_plane) {
- const struct drm_framebuffer *fb = plane_state->hw.fb;
-
- drm_WARN_ON(&dev_priv->drm,
- !intel_wm_plane_visible(crtc_state, plane_state));
- drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv ||
- fb->format->num_planes == 1);
-
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane_state->planar_linked_plane, 0);
- if (ret)
- return ret;
-
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane, 1);
- if (ret)
- return ret;
- } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
- ret = skl_build_plane_wm_single(crtc_state, plane_state,
- plane, 0);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int skl_build_pipe_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- int ret, i;
-
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- /*
-		 * FIXME: we should perhaps check {old,new}_plane_crtc->hw.crtc
-		 * instead, but we don't populate that correctly for NV12 Y
-		 * planes, so hack around it for now.
- */
- if (plane->pipe != crtc->pipe)
- continue;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- ret = icl_build_plane_wm(crtc_state, plane_state);
- else
- ret = skl_build_plane_wm(crtc_state, plane_state);
- if (ret)
- return ret;
- }
-
- crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw;
-
- return 0;
-}
-
-static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const struct skl_ddb_entry *entry)
-{
- if (entry->end)
- intel_de_write_fw(dev_priv, reg,
- PLANE_BUF_END(entry->end - 1) |
- PLANE_BUF_START(entry->start));
- else
- intel_de_write_fw(dev_priv, reg, 0);
-}
-
-static void skl_write_wm_level(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const struct skl_wm_level *level)
-{
- u32 val = 0;
-
- if (level->enable)
- val |= PLANE_WM_EN;
- if (level->ignore_lines)
- val |= PLANE_WM_IGNORE_LINES;
- val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks);
- val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines);
-
- intel_de_write_fw(dev_priv, reg, val);
-}
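
skl_write_wm_level() above packs one watermark level into a single register write. A standalone sketch of the same field packing; the bit positions below are illustrative stand-ins, not the real PLANE_WM layout, and FIELD_PREP is a simplified, GCC-specific imitation of the kernel's REG_FIELD_PREP():

#include <stdio.h>

#define WM_EN			(1u << 31)
#define WM_IGNORE_LINES		(1u << 30)
#define WM_LINES_MASK		0x07ffe000u	/* illustrative placement */
#define WM_BLOCKS_MASK		0x00000fffu	/* illustrative placement */
/* Simplified REG_FIELD_PREP(): shift the value to the mask's low bit. */
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	unsigned int val = 0;

	val |= WM_EN;
	val |= FIELD_PREP(WM_BLOCKS_MASK, 42);
	val |= FIELD_PREP(WM_LINES_MASK, 3);

	printf("packed WM level: 0x%08x\n", val); /* 0x8000602a */
	return 0;
}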
-
-void skl_write_plane_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- const struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
-
- for (level = 0; level <= max_level; level++)
- skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
- skl_plane_wm_level(pipe_wm, plane_id, level));
-
- skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
- skl_plane_trans_wm(pipe_wm, plane_id));
-
- if (HAS_HW_SAGV_WM(dev_priv)) {
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id),
- &wm->sagv.wm0);
- skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id),
- &wm->sagv.trans_wm);
- }
-
- skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, plane_id), ddb);
-
- if (DISPLAY_VER(dev_priv) < 11)
- skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y);
-}
-
-void skl_write_cursor_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(dev_priv);
- enum plane_id plane_id = plane->id;
- enum pipe pipe = plane->pipe;
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- const struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
-
- for (level = 0; level <= max_level; level++)
- skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
- skl_plane_wm_level(pipe_wm, plane_id, level));
-
- skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe),
- skl_plane_trans_wm(pipe_wm, plane_id));
-
- if (HAS_HW_SAGV_WM(dev_priv)) {
- const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-
- skl_write_wm_level(dev_priv, CUR_WM_SAGV(pipe),
- &wm->sagv.wm0);
- skl_write_wm_level(dev_priv, CUR_WM_SAGV_TRANS(pipe),
- &wm->sagv.trans_wm);
- }
-
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
-}
-
-static bool skl_wm_level_equals(const struct skl_wm_level *l1,
- const struct skl_wm_level *l2)
-{
- return l1->enable == l2->enable &&
- l1->ignore_lines == l2->ignore_lines &&
- l1->lines == l2->lines &&
- l1->blocks == l2->blocks;
-}
-
-static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
- const struct skl_plane_wm *wm1,
- const struct skl_plane_wm *wm2)
-{
- int level, max_level = ilk_wm_max_level(dev_priv);
-
- for (level = 0; level <= max_level; level++) {
- /*
- * We don't check uv_wm as the hardware doesn't actually
- * use it. It only gets used for calculating the required
- * ddb allocation.
- */
- if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
- return false;
- }
-
- return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) &&
- skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) &&
- skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm);
-}
-
-static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
- const struct skl_ddb_entry *b)
-{
- return a->start < b->end && b->start < a->end;
-}
-
-static void skl_ddb_entry_union(struct skl_ddb_entry *a,
- const struct skl_ddb_entry *b)
-{
- if (a->end && b->end) {
- a->start = min(a->start, b->start);
- a->end = max(a->end, b->end);
- } else if (b->end) {
- a->start = b->start;
- a->end = b->end;
- }
-}
-
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry *entries,
- int num_entries, int ignore_idx)
-{
- int i;
-
- for (i = 0; i < num_entries; i++) {
- if (i != ignore_idx &&
- skl_ddb_entries_overlap(ddb, &entries[i]))
- return true;
- }
-
- return false;
-}
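
The DDB helpers treat entries as half-open intervals: skl_ddb_entry_write() above stores end - 1 in the register, so two entries that merely touch do not overlap. A standalone sketch of the overlap test with hypothetical entries:

#include <stdio.h>

struct ddb_entry { unsigned short start, end; }; /* half-open: [start, end) */

/* Mirrors skl_ddb_entries_overlap(). */
static int overlaps(const struct ddb_entry *a, const struct ddb_entry *b)
{
	return a->start < b->end && b->start < a->end;
}

int main(void)
{
	struct ddb_entry a = { 0, 512 }, b = { 512, 1024 }, c = { 500, 600 };

	printf("a/b overlap: %d\n", overlaps(&a, &b)); /* 0: adjacent only */
	printf("a/c overlap: %d\n", overlaps(&a, &c)); /* 1 */
	return 0;
}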
-
-static int
-skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *new_crtc_state)
-{
- struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state);
- struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- struct intel_plane_state *plane_state;
- enum plane_id plane_id = plane->id;
-
- if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id],
- &new_crtc_state->wm.skl.plane_ddb[plane_id]) &&
- skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
- &new_crtc_state->wm.skl.plane_ddb_y[plane_id]))
- continue;
-
- plane_state = intel_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
-
- new_crtc_state->update_planes |= BIT(plane_id);
- }
-
- return 0;
-}
-
-static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state)
-{
- struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev);
- u8 enabled_slices;
- enum pipe pipe;
-
- /*
- * FIXME: For now we always enable slice S1 as per
- * the Bspec display initialization sequence.
- */
- enabled_slices = BIT(DBUF_S1);
-
- for_each_pipe(dev_priv, pipe)
- enabled_slices |= dbuf_state->slices[pipe];
-
- return enabled_slices;
-}
-
-static int
-skl_compute_ddb(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dbuf_state *old_dbuf_state;
- struct intel_dbuf_state *new_dbuf_state = NULL;
- const struct intel_crtc_state *old_crtc_state;
- struct intel_crtc_state *new_crtc_state;
- struct intel_crtc *crtc;
- int ret, i;
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- new_dbuf_state = intel_atomic_get_dbuf_state(state);
- if (IS_ERR(new_dbuf_state))
- return PTR_ERR(new_dbuf_state);
-
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- break;
- }
-
- if (!new_dbuf_state)
- return 0;
-
- new_dbuf_state->active_pipes =
- intel_calc_active_pipes(state, old_dbuf_state->active_pipes);
-
- if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) {
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
-
- if (HAS_MBUS_JOINING(dev_priv))
- new_dbuf_state->joined_mbus =
- adlp_check_mbus_joined(new_dbuf_state->active_pipes);
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum pipe pipe = crtc->pipe;
-
- new_dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
- new_dbuf_state->joined_mbus);
-
- if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
- continue;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
-
- new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
-
- if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
- old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
- ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
-
- if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
- /* TODO: Implement vblank synchronized MBUS joining changes */
- ret = intel_modeset_all_pipes(state);
- if (ret)
- return ret;
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n",
- old_dbuf_state->enabled_slices,
- new_dbuf_state->enabled_slices,
- INTEL_INFO(dev_priv)->display.dbuf.slice_mask,
- str_yes_no(old_dbuf_state->joined_mbus),
- str_yes_no(new_dbuf_state->joined_mbus));
- }
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- enum pipe pipe = crtc->pipe;
-
- new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state);
-
- if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe])
- continue;
-
- ret = intel_atomic_lock_global_state(&new_dbuf_state->base);
- if (ret)
- return ret;
- }
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- ret = skl_crtc_allocate_ddb(state, crtc);
- if (ret)
- return ret;
- }
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- ret = skl_crtc_allocate_plane_ddb(state, crtc);
- if (ret)
- return ret;
-
- ret = skl_ddb_add_affected_planes(old_crtc_state,
- new_crtc_state);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static char enast(bool enable)
-{
- return enable ? '*' : ' ';
-}
-
-static void
-skl_print_wm_changes(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_crtc_state *old_crtc_state;
- const struct intel_crtc_state *new_crtc_state;
- struct intel_plane *plane;
- struct intel_crtc *crtc;
- int i;
-
- if (!drm_debug_enabled(DRM_UT_KMS))
- return;
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
-
- old_pipe_wm = &old_crtc_state->wm.skl.optimal;
- new_pipe_wm = &new_crtc_state->wm.skl.optimal;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- enum plane_id plane_id = plane->id;
- const struct skl_ddb_entry *old, *new;
-
- old = &old_crtc_state->wm.skl.plane_ddb[plane_id];
- new = &new_crtc_state->wm.skl.plane_ddb[plane_id];
-
- if (skl_ddb_entry_equal(old, new))
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
- plane->base.base.id, plane->base.name,
- old->start, old->end, new->start, new->end,
- skl_ddb_entry_size(old), skl_ddb_entry_size(new));
- }
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- enum plane_id plane_id = plane->id;
- const struct skl_plane_wm *old_wm, *new_wm;
-
- old_wm = &old_pipe_wm->planes[plane_id];
- new_wm = &new_pipe_wm->planes[plane_id];
-
- if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
- continue;
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm"
- " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable),
- enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable),
- enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable),
- enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable),
- enast(old_wm->trans_wm.enable),
- enast(old_wm->sagv.wm0.enable),
- enast(old_wm->sagv.trans_wm.enable),
- enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable),
- enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable),
- enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable),
- enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable),
- enast(new_wm->trans_wm.enable),
- enast(new_wm->sagv.wm0.enable),
- enast(new_wm->sagv.trans_wm.enable));
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d"
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n",
- plane->base.base.id, plane->base.name,
- enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines,
- enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines,
- enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines,
- enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines,
- enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines,
- enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines,
- enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines,
- enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines,
- enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines,
- enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines,
- enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines,
- enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines,
- enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines,
- enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines,
- enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines,
- enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines,
- enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines,
- enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines,
- enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines,
- enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines,
- enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines,
- enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines);
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].blocks, old_wm->wm[1].blocks,
- old_wm->wm[2].blocks, old_wm->wm[3].blocks,
- old_wm->wm[4].blocks, old_wm->wm[5].blocks,
- old_wm->wm[6].blocks, old_wm->wm[7].blocks,
- old_wm->trans_wm.blocks,
- old_wm->sagv.wm0.blocks,
- old_wm->sagv.trans_wm.blocks,
- new_wm->wm[0].blocks, new_wm->wm[1].blocks,
- new_wm->wm[2].blocks, new_wm->wm[3].blocks,
- new_wm->wm[4].blocks, new_wm->wm[5].blocks,
- new_wm->wm[6].blocks, new_wm->wm[7].blocks,
- new_wm->trans_wm.blocks,
- new_wm->sagv.wm0.blocks,
- new_wm->sagv.trans_wm.blocks);
-
- drm_dbg_kms(&dev_priv->drm,
- "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d"
- " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n",
- plane->base.base.id, plane->base.name,
- old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
- old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
- old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
- old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
- old_wm->trans_wm.min_ddb_alloc,
- old_wm->sagv.wm0.min_ddb_alloc,
- old_wm->sagv.trans_wm.min_ddb_alloc,
- new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
- new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
- new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
- new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
- new_wm->trans_wm.min_ddb_alloc,
- new_wm->sagv.wm0.min_ddb_alloc,
- new_wm->sagv.trans_wm.min_ddb_alloc);
- }
- }
-}
-
-static bool skl_plane_selected_wm_equals(struct intel_plane *plane,
- const struct skl_pipe_wm *old_pipe_wm,
- const struct skl_pipe_wm *new_pipe_wm)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- int level, max_level = ilk_wm_max_level(i915);
-
- for (level = 0; level <= max_level; level++) {
- /*
- * We don't check uv_wm as the hardware doesn't actually
- * use it. It only gets used for calculating the required
- * ddb allocation.
- */
- if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level),
- skl_plane_wm_level(new_pipe_wm, plane->id, level)))
- return false;
- }
-
- if (HAS_HW_SAGV_WM(i915)) {
- const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id];
- const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id];
-
- if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) ||
- !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm))
- return false;
- }
-
- return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id),
- skl_plane_trans_wm(new_pipe_wm, plane->id));
-}
-
-/*
- * To make sure the cursor watermark registers are always consistent
- * with our computed state the following scenario needs special
- * treatment:
- *
- * 1. enable cursor
- * 2. move cursor entirely offscreen
- * 3. disable cursor
- *
- * Step 2. does call .disable_plane() but does not zero the watermarks
- * (since we consider an offscreen cursor still active for the purposes
- * of watermarks). Step 3. would not normally call .disable_plane()
- * because the actual plane visibility isn't changing, and we don't
- * deallocate the cursor ddb until the pipe gets disabled. So we must
- * force step 3. to call .disable_plane() to update the watermark
- * registers properly.
- *
- * Other planes do not suffer from this issue as their watermarks are
- * calculated based on the actual plane visibility. The only time this
- * can trigger for the other planes is during the initial readout as the
- * default value of the watermarks registers is not zero.
- */
-static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- struct intel_plane_state *plane_state;
- enum plane_id plane_id = plane->id;
-
- /*
- * Force a full wm update for every plane on modeset.
- * Required because the reset value of the wm registers
- * is non-zero, whereas we want all disabled planes to
- * have zero watermarks. So if we turn off the relevant
- * power well the hardware state will go out of sync
- * with the software state.
- */
- if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
- skl_plane_selected_wm_equals(plane,
- &old_crtc_state->wm.skl.optimal,
- &new_crtc_state->wm.skl.optimal))
- continue;
-
- plane_state = intel_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
-
- new_crtc_state->update_planes |= BIT(plane_id);
- }
-
- return 0;
-}
-
-static int
-skl_compute_wm(struct intel_atomic_state *state)
-{
- struct intel_crtc *crtc;
- struct intel_crtc_state *new_crtc_state;
- int ret, i;
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- ret = skl_build_pipe_wm(state, crtc);
- if (ret)
- return ret;
- }
-
- ret = skl_compute_ddb(state);
- if (ret)
- return ret;
-
- ret = intel_compute_sagv_mask(state);
- if (ret)
- return ret;
-
- /*
- * skl_compute_ddb() will have adjusted the final watermarks
- * based on how much ddb is available. Now we can actually
- * check if the final watermarks changed.
- */
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- ret = skl_wm_add_affected_planes(state, crtc);
- if (ret)
- return ret;
- }
-
- skl_print_wm_changes(state);
-
- return 0;
-}
-
static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
struct intel_wm_config *config)
{
@@ -6458,10 +3583,10 @@ static void ilk_initial_watermarks(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
@@ -6474,210 +3599,17 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
if (!crtc_state->wm.need_postvbl_update)
return;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
ilk_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
-}
-
-static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level)
-{
- level->enable = val & PLANE_WM_EN;
- level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
- level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val);
- level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val);
-}
-
-static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
- struct skl_pipe_wm *out)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- int level, max_level;
- enum plane_id plane_id;
- u32 val;
-
- max_level = ilk_wm_max_level(dev_priv);
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_plane_wm *wm = &out->planes[plane_id];
-
- for (level = 0; level <= max_level; level++) {
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level));
- else
- val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level));
-
- skl_wm_level_from_reg_val(val, &wm->wm[level]);
- }
-
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id));
- else
- val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe));
-
- skl_wm_level_from_reg_val(val, &wm->trans_wm);
-
- if (HAS_HW_SAGV_WM(dev_priv)) {
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore,
- PLANE_WM_SAGV(pipe, plane_id));
- else
- val = intel_uncore_read(&dev_priv->uncore,
- CUR_WM_SAGV(pipe));
-
- skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
-
- if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&dev_priv->uncore,
- PLANE_WM_SAGV_TRANS(pipe, plane_id));
- else
- val = intel_uncore_read(&dev_priv->uncore,
- CUR_WM_SAGV_TRANS(pipe));
-
- skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
- } else if (DISPLAY_VER(dev_priv) >= 12) {
- wm->sagv.wm0 = wm->wm[0];
- wm->sagv.trans_wm = wm->trans_wm;
- }
- }
-}
-
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
-{
- struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(dev_priv->dbuf.obj.state);
- struct intel_crtc *crtc;
-
- if (HAS_MBUS_JOINING(dev_priv))
- dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- enum pipe pipe = crtc->pipe;
- unsigned int mbus_offset;
- enum plane_id plane_id;
- u8 slices;
-
- skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
- crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
-
- memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- struct skl_ddb_entry *ddb =
- &crtc_state->wm.skl.plane_ddb[plane_id];
- struct skl_ddb_entry *ddb_y =
- &crtc_state->wm.skl.plane_ddb_y[plane_id];
-
- skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
- plane_id, ddb, ddb_y);
-
- skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb);
- skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y);
- }
-
- dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
-
- /*
- * Used for checking overlaps, so we need absolute
- * offsets instead of MBUS relative offsets.
- */
- slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
- dbuf_state->joined_mbus);
- mbus_offset = mbus_ddb_offset(dev_priv, slices);
- crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
- crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
-
- /* The slices actually used by the planes on the pipe */
- dbuf_state->slices[pipe] =
- skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
-
- drm_dbg_kms(&dev_priv->drm,
- "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
- crtc->base.base.id, crtc->base.name,
- dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start,
- dbuf_state->ddb[pipe].end, dbuf_state->active_pipes,
- str_yes_no(dbuf_state->joined_mbus));
- }
-
- dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
-}
-
-static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
-{
- const struct intel_dbuf_state *dbuf_state =
- to_intel_dbuf_state(i915->dbuf.obj.state);
- struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- entries[crtc->pipe] = crtc_state->wm.skl.ddb;
- }
-
- for_each_intel_crtc(&i915->drm, crtc) {
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- u8 slices;
-
- slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
- dbuf_state->joined_mbus);
- if (dbuf_state->slices[crtc->pipe] & ~slices)
- return true;
-
- if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
- I915_MAX_PIPES, crtc->pipe))
- return true;
- }
-
- return false;
-}
-
-void skl_wm_sanitize(struct drm_i915_private *i915)
-{
- struct intel_crtc *crtc;
-
- /*
- * On TGL/RKL (at least) the BIOS likes to assign the planes
- * to the wrong DBUF slices. This will cause an infinite loop
- * in skl_commit_modeset_enables() as it can't find a way to
-	 * transition from the old bogus DBUF layout to the new
- * proper DBUF layout without DBUF allocation overlaps between
- * the planes (which cannot be allowed or else the hardware
- * may hang). If we detect a bogus DBUF layout just turn off
- * all the planes so that skl_commit_modeset_enables() can
- * simply ignore them.
- */
- if (!skl_dbuf_is_misconfigured(i915))
- return;
-
- drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
-
- if (plane_state->uapi.visible)
- intel_plane_disable_noatomic(crtc, plane);
-
- drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
-
- memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
- }
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct ilk_wm_values *hw = &dev_priv->wm.hw;
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
@@ -6825,7 +3757,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct g4x_wm_values *wm = &dev_priv->wm.g4x;
+ struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
struct intel_crtc *crtc;
g4x_read_wm_values(dev_priv, wm);
@@ -6919,7 +3851,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_crtc *crtc =
@@ -6967,12 +3899,12 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
g4x_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct vlv_wm_values *wm = &dev_priv->wm.vlv;
+ struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
struct intel_crtc *crtc;
u32 val;
@@ -7006,7 +3938,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
- dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+ dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5;
} else {
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
if ((val & FORCE_DDR_HIGH_FREQ) == 0)
@@ -7075,7 +4007,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
struct intel_plane *plane;
struct intel_crtc *crtc;
- mutex_lock(&dev_priv->wm.wm_mutex);
+ mutex_lock(&dev_priv->display.wm.wm_mutex);
for_each_intel_plane(&dev_priv->drm, plane) {
struct intel_crtc *crtc =
@@ -7116,7 +4048,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
vlv_program_watermarks(dev_priv);
- mutex_unlock(&dev_priv->wm.wm_mutex);
+ mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
/*
@@ -7137,7 +4069,7 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct ilk_wm_values *hw = &dev_priv->wm.hw;
+ struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
struct intel_crtc *crtc;
ilk_init_lp_watermarks(dev_priv);
@@ -7166,168 +4098,6 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
-void intel_wm_state_verify(struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct skl_hw_state {
- struct skl_ddb_entry ddb[I915_MAX_PLANES];
- struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
- struct skl_pipe_wm wm;
- } *hw;
- const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
- int level, max_level = ilk_wm_max_level(dev_priv);
- struct intel_plane *plane;
- u8 hw_enabled_slices;
-
- if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
- return;
-
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return;
-
- skl_pipe_wm_get_hw_state(crtc, &hw->wm);
-
- skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y);
-
- hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
-
- if (DISPLAY_VER(dev_priv) >= 11 &&
- hw_enabled_slices != dev_priv->dbuf.enabled_slices)
- drm_err(&dev_priv->drm,
- "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
- dev_priv->dbuf.enabled_slices,
- hw_enabled_slices);
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- const struct skl_wm_level *hw_wm_level, *sw_wm_level;
-
- /* Watermarks */
- for (level = 0; level <= max_level; level++) {
- hw_wm_level = &hw->wm.planes[plane->id].wm[level];
- sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
-
- if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
- continue;
-
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name, level,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
- sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
-
- if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0;
-
- if (HAS_HW_SAGV_WM(dev_priv) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm;
- sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm;
-
- if (HAS_HW_SAGV_WM(dev_priv) &&
- !skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- plane->base.base.id, plane->base.name,
- sw_wm_level->enable,
- sw_wm_level->blocks,
- sw_wm_level->lines,
- hw_wm_level->enable,
- hw_wm_level->blocks,
- hw_wm_level->lines);
- }
-
- /* DDB */
- hw_ddb_entry = &hw->ddb[PLANE_CURSOR];
- sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR];
-
- if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- drm_err(&dev_priv->drm,
- "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
- plane->base.base.id, plane->base.name,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
- }
- }
-
- kfree(hw);
-}
-
-void intel_enable_ipc(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- if (!HAS_IPC(dev_priv))
- return;
-
- val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
-
- if (dev_priv->ipc_enabled)
- val |= DISP_IPC_ENABLE;
- else
- val &= ~DISP_IPC_ENABLE;
-
- intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
-}
-
-static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv)
-{
- /* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(dev_priv))
- return false;
-
- /* Display WA #1141: SKL:all KBL:all CFL */
- if (IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv) ||
- IS_COMETLAKE(dev_priv))
- return dev_priv->dram_info.symmetric_memory;
-
- return true;
-}
-
-void intel_init_ipc(struct drm_i915_private *dev_priv)
-{
- if (!HAS_IPC(dev_priv))
- return;
-
- dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv);
-
- intel_enable_ipc(dev_priv);
-}
-
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -7435,7 +4205,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
- if (dev_priv->vbt.fdi_rx_polarity_inverted)
+ if (dev_priv->display.vbt.fdi_rx_polarity_inverted)
val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
@@ -7586,9 +4356,8 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */
- if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
- IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv))
+ /* Wa_1409120013 */
+ if (DISPLAY_VER(dev_priv) == 12)
intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
DPFC_CHICKEN_COMP_DUMMY_PIXEL);
@@ -7965,7 +4734,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(dev_priv))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate);
+ intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D(dev_priv), dspclk_gate);
g4x_disable_trickle_feed(dev_priv);
}
@@ -7976,7 +4745,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
- intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
+ intel_uncore_write(uncore, DSPCLK_GATE_D(dev_priv), 0);
intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
intel_uncore_write16(uncore, DEUC, 0);
intel_uncore_write(uncore,
@@ -8168,18 +4937,14 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
}
}
-static const struct drm_i915_wm_disp_funcs skl_wm_funcs = {
- .compute_global_watermarks = skl_compute_wm,
-};
-
-static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = {
+static const struct intel_wm_funcs ilk_wm_funcs = {
.compute_pipe_wm = ilk_compute_pipe_wm,
.compute_intermediate_wm = ilk_compute_intermediate_wm,
.initial_watermarks = ilk_initial_watermarks,
.optimize_watermarks = ilk_optimize_watermarks,
};
-static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
+static const struct intel_wm_funcs vlv_wm_funcs = {
.compute_pipe_wm = vlv_compute_pipe_wm,
.compute_intermediate_wm = vlv_compute_intermediate_wm,
.initial_watermarks = vlv_initial_watermarks,
@@ -8187,67 +4952,67 @@ static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
.atomic_update_watermarks = vlv_atomic_update_fifo,
};
-static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = {
+static const struct intel_wm_funcs g4x_wm_funcs = {
.compute_pipe_wm = g4x_compute_pipe_wm,
.compute_intermediate_wm = g4x_compute_intermediate_wm,
.initial_watermarks = g4x_initial_watermarks,
.optimize_watermarks = g4x_optimize_watermarks,
};
-static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = {
+static const struct intel_wm_funcs pnv_wm_funcs = {
.update_wm = pnv_update_wm,
};
-static const struct drm_i915_wm_disp_funcs i965_wm_funcs = {
+static const struct intel_wm_funcs i965_wm_funcs = {
.update_wm = i965_update_wm,
};
-static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = {
+static const struct intel_wm_funcs i9xx_wm_funcs = {
.update_wm = i9xx_update_wm,
};
-static const struct drm_i915_wm_disp_funcs i845_wm_funcs = {
+static const struct intel_wm_funcs i845_wm_funcs = {
.update_wm = i845_update_wm,
};
-static const struct drm_i915_wm_disp_funcs nop_funcs = {
+static const struct intel_wm_funcs nop_funcs = {
};
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
+ if (DISPLAY_VER(dev_priv) >= 9) {
+ skl_wm_init(dev_priv);
+ return;
+ }
+
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
pnv_get_mem_freq(dev_priv);
else if (GRAPHICS_VER(dev_priv) == 5)
ilk_get_mem_freq(dev_priv);
- intel_sagv_init(dev_priv);
-
/* For FIFO watermark updates */
- if (DISPLAY_VER(dev_priv) >= 9) {
- skl_setup_wm_latency(dev_priv);
- dev_priv->wm_disp = &skl_wm_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
- if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->wm.pri_latency[1] &&
- dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
- dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
- dev_priv->wm_disp = &ilk_wm_funcs;
+ if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] &&
+ dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) ||
+ (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] &&
+ dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) {
+ dev_priv->display.funcs.wm = &ilk_wm_funcs;
} else {
drm_dbg_kms(&dev_priv->drm,
"Failed to read display plane latency. "
"Disable CxSR\n");
- dev_priv->wm_disp = &nop_funcs;
+ dev_priv->display.funcs.wm = &nop_funcs;
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
- dev_priv->wm_disp = &vlv_wm_funcs;
+ dev_priv->display.funcs.wm = &vlv_wm_funcs;
} else if (IS_G4X(dev_priv)) {
g4x_setup_wm_latency(dev_priv);
- dev_priv->wm_disp = &g4x_wm_funcs;
+ dev_priv->display.funcs.wm = &g4x_wm_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
@@ -8261,22 +5026,22 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
intel_set_memory_cxsr(dev_priv, false);
- dev_priv->wm_disp = &nop_funcs;
+ dev_priv->display.funcs.wm = &nop_funcs;
} else
- dev_priv->wm_disp = &pnv_wm_funcs;
+ dev_priv->display.funcs.wm = &pnv_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->wm_disp = &i965_wm_funcs;
+ dev_priv->display.funcs.wm = &i965_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->wm_disp = &i9xx_wm_funcs;
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 2) {
if (INTEL_NUM_PIPES(dev_priv) == 1)
- dev_priv->wm_disp = &i845_wm_funcs;
+ dev_priv->display.funcs.wm = &i845_wm_funcs;
else
- dev_priv->wm_disp = &i9xx_wm_funcs;
+ dev_priv->display.funcs.wm = &i9xx_wm_funcs;
} else {
drm_err(&dev_priv->drm,
"unexpected fall-through in %s\n", __func__);
- dev_priv->wm_disp = &nop_funcs;
+ dev_priv->display.funcs.wm = &nop_funcs;
}
}
@@ -8285,183 +5050,3 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
dev_priv->runtime_pm.suspended = false;
atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}
-
-static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
-{
- struct intel_dbuf_state *dbuf_state;
-
- dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL);
- if (!dbuf_state)
- return NULL;
-
- return &dbuf_state->base;
-}
-
-static void intel_dbuf_destroy_state(struct intel_global_obj *obj,
- struct intel_global_state *state)
-{
- kfree(state);
-}
-
-static const struct intel_global_state_funcs intel_dbuf_funcs = {
- .atomic_duplicate_state = intel_dbuf_duplicate_state,
- .atomic_destroy_state = intel_dbuf_destroy_state,
-};
-
-struct intel_dbuf_state *
-intel_atomic_get_dbuf_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_global_state *dbuf_state;
-
- dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj);
- if (IS_ERR(dbuf_state))
- return ERR_CAST(dbuf_state);
-
- return to_intel_dbuf_state(dbuf_state);
-}
-
-int intel_dbuf_init(struct drm_i915_private *dev_priv)
-{
- struct intel_dbuf_state *dbuf_state;
-
- dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL);
- if (!dbuf_state)
- return -ENOMEM;
-
- intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj,
- &dbuf_state->base, &intel_dbuf_funcs);
-
- return 0;
-}
-
-/*
- * Configure MBUS_CTL and the DBUF_CTL_S of each slice to the join_mbus state
- * before updating the request state of all DBUF slices.
- */
-static void update_mbus_pre_enable(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- u32 mbus_ctl, dbuf_min_tracker_val;
- enum dbuf_slice slice;
- const struct intel_dbuf_state *dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
-
- if (!HAS_MBUS_JOINING(dev_priv))
- return;
-
- /*
- * TODO: Implement vblank synchronized MBUS joining changes.
- * Must be properly coordinated with dbuf reprogramming.
- */
- if (dbuf_state->joined_mbus) {
- mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3);
- } else {
- mbus_ctl = MBUS_HASHING_MODE_2x2 |
- MBUS_JOIN_PIPE_SELECT_NONE;
- dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1);
- }
-
- intel_de_rmw(dev_priv, MBUS_CTL,
- MBUS_HASHING_MODE_MASK | MBUS_JOIN |
- MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl);
-
- for_each_dbuf_slice(dev_priv, slice)
- intel_de_rmw(dev_priv, DBUF_CTL_S(slice),
- DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
- dbuf_min_tracker_val);
-}
-
-void intel_dbuf_pre_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
- && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- update_mbus_pre_enable(state);
- gen9_dbuf_slices_update(dev_priv,
- old_dbuf_state->enabled_slices |
- new_dbuf_state->enabled_slices);
-}
-
-void intel_dbuf_post_plane_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state =
- intel_atomic_get_new_dbuf_state(state);
- const struct intel_dbuf_state *old_dbuf_state =
- intel_atomic_get_old_dbuf_state(state);
-
- if (!new_dbuf_state ||
- ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices)
- && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)))
- return;
-
- WARN_ON(!new_dbuf_state->base.changed);
-
- gen9_dbuf_slices_update(dev_priv,
- new_dbuf_state->enabled_slices);
-}
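
The pre/post pair above sequences dbuf slice power so both layouts stay usable while planes are reprogrammed: the pre-plane hook enables the union of old and new slices, and the post-plane hook trims back to the new set. A toy sketch of that sequencing (the slice masks are made up):

#include <stdio.h>

static void dbuf_slices_update(unsigned int slices)
{
	printf("powered dbuf slices: 0x%02x\n", slices);
}

int main(void)
{
	unsigned int old_slices = 0x3;	/* e.g. S1 | S2 */
	unsigned int new_slices = 0x1;	/* e.g. S1 only */

	/* pre-plane update: keep both layouts alive during the transition */
	dbuf_slices_update(old_slices | new_slices);	/* 0x03 */
	/* ... planes are reprogrammed here ... */
	/* post-plane update: trim to what the new layout needs */
	dbuf_slices_update(new_slices);			/* 0x01 */
	return 0;
}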
-
-void intel_mbus_dbox_update(struct intel_atomic_state *state)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
- const struct intel_crtc_state *new_crtc_state;
- const struct intel_crtc *crtc;
- u32 val = 0;
- int i;
-
- if (DISPLAY_VER(i915) < 11)
- return;
-
- new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
- old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
- if (!new_dbuf_state ||
- (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus &&
- new_dbuf_state->active_pipes == old_dbuf_state->active_pipes))
- return;
-
- if (DISPLAY_VER(i915) >= 12) {
- val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16);
- val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1);
- val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN;
- }
-
- /* Wa_22010947358:adl-p */
- if (IS_ALDERLAKE_P(i915))
- val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) :
- MBUS_DBOX_A_CREDIT(4);
- else
- val |= MBUS_DBOX_A_CREDIT(2);
-
- if (IS_ALDERLAKE_P(i915)) {
- val |= MBUS_DBOX_BW_CREDIT(2);
- val |= MBUS_DBOX_B_CREDIT(8);
- } else if (DISPLAY_VER(i915) >= 12) {
- val |= MBUS_DBOX_BW_CREDIT(2);
- val |= MBUS_DBOX_B_CREDIT(12);
- } else {
- val |= MBUS_DBOX_BW_CREDIT(1);
- val |= MBUS_DBOX_B_CREDIT(8);
- }
-
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
- if (!new_crtc_state->hw.active ||
- !intel_crtc_needs_modeset(new_crtc_state))
- continue;
-
- intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), val);
- }
-}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 945503ae493e..c09b872d65c8 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -8,22 +8,9 @@
#include <linux/types.h>
-#include "display/intel_display.h"
-#include "display/intel_global_state.h"
-
-#include "i915_drv.h"
-
-struct drm_device;
struct drm_i915_private;
-struct i915_request;
-struct intel_atomic_state;
-struct intel_bw_state;
-struct intel_crtc;
struct intel_crtc_state;
-struct intel_plane;
-struct skl_ddb_entry;
-struct skl_pipe_wm;
-struct skl_wm_level;
+struct intel_plane_state;
void intel_init_clock_gating(struct drm_i915_private *dev_priv);
void intel_suspend_hw(struct drm_i915_private *dev_priv);
@@ -34,56 +21,14 @@ void intel_pm_setup(struct drm_i915_private *dev_priv);
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void intel_wm_state_verify(struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state);
-u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
-u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry *entry);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-void skl_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
- const struct intel_bw_state *bw_state);
-void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
-void intel_sagv_post_plane_update(struct intel_atomic_state *state);
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry *entries,
- int num_entries, int ignore_idx);
-void skl_write_plane_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
-void skl_write_cursor_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv);
-void intel_init_ipc(struct drm_i915_private *dev_priv);
-void intel_enable_ipc(struct drm_i915_private *dev_priv);
+bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+ const char *name, const u16 wm[]);
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
-struct intel_dbuf_state {
- struct intel_global_state base;
-
- struct skl_ddb_entry ddb[I915_MAX_PIPES];
- unsigned int weight[I915_MAX_PIPES];
- u8 slices[I915_MAX_PIPES];
- u8 enabled_slices;
- u8 active_pipes;
- bool joined_mbus;
-};
-
-struct intel_dbuf_state *
-intel_atomic_get_dbuf_state(struct intel_atomic_state *state);
-
-#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base)
-#define intel_atomic_get_old_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
-#define intel_atomic_get_new_dbuf_state(state) \
- to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj))
-
-int intel_dbuf_init(struct drm_i915_private *dev_priv);
-void intel_dbuf_pre_plane_update(struct intel_atomic_state *state);
-void intel_dbuf_post_plane_update(struct intel_atomic_state *state);
-void intel_mbus_dbox_update(struct intel_atomic_state *state);
-
#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c7ef5f2ff2e1..5cd423c7b646 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -21,6 +21,7 @@
* IN THE SOFTWARE.
*/
+#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
#include "gt/intel_engine_regs.h"
@@ -44,29 +45,47 @@ fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
}
void
-intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
+intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
{
- spin_lock_init(&mmio_debug->lock);
- mmio_debug->unclaimed_mmio_check = 1;
+ spin_lock_init(&i915->mmio_debug.lock);
+ i915->mmio_debug.unclaimed_mmio_check = 1;
+
+ i915->uncore.debug = &i915->mmio_debug;
}
-static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
+static void mmio_debug_suspend(struct intel_uncore *uncore)
{
- lockdep_assert_held(&mmio_debug->lock);
+ if (!uncore->debug)
+ return;
+
+ spin_lock(&uncore->debug->lock);
/* Save and disable mmio debugging for the user bypass */
- if (!mmio_debug->suspend_count++) {
- mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
- mmio_debug->unclaimed_mmio_check = 0;
+ if (!uncore->debug->suspend_count++) {
+ uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
+ uncore->debug->unclaimed_mmio_check = 0;
}
+
+ spin_unlock(&uncore->debug->lock);
}
-static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
+static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
+
+static void mmio_debug_resume(struct intel_uncore *uncore)
{
- lockdep_assert_held(&mmio_debug->lock);
+ if (!uncore->debug)
+ return;
+
+ spin_lock(&uncore->debug->lock);
+
+ if (!--uncore->debug->suspend_count)
+ uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
- if (!--mmio_debug->suspend_count)
- mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
+ if (check_for_unclaimed_mmio(uncore))
+ drm_info(&uncore->i915->drm,
+ "Invalid mmio detected during user access\n");
+
+ spin_unlock(&uncore->debug->lock);
}
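
The refactored suspend/resume pair now hides the locking and relies on a nesting count, so only the outermost user-forcewake section disables unclaimed-MMIO checking and only the last resume restores it. A standalone sketch of that counting pattern:

#include <stdio.h>

struct dbg { int suspend_count; int check; int saved; };

static void dbg_suspend(struct dbg *d)
{
	if (!d->suspend_count++) {	/* only the first suspend saves */
		d->saved = d->check;
		d->check = 0;
	}
}

static void dbg_resume(struct dbg *d)
{
	if (!--d->suspend_count)	/* only the last resume restores */
		d->check = d->saved;
}

int main(void)
{
	struct dbg d = { 0, 1, 0 };

	dbg_suspend(&d); dbg_suspend(&d);	/* nested users */
	printf("checking while suspended: %d\n", d.check); /* 0 */
	dbg_resume(&d); dbg_resume(&d);
	printf("checking after resume:    %d\n", d.check); /* 1 */
	return 0;
}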
static const char * const forcewake_domain_names[] = {
@@ -677,9 +696,7 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
spin_lock_irq(&uncore->lock);
if (!uncore->user_forcewake_count++) {
intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
- spin_lock(&uncore->debug->lock);
- mmio_debug_suspend(uncore->debug);
- spin_unlock(&uncore->debug->lock);
+ mmio_debug_suspend(uncore);
}
spin_unlock_irq(&uncore->lock);
}
@@ -695,14 +712,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
spin_lock_irq(&uncore->lock);
if (!--uncore->user_forcewake_count) {
- spin_lock(&uncore->debug->lock);
- mmio_debug_resume(uncore->debug);
-
- if (check_for_unclaimed_mmio(uncore))
- drm_info(&uncore->i915->drm,
- "Invalid mmio detected during user access\n");
- spin_unlock(&uncore->debug->lock);
-
+ mmio_debug_resume(uncore);
intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
}
spin_unlock_irq(&uncore->lock);
@@ -918,6 +928,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
const struct intel_forcewake_range *entry;
+ if (IS_GSI_REG(offset))
+ offset += uncore->gsi_offset;
+
entry = BSEARCH(offset,
uncore->fw_domains_table,
uncore->fw_domains_table_entries,
@@ -1133,6 +1146,9 @@ static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
return false;
+ if (IS_GSI_REG(offset))
+ offset += uncore->gsi_offset;
+
return BSEARCH(offset,
uncore->shadowed_reg_table,
uncore->shadowed_reg_table_entries,
@@ -1704,7 +1720,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore,
const bool read,
const bool before)
{
- if (likely(!uncore->i915->params.mmio_debug))
+ if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
return;
/* interrupts are disabled and re-enabled around uncore->lock usage */
@@ -1985,8 +2001,8 @@ static int __fw_domain_init(struct intel_uncore *uncore,
d->uncore = uncore;
d->wake_count = 0;
- d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
- d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
+ d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
+ d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
d->id = domain_id;
@@ -2070,7 +2086,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
if (GRAPHICS_VER(i915) >= 11) {
/* we'll prune the domains of missing engines later */
- intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
+ intel_engine_mask_t emask = RUNTIME_INFO(i915)->platform_engine_mask;
int i;
uncore->fw_get_funcs = &uncore_get_fallback;
@@ -2223,6 +2239,11 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
+{
+ iounmap(regs);
+}
+
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
{
struct drm_i915_private *i915 = uncore->i915;
@@ -2251,12 +2272,7 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
return -EIO;
}
- return 0;
-}
-
-void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
-{
- iounmap(uncore->regs);
+ return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs);
}
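
This replaces the explicit intel_uncore_cleanup_mmio() call with a drm-managed release action: drmm_add_action_or_reset() registers a callback that runs when the drm_device is finally released, and if registration itself fails it invokes the callback immediately before returning the error. A minimal sketch of the idiom, with hypothetical names (needs <drm/drm_managed.h>):

/* Hypothetical driver code showing the drmm cleanup idiom. */
static void my_unmap(struct drm_device *drm, void *regs)
{
	iounmap(regs);
}

static int my_setup_mmio(struct drm_device *drm, phys_addr_t phys)
{
	void __iomem *regs = ioremap(phys, SZ_4K);

	if (!regs)
		return -EIO;

	/* On registration failure this unmaps immediately and returns < 0. */
	return drmm_add_action_or_reset(drm, my_unmap, regs);
}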
void intel_uncore_init_early(struct intel_uncore *uncore,
@@ -2266,7 +2282,6 @@ void intel_uncore_init_early(struct intel_uncore *uncore,
uncore->i915 = gt->i915;
uncore->gt = gt;
uncore->rpm = &gt->i915->runtime_pm;
- uncore->debug = &gt->i915->mmio_debug;
}
static void uncore_raw_init(struct intel_uncore *uncore)
@@ -2446,8 +2461,11 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
}
}
-void intel_uncore_fini_mmio(struct intel_uncore *uncore)
+/* Called via drm-managed action */
+void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
{
+ struct intel_uncore *uncore = data;
+
if (intel_uncore_has_forcewake(uncore)) {
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
@@ -2577,6 +2595,9 @@ bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
bool ret;
+ if (!uncore->debug)
+ return false;
+
spin_lock_irq(&uncore->debug->lock);
ret = check_for_unclaimed_mmio(uncore);
spin_unlock_irq(&uncore->debug->lock);
@@ -2589,6 +2610,9 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
bool ret = false;
+ if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
+ return false;
+
spin_lock_irq(&uncore->debug->lock);
if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index b1fa912a65e7..5022bac80b67 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -33,6 +33,7 @@
#include "i915_reg_defs.h"
+struct drm_device;
struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
@@ -135,6 +136,16 @@ struct intel_uncore {
spinlock_t lock; /** lock is also taken in irq contexts. */
+ /*
+ * Do we need to apply an additional offset to reach the beginning
+ * of the basic non-engine GT registers (referred to as "GSI" on
+ * newer platforms, or "GT block" on older platforms)? If so, we'll
+ * track that here and apply it transparently to registers in the
+ * appropriate range to maintain compatibility with our existing
+ * register definitions and GT code.
+ */
+ u32 gsi_offset;
+
unsigned int flags;
#define UNCORE_HAS_FORCEWAKE BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1)
@@ -210,8 +221,7 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
return uncore->flags & UNCORE_HAS_FIFO;
}
-void
-intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
+void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915);
void intel_uncore_init_early(struct intel_uncore *uncore,
struct intel_gt *gt);
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
@@ -221,7 +231,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
-void intel_uncore_fini_mmio(struct intel_uncore *uncore);
+void intel_uncore_fini_mmio(struct drm_device *dev, void *data);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);
@@ -294,19 +304,27 @@ intel_wait_for_register_fw(struct intel_uncore *uncore,
2, timeout_ms, NULL);
}
+#define IS_GSI_REG(reg) ((reg) < 0x40000)
+
/* register access functions */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
i915_reg_t reg) \
{ \
- return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
+ u32 offset = i915_mmio_reg_offset(reg); \
+ if (IS_GSI_REG(offset)) \
+ offset += uncore->gsi_offset; \
+ return read##s__(uncore->regs + offset); \
}
#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
i915_reg_t reg, u##x__ val) \
{ \
- write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
+ u32 offset = i915_mmio_reg_offset(reg); \
+ if (IS_GSI_REG(offset)) \
+ offset += uncore->gsi_offset; \
+ write##s__(val, uncore->regs + offset); \
}
__raw_read(8, b)
__raw_read(16, w)
@@ -447,6 +465,18 @@ static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
return (reg_val & mask) != expected_val ? -EINVAL : 0;
}
+/*
+ * The raw_reg_{read,write} macros are intended as a micro-optimization for
+ * interrupt handlers so that the pointer indirection on uncore->regs can
+ * be computed once (and presumably cached in a register) instead of generating
+ * extra load instructions for each MMIO access.
+ *
+ * Given that these macros are only intended for non-GSI interrupt registers
+ * (and the goal is to avoid extra instructions generated by the compiler),
+ * these macros do not account for uncore->gsi_offset. Any caller that needs
+ * to use these macros on a GSI register is responsible for adding the
+ * appropriate GSI offset to the 'base' parameter.
+ */
#define raw_reg_read(base, reg) \
readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
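
To summarize the GSI handling above: the __raw_uncore_* accessors transparently add uncore->gsi_offset for any register below 0x40000, while the raw_reg_* fast paths deliberately do not. A caller that wants the fast path on a GSI register would hoist the adjustment itself; a sketch with a hypothetical helper:

/* Hypothetical helper: pre-apply the GSI offset, then use the fast path. */
static inline u32 read_gsi_fast(struct intel_uncore *uncore, i915_reg_t reg)
{
	void __iomem *base = uncore->regs;

	if (IS_GSI_REG(i915_mmio_reg_offset(reg)))
		base += uncore->gsi_offset;	/* done once, outside the access */

	return raw_reg_read(base, reg);		/* no per-access offset check */
}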
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index 17109c513259..69cdaaddc4a9 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -169,11 +169,11 @@ static void pxp_queue_termination(struct intel_pxp *pxp)
* We want to get the same effect as if we received a termination
* interrupt, so just pretend that we did.
*/
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_pxp_mark_termination_in_progress(pxp);
pxp->session_events |= PXP_TERMINATION_REQUEST;
queue_work(system_unbound_wq, &pxp->session_work);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
static bool pxp_component_bound(struct intel_pxp *pxp)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index e888b5124a07..4359e8be4101 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -47,9 +47,9 @@ static int pxp_terminate_set(void *data, u64 val)
return -ENODEV;
/* simulate a termination interrupt */
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
if (!wait_for_completion_timeout(&pxp->termination,
msecs_to_jiffies(100)))
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index 04745f914407..c28be430718a 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -25,7 +25,7 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
return;
- lockdep_assert_held(&gt->irq_lock);
+ lockdep_assert_held(gt->irq_lock);
if (unlikely(!iir))
return;
@@ -55,16 +55,16 @@ static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
static inline void pxp_irq_reset(struct intel_gt *gt)
{
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
void intel_pxp_irq_enable(struct intel_pxp *pxp)
{
struct intel_gt *gt = pxp_to_gt(pxp);
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
if (!pxp->irq_enabled)
WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
@@ -72,7 +72,7 @@ void intel_pxp_irq_enable(struct intel_pxp *pxp)
__pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
pxp->irq_enabled = true;
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
}
void intel_pxp_irq_disable(struct intel_pxp *pxp)
@@ -88,12 +88,12 @@ void intel_pxp_irq_disable(struct intel_pxp *pxp)
*/
GEM_WARN_ON(intel_pxp_is_active(pxp));
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
pxp->irq_enabled = false;
__pxp_set_interrupts(gt, 0);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
intel_synchronize_irq(gt->i915);
pxp_irq_reset(gt);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 92b00b4de240..1bb5b5249157 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -144,9 +144,9 @@ void intel_pxp_session_work(struct work_struct *work)
intel_wakeref_t wakeref;
u32 events = 0;
- spin_lock_irq(&gt->irq_lock);
+ spin_lock_irq(gt->irq_lock);
events = fetch_and_zero(&pxp->session_events);
- spin_unlock_irq(&gt->irq_lock);
+ spin_unlock_irq(gt->irq_lock);
if (!events)
return;
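
The &gt->irq_lock to gt->irq_lock conversions across these PXP files all follow from irq_lock becoming a pointer on struct intel_gt, presumably so that several GT instances can share a single lock owned at the device level. A sketch of the assumed shape:

/* Assumed layout: the lock lives elsewhere and each GT points at it. */
struct gt_sketch {
	spinlock_t *irq_lock;	/* shared, not embedded */
};

static void irq_path(struct gt_sketch *gt)
{
	spin_lock_irq(gt->irq_lock);	/* note: no '&' once it is a pointer */
	/* ... service interrupt state ... */
	spin_unlock_irq(gt->irq_lock);
}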
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index c4e932368b37..39da0fb0d6d2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -135,7 +135,7 @@ static int __run_selftests(const char *name,
int err = 0;
while (!i915_selftest.random_seed)
- i915_selftest.random_seed = get_random_int();
+ i915_selftest.random_seed = get_random_u32();
i915_selftest.timeout_jiffies =
i915_selftest.timeout_ms ?
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9c31a16f8380..fff11c90f1fa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -115,6 +115,7 @@ static struct dev_pm_domain pm_domain = {
static void mock_gt_probe(struct drm_i915_private *i915)
{
i915->gt[0] = &i915->gt0;
+ i915->gt[0]->name = "Mock GT";
}
struct drm_i915_private *mock_gem_device(void)
@@ -172,14 +173,14 @@ struct drm_i915_private *mock_gem_device(void)
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
- mkwrite_device_info(i915)->graphics.ver = -1;
+ RUNTIME_INFO(i915)->graphics.ip.ver = -1;
- mkwrite_device_info(i915)->page_sizes =
+ RUNTIME_INFO(i915)->page_sizes =
I915_GTT_PAGE_SIZE_4K |
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
+ RUNTIME_INFO(i915)->memory_regions = REGION_SMEM;
intel_memory_regions_hw_probe(i915);
spin_lock_init(&i915->gpu_error.lock);
@@ -209,7 +210,7 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(to_gt(i915));
to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);
- mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
+ RUNTIME_INFO(i915)->platform_engine_mask = BIT(0);
to_gt(i915)->info.engine_mask = BIT(0);
to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 630a4e301ef6..508a6d994e83 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -11,7 +11,6 @@
#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 2d72cc5ddaba..6b6d5335c834 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -157,7 +157,7 @@ static void mtk_dither_config(struct device *dev, unsigned int w,
{
struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev);
- mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
+ mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE);
mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs,
DISP_REG_DITHER_CFG);
mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG,
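
The fix swaps the packing so the width lands in the high half-word, matching DISP_REG_DITHER_SIZE's layout. A worked example with a hypothetical 1920x1080 mode:

/* Hypothetical 1920x1080 mode: width in bits 31:16, height in bits 15:0. */
const u32 w = 1920, h = 1080;
const u32 size = (w << 16) | h;	/* 0x07800438; the old (h << 16) | w gave 0x04380780 */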
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 5f02f8d0e4fc..91f58db5915f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -833,11 +833,8 @@ static int mtk_drm_sys_prepare(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
- int ret;
-
- ret = drm_mode_config_helper_suspend(drm);
- return ret;
+ return drm_mode_config_helper_suspend(drm);
}
static void mtk_drm_sys_complete(struct device *dev)
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 9cc406e1eee1..3b7d13028fb6 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -685,6 +685,16 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
+ /*
+ * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
+ * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+ * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
+ * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+ * after dsi is fully set.
+ */
+ mtk_dsi_stop(dsi);
+
+ mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
mtk_dsi_reset_engine(dsi);
mtk_dsi_lane0_ulp_mode_enter(dsi);
mtk_dsi_clk_ulp_mode_enter(dsi);
@@ -735,17 +745,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
if (!dsi->enabled)
return;
- /*
- * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
- * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
- * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
- * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
- * after dsi is fully set.
- */
- mtk_dsi_stop(dsi);
-
- mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
-
dsi->enabled = false;
}
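
Moving the stop into mtk_dsi_poweroff() ties it to the clock/refcount teardown rather than the enable flag. Under the assumption that the bridge's post-disable path drops the final refcount, the resulting order looks roughly like this:

/*
 * Sketch of the assumed disable flow after this change:
 *
 *   mtk_output_dsi_disable()       -> only clears dsi->enabled
 *   (crtc atomic disable runs with vblank irq still available)
 *   mtk_dsi_poweroff(), last ref   -> mtk_dsi_stop()   (irq off only now)
 *                                  -> mtk_dsi_switch_to_cmd_mode()
 *                                  -> reset engine, ULP entry, clock teardown
 */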
@@ -808,10 +807,13 @@ static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
.attach = mtk_dsi_bridge_attach,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_disable = mtk_dsi_bridge_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_enable = mtk_dsi_bridge_atomic_enable,
.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.mode_set = mtk_dsi_bridge_mode_set,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 3196189429bc..4c80b6896dc3 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -16,7 +16,6 @@
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index dfd6a9f33dda..815dfe30492b 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -169,7 +169,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
- (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+ (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index bb7e109534de..d4b907889a21 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
- writel((m[11] & 0x1fff) << 16,
+ writel((m[11] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 4d38b8e18030..ece6cd102dbb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -263,7 +263,11 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 0);
+ /*
+ * FIXME: A 24-bit color depth does not work with 24 bpp on
+ * G200ER. Force 32 bpp.
+ */
+ drm_fbdev_generic_setup(dev, 32);
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 0ab0e1dd8bbb..2c8b9899625b 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -68,7 +68,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 0c6b2a6d0b4c..7cb8d9849c07 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -62,7 +62,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index b03e2c413ab1..beea4a7fc1df 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -1413,6 +1413,10 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00
#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
+#define REG_A6XX_RBBM_GBIF_HALT 0x00000016
+
+#define REG_A6XX_RBBM_GBIF_HALT_ACK 0x00000017
+
#define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c
#define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 310a317885a1..e033d6a67a20 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -873,9 +873,47 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
(val & 1), 100, 1000);
}
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ /* Halt the gx side of GBIF */
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
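
Each halt above follows the same request/acknowledge handshake: write the halt mask, busy-wait until the ack register reflects it, and (for GBIF) clear the request afterwards. A condensed sketch with a hypothetical helper:

/* Hypothetical helper condensing the write-then-poll-ack pattern. */
static void halt_and_wait(struct msm_gpu *gpu, u32 halt_reg, u32 ack_reg, u32 mask)
{
	gpu_write(gpu, halt_reg, mask);				/* request the halt */
	spin_until((gpu_read(gpu, ack_reg) & mask) == mask);	/* wait for ack */
}

/* e.g. halt_and_wait(gpu, REG_A6XX_GBIF_HALT, REG_A6XX_GBIF_HALT_ACK, GBIF_CLIENT_HALT_MASK); */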
+
/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
/* Flush all the queues */
a6xx_hfi_stop(gmu);
@@ -887,6 +925,15 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);
+
+ /* Halt the gmu cm3 core */
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+ a6xx_bus_clear_pending_transactions(adreno_gpu);
+
+ /* Reset GPU core blocks */
+ gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
+ udelay(100);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
@@ -1014,36 +1061,6 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
return true;
}
-#define GBIF_CLIENT_HALT_MASK BIT(0)
-#define GBIF_ARB_HALT_MASK BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
- struct msm_gpu *gpu = &adreno_gpu->base;
-
- if (!a6xx_has_gbif(adreno_gpu)) {
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
- 0xf) == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
- return;
- }
-
- /* Halt new client requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
- /* Halt all AXI requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
- /* The GBIF halt needs to be explicitly cleared */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
@@ -1069,7 +1086,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
a6xx_bus_clear_pending_transactions(adreno_gpu);
/* tell the GMU we want to slumber */
- a6xx_gmu_notify_slumber(gmu);
+ ret = a6xx_gmu_notify_slumber(gmu);
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
ret = gmu_poll_timeout(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 4d501100b9e4..fdc578016e0b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/devfreq.h>
+#include <linux/reset.h>
#include <linux/soc/qcom/llcc-qcom.h>
#define GPU_PAS_ID 13
@@ -146,7 +147,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, 0x31);
+ OUT_RING(ring, CACHE_INVALIDATE);
if (!sysprof) {
/*
@@ -987,6 +988,10 @@ static int hw_init(struct msm_gpu *gpu)
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ /* Clear GBIF halt in case GX domain was not collapsed */
+ if (a6xx_has_gbif(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
+
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
/*
@@ -1261,7 +1266,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- int i;
+ int i, active_submits;
adreno_dump_info(gpu);
@@ -1272,14 +1277,46 @@ static void a6xx_recover(struct msm_gpu *gpu)
if (hang_debug)
a6xx_dump(gpu);
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
/*
* Turn off keep alive that might have been enabled by the hang
* interrupt
*/
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
- gpu->funcs->pm_suspend(gpu);
- gpu->funcs->pm_resume(gpu);
+ pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
+
+ /* active_submits won't change until we make a submission */
+ mutex_lock(&gpu->active_lock);
+ active_submits = gpu->active_submits;
+
+ /*
+ * Temporarily clear active_submits count to silence a WARN() in the
+ * runtime suspend cb
+ */
+ gpu->active_submits = 0;
+
+ /* Drop the rpm refcount from active submits */
+ if (active_submits)
+ pm_runtime_put(&gpu->pdev->dev);
+
+ /* And the final one from recover worker */
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ /* Call into gpucc driver to poll for cx gdsc collapse */
+ reset_control_reset(gpu->cx_collapse);
+
+ pm_runtime_use_autosuspend(&gpu->pdev->dev);
+
+ if (active_submits)
+ pm_runtime_get(&gpu->pdev->dev);
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ gpu->active_submits = active_submits;
+ mutex_unlock(&gpu->active_lock);
msm_gpu_hw_init(gpu);
}
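
The recover path leans on an invariant of the msm runtime-PM scheme: one pm_runtime reference is held while any submits are in flight, plus one taken by the recover worker itself. Dropping both forces the power domain off so reset_control_reset() can poll for the cx gdsc collapse, after which the references are re-taken. A condensed sketch, assuming that invariant:

/* Sketch: full power-cycle by dropping every held runtime-PM reference. */
static void force_power_cycle(struct msm_gpu *gpu, int active_submits)
{
	if (active_submits)
		pm_runtime_put(&gpu->pdev->dev);	/* ref held for in-flight submits */
	pm_runtime_put_sync(&gpu->pdev->dev);		/* recover worker's own ref */

	reset_control_reset(gpu->cx_collapse);		/* gpucc polls gdsc collapse */

	if (active_submits)
		pm_runtime_get(&gpu->pdev->dev);
	pm_runtime_get_sync(&gpu->pdev->dev);		/* power the GPU back up */
}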
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 781dcd3fb283..13ce321283ff 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -412,7 +412,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
- u32 flush_mask;
uint32_t stage_idx, lm_idx;
int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
bool bg_alpha_enable = false;
@@ -420,6 +419,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
memset(fetch_active, 0, sizeof(fetch_active));
drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum dpu_sspp sspp_idx;
+
state = plane->state;
if (!state)
continue;
@@ -430,14 +431,14 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
pstate = to_dpu_plane_state(state);
fb = state->fb;
- dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
- set_bit(dpu_plane_pipe(plane), fetch_active);
+ sspp_idx = dpu_plane_pipe(plane);
+ set_bit(sspp_idx, fetch_active);
DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
pstate->stage,
plane->base.id,
- dpu_plane_pipe(plane) - SSPP_VIG0,
+ sspp_idx - SSPP_VIG0,
state->fb ? state->fb->base.id : -1);
format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
@@ -447,13 +448,13 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
stage_idx = zpos_cnt[pstate->stage]++;
stage_cfg->stage[pstate->stage][stage_idx] =
- dpu_plane_pipe(plane);
+ sspp_idx;
stage_cfg->multirect_index[pstate->stage][stage_idx] =
pstate->multirect_index;
trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
state, pstate, stage_idx,
- dpu_plane_pipe(plane) - SSPP_VIG0,
+ sspp_idx - SSPP_VIG0,
format->base.pixel_format,
fb ? fb->modifier : 0);
@@ -462,7 +463,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
pstate, format);
- mixer[lm_idx].flush_mask |= flush_mask;
+ mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl,
+ sspp_idx);
if (bg_alpha_enable && !format->alpha_enable)
mixer[lm_idx].mixer_op_mode = 0;
@@ -496,7 +498,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
for (i = 0; i < cstate->num_mixers; i++) {
mixer[i].mixer_op_mode = 0;
- mixer[i].flush_mask = 0;
if (mixer[i].lm_ctl->ops.clear_all_blendstages)
mixer[i].lm_ctl->ops.clear_all_blendstages(
mixer[i].lm_ctl);
@@ -513,17 +514,14 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
- mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
- mixer[i].hw_lm->idx);
-
/* stage config flush mask */
- ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+ ctl->ops.update_pending_flush_mixer(ctl,
+ mixer[i].hw_lm->idx);
- DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+ DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
mixer[i].hw_lm->idx - LM_0,
mixer[i].mixer_op_mode,
- ctl->idx - CTL_0,
- mixer[i].flush_mask);
+ ctl->idx - CTL_0);
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
&stage_cfg);
@@ -767,16 +765,9 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
dspp->ops.setup_pcc(dspp, &cfg);
}
- mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
- mixer[i].hw_dspp->idx);
-
/* stage config flush mask */
- ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
-
- DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
- mixer[i].hw_lm->idx - DSPP_0,
- ctl->idx - CTL_0,
- mixer[i].flush_mask);
+ ctl->ops.update_pending_flush_dspp(ctl,
+ mixer[i].hw_dspp->idx);
}
}
@@ -1235,17 +1226,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
for (i = 1; i < SSPP_MAX; i++) {
- if (pipe_staged[i]) {
+ if (pipe_staged[i])
dpu_plane_clear_multirect(pipe_staged[i]);
-
- if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
- DPU_ERROR(
- "r1 only virt plane:%d not supported\n",
- pipe_staged[i]->plane->base.id);
- rc = -EINVAL;
- goto end;
- }
- }
}
z_pos = -1;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 9b67645c2574..539b68b1626a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -97,7 +97,6 @@ struct dpu_crtc_mixer {
struct dpu_hw_ctl *lm_ctl;
struct dpu_hw_dspp *hw_dspp;
u32 mixer_op_mode;
- u32 flush_mask;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index c682d4e02d1b..9c6817b5a194 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -162,7 +162,7 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
- * @dsc: msm_display_dsc_config pointer, for DSC-enabled encoders
+ * @dsc: drm_dsc_config pointer, for DSC-enabled encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
@@ -208,7 +208,7 @@ struct dpu_encoder_virt {
bool wide_bus_en;
/* DSC configuration */
- struct msm_display_dsc_config *dsc;
+ struct drm_dsc_config *dsc;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@@ -1791,12 +1791,12 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
}
static u32
-dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
+dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
u32 enc_ip_width)
{
int ssm_delay, total_pixels, soft_slice_per_enc;
- soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
+ soft_slice_per_enc = enc_ip_width / dsc->slice_width;
/*
* minimum number of initial line pixels is a sum of:
@@ -1808,16 +1808,16 @@ dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
* 5. 6 additional pixels as the output of the rate buffer is
* 48 bits wide
*/
- ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
- total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
+ ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
+ total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
if (soft_slice_per_enc > 1)
total_pixels += (ssm_delay * 3);
- return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
+ return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}
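
Plugging hypothetical numbers into the calculation above, for an 8 bpc panel (ssm_delay = 84) with initial_xmit_delay = 512, slice_width = 540 and enc_ip_width = 1080:

/*
 * soft_slice_per_enc = 1080 / 540 = 2
 * total_pixels = 84 * 3 + 512 + 47 = 811
 * soft_slice_per_enc > 1, so total_pixels += 84 * 3  ->  1063
 * initial lines = DIV_ROUND_UP(1063, 540) = 2
 */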
static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
struct dpu_hw_pingpong *hw_pp,
- struct msm_display_dsc_config *dsc,
+ struct drm_dsc_config *dsc,
u32 common_mode,
u32 initial_lines)
{
@@ -1835,7 +1835,7 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
}
static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
- struct msm_display_dsc_config *dsc)
+ struct drm_dsc_config *dsc)
{
/* coding only for 2LM, 2enc, 1 dsc config */
struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
@@ -1858,14 +1858,15 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
}
}
- pic_width = dsc->drm->pic_width;
+ dsc_common_mode = 0;
+ pic_width = dsc->pic_width;
dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
- this_frame_slices = pic_width / dsc->drm->slice_width;
- intf_ip_w = this_frame_slices * dsc->drm->slice_width;
+ this_frame_slices = pic_width / dsc->slice_width;
+ intf_ip_w = this_frame_slices * dsc->slice_width;
/*
* dsc merge case: when using 2 encoders for the same stream,
@@ -1980,7 +1981,6 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
struct dpu_hw_mixer_cfg mixer;
int i, num_lm;
- u32 flush_mask = 0;
struct dpu_global_state *global_state;
struct dpu_hw_blk *hw_lm[2];
struct dpu_hw_mixer *hw_mixer[2];
@@ -1999,9 +1999,8 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
for (i = 0; i < num_lm; i++) {
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
- flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
- if (phys_enc->hw_ctl->ops.update_pending_flush)
- phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
+ phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
/* clear all blendstages */
if (phys_enc->hw_ctl->ops.setup_blendstage)
@@ -2061,6 +2060,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ if (phys_enc->hw_intf)
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->hw_wb)
+ intf_cfg.wb = phys_enc->hw_wb->idx;
+
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index d4d1ecd416e3..9e7236ef34e6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -36,7 +36,7 @@ struct msm_display_info {
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_cmd_mode;
bool is_te_using_watchdog_timer;
- struct msm_display_dsc_config *dsc;
+ struct drm_dsc_config *dsc;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 0239a811d5ec..27f029fdc682 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -1333,7 +1333,7 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
static const struct dpu_vbif_cfg msm8998_vbif[] = {
{
- .name = "vbif_0", .id = VBIF_0,
+ .name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.default_ot_rd_limit = 32,
.default_ot_wr_limit = 32,
@@ -1363,7 +1363,7 @@ static const struct dpu_vbif_cfg msm8998_vbif[] = {
static const struct dpu_vbif_cfg sdm845_vbif[] = {
{
- .name = "vbif_0", .id = VBIF_0,
+ .name = "vbif_rt", .id = VBIF_RT,
.base = 0, .len = 0x1040,
.features = BIT(DPU_VBIF_QOS_REMAP),
.xin_halt_timeout = 0x4000,
@@ -1939,11 +1939,6 @@ static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
{
int i;
- struct dpu_mdss_cfg *dpu_cfg;
-
- dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
- if (!dpu_cfg)
- return ERR_PTR(-ENOMEM);
for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
if (cfg_handler[i].hw_rev == hw_rev)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 71fe4c505f5b..38aa38ab1568 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -76,7 +76,7 @@ enum {
/**
* MDP TOP BLOCK features
- * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
* @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
* @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
* @DPU_MDP_UBWC_1_0, This chipset supports Universal Bandwidth
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index e12b7fa48a7b..a35ecb6676c8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -150,92 +150,84 @@ static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
-static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
- uint32_t flushbits = 0;
-
switch (sspp) {
case SSPP_VIG0:
- flushbits = BIT(0);
+ ctx->pending_flush_mask |= BIT(0);
break;
case SSPP_VIG1:
- flushbits = BIT(1);
+ ctx->pending_flush_mask |= BIT(1);
break;
case SSPP_VIG2:
- flushbits = BIT(2);
+ ctx->pending_flush_mask |= BIT(2);
break;
case SSPP_VIG3:
- flushbits = BIT(18);
+ ctx->pending_flush_mask |= BIT(18);
break;
case SSPP_RGB0:
- flushbits = BIT(3);
+ ctx->pending_flush_mask |= BIT(3);
break;
case SSPP_RGB1:
- flushbits = BIT(4);
+ ctx->pending_flush_mask |= BIT(4);
break;
case SSPP_RGB2:
- flushbits = BIT(5);
+ ctx->pending_flush_mask |= BIT(5);
break;
case SSPP_RGB3:
- flushbits = BIT(19);
+ ctx->pending_flush_mask |= BIT(19);
break;
case SSPP_DMA0:
- flushbits = BIT(11);
+ ctx->pending_flush_mask |= BIT(11);
break;
case SSPP_DMA1:
- flushbits = BIT(12);
+ ctx->pending_flush_mask |= BIT(12);
break;
case SSPP_DMA2:
- flushbits = BIT(24);
+ ctx->pending_flush_mask |= BIT(24);
break;
case SSPP_DMA3:
- flushbits = BIT(25);
+ ctx->pending_flush_mask |= BIT(25);
break;
case SSPP_CURSOR0:
- flushbits = BIT(22);
+ ctx->pending_flush_mask |= BIT(22);
break;
case SSPP_CURSOR1:
- flushbits = BIT(23);
+ ctx->pending_flush_mask |= BIT(23);
break;
default:
break;
}
-
- return flushbits;
}
-static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
- uint32_t flushbits = 0;
-
switch (lm) {
case LM_0:
- flushbits = BIT(6);
+ ctx->pending_flush_mask |= BIT(6);
break;
case LM_1:
- flushbits = BIT(7);
+ ctx->pending_flush_mask |= BIT(7);
break;
case LM_2:
- flushbits = BIT(8);
+ ctx->pending_flush_mask |= BIT(8);
break;
case LM_3:
- flushbits = BIT(9);
+ ctx->pending_flush_mask |= BIT(9);
break;
case LM_4:
- flushbits = BIT(10);
+ ctx->pending_flush_mask |= BIT(10);
break;
case LM_5:
- flushbits = BIT(20);
+ ctx->pending_flush_mask |= BIT(20);
break;
default:
- return -EINVAL;
+ break;
}
- flushbits |= CTL_FLUSH_MASK_CTL;
-
- return flushbits;
+ ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}
static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
@@ -294,29 +286,25 @@ static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}
-static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
+static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
enum dpu_dspp dspp)
{
- uint32_t flushbits = 0;
-
switch (dspp) {
case DSPP_0:
- flushbits = BIT(13);
+ ctx->pending_flush_mask |= BIT(13);
break;
case DSPP_1:
- flushbits = BIT(14);
+ ctx->pending_flush_mask |= BIT(14);
break;
case DSPP_2:
- flushbits = BIT(15);
+ ctx->pending_flush_mask |= BIT(15);
break;
case DSPP_3:
- flushbits = BIT(21);
+ ctx->pending_flush_mask |= BIT(21);
break;
default:
- return 0;
+ break;
}
-
- return flushbits;
}
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
@@ -685,9 +673,9 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
- ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
- ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
- ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
+ ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
+ ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
+ ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
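
With the new ops, callers only ever OR bits into the cached pending_flush_mask and then push the whole mask with a single trigger_flush(); a sketch of a caller using the names from this series:

/* Sketch: accumulate per-block flush bits, then one CTL_FLUSH write. */
static void flush_path(struct dpu_hw_ctl *ctl, enum dpu_sspp sspp,
		       enum dpu_lm lm, enum dpu_dspp dspp)
{
	ctl->ops.update_pending_flush_sspp(ctl, sspp);	/* cache only */
	ctl->ops.update_pending_flush_mixer(ctl, lm);	/* cache only */
	ctl->ops.update_pending_flush_dspp(ctl, dspp);	/* cache only */

	ctl->ops.trigger_flush(ctl);	/* single write of the accumulated mask */
}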
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 7d9ad6a3f9f6..96c012ec8467 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -130,6 +130,32 @@ struct dpu_hw_ctl_ops {
enum dpu_merge_3d blk);
/**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : SSPP block index
+ */
+ void (*update_pending_flush_sspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : LM block index
+ */
+ void (*update_pending_flush_mixer)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm blk);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : DSPP block index
+ */
+ void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp blk);
+ /**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
*/
@@ -171,15 +197,6 @@ struct dpu_hw_ctl_ops {
*/
int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
- uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
- enum dpu_sspp blk);
-
- uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
- enum dpu_lm blk);
-
- uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx,
- enum dpu_dspp blk);
-
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 411689ae6382..f2ddcfb6f7ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -37,7 +37,7 @@ static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
}
static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc,
+ struct drm_dsc_config *dsc,
u32 mode,
u32 initial_lines)
{
@@ -52,89 +52,89 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
if (is_cmd_mode)
initial_lines += 1;
- slice_last_group_size = 3 - (dsc->drm->slice_width % 3);
+ slice_last_group_size = 3 - (dsc->slice_width % 3);
data = (initial_lines << 20);
data |= ((slice_last_group_size - 1) << 18);
/* bpp is 6.4 format, the 4 LSBs are the fractional part */
- data |= dsc->drm->bits_per_pixel << 12;
- lsb = dsc->drm->bits_per_pixel % 4;
- bpp = dsc->drm->bits_per_pixel / 4;
+ data |= dsc->bits_per_pixel << 12;
+ lsb = dsc->bits_per_pixel % 4;
+ bpp = dsc->bits_per_pixel / 4;
bpp *= 4;
bpp <<= 4;
bpp |= lsb;
data |= bpp << 8;
- data |= (dsc->drm->block_pred_enable << 7);
- data |= (dsc->drm->line_buf_depth << 3);
- data |= (dsc->drm->simple_422 << 2);
- data |= (dsc->drm->convert_rgb << 1);
- data |= dsc->drm->bits_per_component;
+ data |= (dsc->block_pred_enable << 7);
+ data |= (dsc->line_buf_depth << 3);
+ data |= (dsc->simple_422 << 2);
+ data |= (dsc->convert_rgb << 1);
+ data |= dsc->bits_per_component;
DPU_REG_WRITE(c, DSC_ENC, data);
- data = dsc->drm->pic_width << 16;
- data |= dsc->drm->pic_height;
+ data = dsc->pic_width << 16;
+ data |= dsc->pic_height;
DPU_REG_WRITE(c, DSC_PICTURE, data);
- data = dsc->drm->slice_width << 16;
- data |= dsc->drm->slice_height;
+ data = dsc->slice_width << 16;
+ data |= dsc->slice_height;
DPU_REG_WRITE(c, DSC_SLICE, data);
- data = dsc->drm->slice_chunk_size << 16;
+ data = dsc->slice_chunk_size << 16;
DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
- data = dsc->drm->initial_dec_delay << 16;
- data |= dsc->drm->initial_xmit_delay;
+ data = dsc->initial_dec_delay << 16;
+ data |= dsc->initial_xmit_delay;
DPU_REG_WRITE(c, DSC_DELAY, data);
- data = dsc->drm->initial_scale_value;
+ data = dsc->initial_scale_value;
DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
- data = dsc->drm->scale_decrement_interval;
+ data = dsc->scale_decrement_interval;
DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
- data = dsc->drm->scale_increment_interval;
+ data = dsc->scale_increment_interval;
DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
- data = dsc->drm->first_line_bpg_offset;
+ data = dsc->first_line_bpg_offset;
DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
- data = dsc->drm->nfl_bpg_offset << 16;
- data |= dsc->drm->slice_bpg_offset;
+ data = dsc->nfl_bpg_offset << 16;
+ data |= dsc->slice_bpg_offset;
DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
- data = dsc->drm->initial_offset << 16;
- data |= dsc->drm->final_offset;
+ data = dsc->initial_offset << 16;
+ data |= dsc->final_offset;
DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
- det_thresh_flatness = 7 + 2 * (dsc->drm->bits_per_component - 8);
+ det_thresh_flatness = 7 + 2 * (dsc->bits_per_component - 8);
data = det_thresh_flatness << 10;
- data |= dsc->drm->flatness_max_qp << 5;
- data |= dsc->drm->flatness_min_qp;
+ data |= dsc->flatness_max_qp << 5;
+ data |= dsc->flatness_min_qp;
DPU_REG_WRITE(c, DSC_FLATNESS, data);
- data = dsc->drm->rc_model_size;
+ data = dsc->rc_model_size;
DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
- data = dsc->drm->rc_tgt_offset_low << 18;
- data |= dsc->drm->rc_tgt_offset_high << 14;
- data |= dsc->drm->rc_quant_incr_limit1 << 9;
- data |= dsc->drm->rc_quant_incr_limit0 << 4;
- data |= dsc->drm->rc_edge_factor;
+ data = dsc->rc_tgt_offset_low << 18;
+ data |= dsc->rc_tgt_offset_high << 14;
+ data |= dsc->rc_quant_incr_limit1 << 9;
+ data |= dsc->rc_quant_incr_limit0 << 4;
+ data |= dsc->rc_edge_factor;
DPU_REG_WRITE(c, DSC_RC, data);
}
static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc)
+ struct drm_dsc_config *dsc)
{
- struct drm_dsc_rc_range_parameters *rc = dsc->drm->rc_range_params;
+ struct drm_dsc_rc_range_parameters *rc = dsc->rc_range_params;
struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
u32 off;
int i;
off = DSC_RC_BUF_THRESH;
for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++) {
- DPU_REG_WRITE(c, off, dsc->drm->rc_buf_thresh[i]);
+ DPU_REG_WRITE(c, off, dsc->rc_buf_thresh[i]);
off += 4;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index 45e4118f1fa2..c0b77fe1a696 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -31,7 +31,7 @@ struct dpu_hw_dsc_ops {
* @initial_lines: amount of initial lines to be used
*/
void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc,
+ struct drm_dsc_config *dsc,
u32 mode,
u32 initial_lines);
@@ -41,7 +41,7 @@ struct dpu_hw_dsc_ops {
* @dsc: panel dsc parameters
*/
void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
- struct msm_display_dsc_config *dsc);
+ struct drm_dsc_config *dsc);
};
struct dpu_hw_dsc {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 9f402be55fbf..d3b0ed0a9c6c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -273,11 +273,9 @@ enum dpu_wd_timer {
};
enum dpu_vbif {
- VBIF_0,
- VBIF_1,
+ VBIF_RT,
+ VBIF_NRT,
VBIF_MAX,
- VBIF_RT = VBIF_0,
- VBIF_NRT = VBIF_1
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 102c21bb4192..691c471b08c2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -780,8 +780,7 @@ static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
}
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
- void __iomem *addr, const struct dpu_mdss_cfg *catalog,
- bool is_virtual_pipe)
+ void __iomem *addr, const struct dpu_mdss_cfg *catalog)
{
struct dpu_hw_pipe *hw_pipe;
const struct dpu_sspp_cfg *cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 78b1bc9e004f..0c95b7e64f6c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -377,11 +377,9 @@ struct dpu_kms;
* @idx: Pipe index for which driver object is required
* @addr: Mapped register io address of MDP
* @catalog : Pointer to mdss catalog data
- * @is_virtual_pipe: is this pipe virtual pipe
*/
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
- void __iomem *addr, const struct dpu_mdss_cfg *catalog,
- bool is_virtual_pipe);
+ void __iomem *addr, const struct dpu_mdss_cfg *catalog);
/**
* dpu_hw_sspp_destroy(): Destroys SSPP driver context
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 008e1420e6e5..5e6e2626151e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -384,12 +384,9 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
struct icc_path *path1;
struct drm_device *dev = dpu_kms->dev;
struct device *dpu_dev = dev->dev;
- struct device *mdss_dev = dpu_dev->parent;
- /* Interconnects are a part of MDSS device tree binding, not the
- * MDP/DPU device. */
- path0 = of_icc_get(mdss_dev, "mdp0-mem");
- path1 = of_icc_get(mdss_dev, "mdp1-mem");
+ path0 = msm_icc_get(dpu_dev, "mdp0-mem");
+ path1 = msm_icc_get(dpu_dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
@@ -782,7 +779,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
- (1UL << max_crtc_count) - 1, 0);
+ (1UL << max_crtc_count) - 1);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -826,12 +823,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
- for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
- u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
-
- if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
- dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
- dpu_kms->hw_vbif[vbif_idx] = NULL;
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i]) {
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
+ dpu_kms->hw_vbif[i] = NULL;
}
}
}
@@ -902,12 +897,10 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
int i;
struct dpu_kms *dpu_kms;
const struct dpu_mdss_cfg *cat;
- struct dpu_hw_mdp *top;
dpu_kms = to_dpu_kms(kms);
cat = dpu_kms->catalog;
- top = dpu_kms->hw_mdp;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -1113,12 +1106,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
- dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
- if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+ if (IS_ERR(dpu_kms->hw_vbif[vbif_idx])) {
rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
- if (!dpu_kms->hw_vbif[vbif_idx])
- rc = -EINVAL;
DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
dpu_kms->hw_vbif[vbif_idx] = NULL;
goto power_error;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index a617a3d8b1bc..658005f609f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -91,7 +91,7 @@ enum dpu_plane_qos {
/*
* struct dpu_plane - local dpu plane structure
* @aspace: address space pointer
- * @mplane_list: List of multirect planes of the same pipe
+ * @csc_ptr: Points to dpu_csc_cfg structure to use for current
* @catalog: Points to dpu catalog structure
* @revalidate: force revalidation of all the plane properties
*/
@@ -106,8 +106,6 @@ struct dpu_plane {
uint32_t color_fill;
bool is_error;
bool is_rt_pipe;
- bool is_virtual;
- struct list_head mplane_list;
const struct dpu_mdss_cfg *catalog;
};
@@ -225,7 +223,7 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg
static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
const struct dpu_format *fmt, u32 src_width)
{
- struct dpu_plane *pdpu, *tmp;
+ struct dpu_plane *pdpu;
struct dpu_plane_state *pstate;
u32 fixed_buff_size;
u32 total_fl;
@@ -239,19 +237,7 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
pstate = to_dpu_plane_state(plane->state);
fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
- list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
- u32 tmp_width;
-
- if (!tmp->base.state->visible)
- continue;
- tmp_width = drm_rect_width(&tmp->base.state->src) >> 16;
- DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
- pdpu->base.base.id, tmp->base.base.id,
- src_width,
- tmp_width);
- src_width = max_t(u32, src_width,
- tmp_width);
- }
+ /* FIXME: in multirect case account for the src_width of all the planes */
if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
if (fmt->chroma_sample == DPU_CHROMA_420) {
@@ -854,13 +840,8 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
}
done:
- if (dpu_plane[R0]->is_virtual) {
- pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
- pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
- } else {
- pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
- pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
- }
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
@@ -869,18 +850,6 @@ done:
return 0;
}
-/**
- * dpu_plane_get_ctl_flush - get control flush for the given plane
- * @plane: Pointer to drm plane structure
- * @ctl: Pointer to hardware control driver
- * @flush_sspp: Pointer to sspp flush control word
- */
-void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
- u32 *flush_sspp)
-{
- *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
-}
-
static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@@ -1266,19 +1235,13 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
static void _dpu_plane_atomic_disable(struct drm_plane *plane)
{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
- trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+ trace_dpu_plane_disable(DRMID(plane), false,
pstate->multirect_mode);
pstate->pending = true;
-
- if (is_dpu_plane_virtual(plane) &&
- pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
- pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
- DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
}
static void dpu_plane_atomic_update(struct drm_plane *plane,
@@ -1493,22 +1456,16 @@ enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
}
-bool is_dpu_plane_virtual(struct drm_plane *plane)
-{
- return plane ? to_dpu_plane(plane)->is_virtual : false;
-}
-
/* initialize plane */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
- unsigned long possible_crtcs, u32 master_plane_id)
+ unsigned long possible_crtcs)
{
- struct drm_plane *plane = NULL, *master_plane = NULL;
+ struct drm_plane *plane = NULL;
const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
- int zpos_max = DPU_ZPOS_MAX;
uint32_t num_formats;
uint32_t supported_rotations;
int ret = -EINVAL;
@@ -1524,18 +1481,9 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
/* cache local stuff for later */
plane = &pdpu->base;
pdpu->pipe = pipe;
- pdpu->is_virtual = (master_plane_id != 0);
- INIT_LIST_HEAD(&pdpu->mplane_list);
- master_plane = drm_plane_find(dev, NULL, master_plane_id);
- if (master_plane) {
- struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
-
- list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
- }
/* initialize underlying h/w driver */
- pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
- master_plane_id != 0);
+ pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog);
if (IS_ERR(pdpu->pipe_hw)) {
DPU_ERROR("[%u]SSPP init failed\n", pipe);
ret = PTR_ERR(pdpu->pipe_hw);
@@ -1545,14 +1493,8 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
- if (pdpu->is_virtual) {
- format_list = pdpu->pipe_hw->cap->sblk->virt_format_list;
- num_formats = pdpu->pipe_hw->cap->sblk->virt_num_formats;
- }
- else {
- format_list = pdpu->pipe_hw->cap->sblk->format_list;
- num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
- }
+ format_list = pdpu->pipe_hw->cap->sblk->format_list;
+ num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
format_list, num_formats,
@@ -1562,14 +1504,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
pdpu->catalog = kms->catalog;
- if (kms->catalog->mixer_count &&
- kms->catalog->mixer[0].sblk->maxblendstages) {
- zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
- if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
- zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
- }
-
- ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+ ret = drm_plane_create_zpos_property(plane, 0, 0, DPU_ZPOS_MAX);
if (ret)
DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
@@ -1594,15 +1529,14 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
mutex_init(&pdpu->lock);
- DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", plane->name,
- pipe, plane->base.id, master_plane_id);
+ DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
+ pipe, plane->base.id);
return plane;
clean_sspp:
if (pdpu && pdpu->pipe_hw)
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
- list_del(&pdpu->mplane_list);
kfree(pdpu);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index e1463107a6fc..b7b1b05199c2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -65,23 +65,6 @@ struct dpu_multirect_plane_states {
enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
/**
- * is_dpu_plane_virtual - check for virtual plane
- * @plane: Pointer to DRM plane object
- * returns: true - if the plane is virtual
- * false - if the plane is primary
- */
-bool is_dpu_plane_virtual(struct drm_plane *plane);
-
-/**
- * dpu_plane_get_ctl_flush - get control flush mask
- * @plane: Pointer to DRM plane object
- * @ctl: Pointer to control hardware
- * @flush_sspp: Pointer to sspp flush control word
- */
-void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
- u32 *flush_sspp);
-
-/**
* dpu_plane_flush - final plane operations before commit flush
* @plane: Pointer to drm plane structure
*/
@@ -99,14 +82,11 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error);
* @pipe: dpu hardware pipe identifier
* @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
- * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
- * a regular plane initialization. A non-zero primary plane
- * id will be passed for a virtual pipe initialization.
*
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
- unsigned long possible_crtcs, u32 master_plane_id);
+ unsigned long possible_crtcs);
/**
 * dpu_plane_validate_multirect_v2 - validate the multirect planes
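
With the master_plane_id argument gone, a caller initializes a plane from just the pipe, the plane type and the CRTC mask. A minimal sketch of the new call shape; the pipe and CRTC values here are illustrative, not taken from this patch:

	struct drm_plane *plane;

	/* illustrative only: VIG0 pipe as a primary plane on CRTC 0 */
	plane = dpu_plane_init(dev, SSPP_VIG0, DRM_PLANE_TYPE_PRIMARY, BIT(0));
	if (IS_ERR(plane))
		return PTR_ERR(plane);
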
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 21d20373eb8b..1305e250b71e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -11,6 +11,26 @@
#include "dpu_hw_vbif.h"
#include "dpu_trace.h"
+static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx)
+{
+ if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif))
+ return dpu_kms->hw_vbif[vbif_idx];
+
+ return NULL;
+}
+
+static const char *dpu_vbif_name(enum dpu_vbif idx)
+{
+ switch (idx) {
+ case VBIF_RT:
+ return "VBIF_RT";
+ case VBIF_NRT:
+ return "VBIF_NRT";
+ default:
+ return "??";
+ }
+}
+
/**
* _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
* @vbif: Pointer to hardware vbif driver
@@ -42,12 +62,12 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
if (!status) {
rc = -ETIMEDOUT;
- DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
- vbif->idx - VBIF_0, xin_id);
+ DPU_ERROR("%s client %d not halting. TIMEDOUT.\n",
+ dpu_vbif_name(vbif->idx), xin_id);
} else {
rc = 0;
- DRM_DEBUG_ATOMIC("VBIF %d client %d is halted\n",
- vbif->idx - VBIF_0, xin_id);
+ DRM_DEBUG_ATOMIC("%s client %d is halted\n",
+ dpu_vbif_name(vbif->idx), xin_id);
}
return rc;
@@ -87,8 +107,8 @@ static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
}
}
- DRM_DEBUG_ATOMIC("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
- vbif->idx - VBIF_0, params->xin_id,
+ DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ dpu_vbif_name(vbif->idx), params->xin_id,
params->width, params->height, params->frame_rate,
pps, *ot_lim);
}
@@ -133,8 +153,8 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
}
exit:
- DRM_DEBUG_ATOMIC("vbif:%d xin:%d ot_lim:%d\n",
- vbif->idx - VBIF_0, params->xin_id, ot_lim);
+ DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n",
+ dpu_vbif_name(vbif->idx), params->xin_id, ot_lim);
return ot_lim;
}
@@ -148,20 +168,15 @@ exit:
void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_ot_params *params)
{
- struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
u32 ot_lim;
- int ret, i;
+ int ret;
mdp = dpu_kms->hw_mdp;
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i] &&
- dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
- vbif = dpu_kms->hw_vbif[i];
- }
-
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !mdp) {
DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n",
vbif != NULL, mdp != NULL);
@@ -204,7 +219,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
struct dpu_vbif_set_qos_params *params)
{
- struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_vbif *vbif;
struct dpu_hw_mdp *mdp;
bool forced_on = false;
const struct dpu_vbif_qos_tbl *qos_tbl;
@@ -216,13 +231,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
}
mdp = dpu_kms->hw_mdp;
- for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
- if (dpu_kms->hw_vbif[i] &&
- dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
- vbif = dpu_kms->hw_vbif[i];
- break;
- }
- }
+ vbif = dpu_get_vbif(dpu_kms, params->vbif_idx);
if (!vbif || !vbif->cap) {
DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
@@ -245,8 +254,8 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
for (i = 0; i < qos_tbl->npriority_lvl; i++) {
- DRM_DEBUG_ATOMIC("vbif:%d xin:%d lvl:%d/%d\n",
- params->vbif_idx, params->xin_id, i,
+ DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n",
+ dpu_vbif_name(params->vbif_idx), params->xin_id, i,
qos_tbl->priority_lvl[i]);
vbif->ops.set_qos_remap(vbif, params->xin_id, i,
qos_tbl->priority_lvl[i]);
@@ -266,8 +275,8 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
if (vbif && vbif->ops.clear_errors) {
vbif->ops.clear_errors(vbif, &pnd, &src);
if (pnd || src) {
- DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
- vbif->idx - VBIF_0, pnd, src);
+ DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n",
+ dpu_vbif_name(vbif->idx), pnd, src);
}
}
}
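
The two helpers introduced at the top of dpu_vbif.c replace both the open-coded scans over hw_vbif[] and the "idx - VBIF_0" arithmetic in the log strings: the array is indexed directly with a bounds check (assuming, as the hunks above arrange, that each entry lives at its enum index), and readable names come from a switch. A self-contained sketch of the lookup pattern, with hypothetical names standing in for the dpu structures:

	#include <stddef.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct hw_block { int idx; };

	static struct hw_block *blocks[4];

	/* O(1) bounds-checked lookup; an unpopulated slot returns NULL
	 * without the "found" flag the old linear scan needed */
	static struct hw_block *get_block(size_t idx)
	{
		if (idx < ARRAY_SIZE(blocks))
			return blocks[idx];
		return NULL;
	}
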
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index d2a48caf9d27..b0d21838a134 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -902,12 +902,9 @@ fail:
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
- /* Interconnects are a part of MDSS device tree binding, not the
- * MDP5 device. */
- struct device *mdss_dev = pdev->dev.parent;
- struct icc_path *path0 = of_icc_get(mdss_dev, "mdp0-mem");
- struct icc_path *path1 = of_icc_get(mdss_dev, "mdp1-mem");
- struct icc_path *path_rot = of_icc_get(mdss_dev, "rotator-mem");
+ struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem");
+ struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem");
+ struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem");
if (IS_ERR(path0))
return PTR_ERR(path0);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 7257515871a9..676279d0ca8d 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -431,7 +431,7 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
if (rate == link_rate_hbr3)
pixel_div = 6;
- else if (rate == 1620000 || rate == 270000)
+ else if (rate == 162000 || rate == 270000)
pixel_div = 2;
else if (rate == link_rate_hbr2)
pixel_div = 4;
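
The dp_catalog.c hunk is a one-character bug fix: this driver keeps link rates in the drm_dp_bw_code_to_link_rate() convention (link symbol clock in kHz), so RBR is 162000, and the stray extra zero meant the 1.62 Gb/s case could never match. A condensed sketch of the divider selection after the fix; the HBR2/HBR3 constants (540000/810000) are the conventional values and are assumed here rather than quoted from the file:

	/* sketch: MSA pixel divider from link rate in kHz */
	static unsigned int pick_pixel_div(unsigned int rate)
	{
		if (rate == 810000)		/* HBR3 */
			return 6;
		else if (rate == 540000)	/* HBR2 */
			return 4;
		else				/* RBR 162000 or HBR 270000 */
			return 2;
	}
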
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index ab6aa13b1639..3854c9f1f7e9 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1214,7 +1214,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
if (ret)
return ret;
- dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+ dp_ctrl_train_pattern_set(ctrl, pattern);
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
@@ -1238,8 +1238,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
return -ETIMEDOUT;
}
-static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
-
static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
int *training_step)
{
@@ -1358,25 +1356,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
if (ret)
DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
- drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n",
- ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
-
- return ret;
-}
-
-static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
-{
- int ret = 0;
-
- dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel",
- ctrl->dp_ctrl.pixel_rate * 1000);
-
- ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
- if (ret)
- DRM_ERROR("Unabled to start pixel clocks. ret=%d\n", ret);
-
- drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n",
- ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
+ drm_dbg_dp(ctrl->drm_dev, "link rate=%d\n", ctrl->link->link_params.rate);
return ret;
}
@@ -1520,8 +1500,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
ctrl->link->phy_params.p_level = 0;
ctrl->link->phy_params.v_level = 0;
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
-
ret = dp_ctrl_setup_main_link(ctrl, &training_step);
if (ret)
goto end;
@@ -1535,38 +1513,6 @@ end:
return ret;
}
-static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl);
-
-static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
-{
- int ret = 0;
-
- if (!ctrl->link->phy_params.phy_test_pattern_sel) {
- drm_dbg_dp(ctrl->drm_dev,
- "no test pattern selected by sink\n");
- return ret;
- }
-
- /*
- * The global reset will need DP link related clocks to be
- * running. Add the global reset just before disabling the
- * link clocks and core clocks.
- */
- ret = dp_ctrl_off(&ctrl->dp_ctrl);
- if (ret) {
- DRM_ERROR("failed to disable DP controller\n");
- return ret;
- }
-
- ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
- if (!ret)
- ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl);
- else
- DRM_ERROR("failed to enable DP link controller\n");
-
- return ret;
-}
-
static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
{
bool success = false;
@@ -1619,6 +1565,48 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
return success;
}
+static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+{
+ int ret;
+ unsigned long pixel_rate;
+
+ if (!ctrl->link->phy_params.phy_test_pattern_sel) {
+ drm_dbg_dp(ctrl->drm_dev,
+ "no test pattern selected by sink\n");
+ return 0;
+ }
+
+ /*
+ * The global reset will need DP link related clocks to be
+ * running. Add the global reset just before disabling the
+ * link clocks and core clocks.
+ */
+ ret = dp_ctrl_off(&ctrl->dp_ctrl);
+ if (ret) {
+ DRM_ERROR("failed to disable DP controller\n");
+ return ret;
+ }
+
+ ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
+ if (ret) {
+ DRM_ERROR("failed to enable DP link controller\n");
+ return ret;
+ }
+
+ pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
+ if (ret) {
+ DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ return ret;
+ }
+
+ dp_ctrl_send_phy_test_pattern(ctrl);
+
+ return 0;
+}
+
void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
@@ -1689,11 +1677,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
{
int rc = 0;
struct dp_ctrl_private *ctrl;
- u32 rate = 0;
+ u32 rate;
int link_train_max_retries = 5;
u32 const phy_cts_pixel_clk_khz = 148500;
u8 link_status[DP_LINK_STATUS_SIZE];
unsigned int training_step;
+ unsigned long pixel_rate;
if (!dp_ctrl)
return -EINVAL;
@@ -1701,25 +1690,24 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
rate = ctrl->panel->link_info.rate;
+ pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
drm_dbg_dp(ctrl->drm_dev,
"using phy test link parameters\n");
- if (!ctrl->panel->dp_mode.drm_mode.clock)
- ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz;
+ if (!pixel_rate)
+ pixel_rate = phy_cts_pixel_clk_khz;
} else {
ctrl->link->link_params.rate = rate;
ctrl->link->link_params.num_lanes =
ctrl->panel->link_info.num_lanes;
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
}
- drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes,
- ctrl->dp_ctrl.pixel_rate);
-
+ pixel_rate);
rc = dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
@@ -1816,31 +1804,12 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl)
return dp_ctrl_setup_main_link(ctrl, &training_step);
}
-static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl)
-{
- int ret;
- struct dp_ctrl_private *ctrl;
-
- ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
-
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
-
- ret = dp_ctrl_enable_stream_clocks(ctrl);
- if (ret) {
- DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
- return ret;
- }
-
- dp_ctrl_send_phy_test_pattern(ctrl);
-
- return 0;
-}
-
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
{
int ret = 0;
bool mainlink_ready = false;
struct dp_ctrl_private *ctrl;
+ unsigned long pixel_rate;
unsigned long pixel_rate_orig;
if (!dp_ctrl)
@@ -1848,15 +1817,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
- ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+ pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock;
- pixel_rate_orig = ctrl->dp_ctrl.pixel_rate;
if (dp_ctrl->wide_bus_en)
- ctrl->dp_ctrl.pixel_rate >>= 1;
+ pixel_rate >>= 1;
- drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n",
ctrl->link->link_params.rate,
- ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+ ctrl->link->link_params.num_lanes, pixel_rate);
if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */
ret = dp_ctrl_enable_mainlink_clocks(ctrl);
@@ -1866,9 +1834,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train)
}
}
- ret = dp_ctrl_enable_stream_clocks(ctrl);
+ dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
if (ret) {
- DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+ DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret);
goto end;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index b563e2e3bfe5..9f29734af81c 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -16,7 +16,6 @@
struct dp_ctrl {
bool orientation;
atomic_t aborted;
- u32 pixel_rate;
bool wide_bus_en;
};
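
Removing pixel_rate from struct dp_ctrl completes the dp_ctrl.c hunks above: the rate is now derived from panel->dp_mode.drm_mode.clock at each use site instead of being cached in shared state, which removes the ordering hazard where the wide-bus path halved a value other paths later read back. The stream-on path condenses to the pattern below, paraphrased from the new hunks:

	unsigned long pixel_rate, pixel_rate_orig;

	pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock;
	if (dp_ctrl->wide_bus_en)	/* two pixels per clock */
		pixel_rate >>= 1;

	/* mode clock is in kHz; the clk API wants Hz */
	dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel",
			       pixel_rate * 1000);
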
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 36f0af02749f..36bb6191d2f0 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -786,7 +786,7 @@ static int dp_link_process_link_training_request(struct dp_link_private *link)
link->request.test_lane_count);
link->dp_link.link_params.num_lanes = link->request.test_lane_count;
- link->dp_link.link_params.rate =
+ link->dp_link.link_params.rate =
drm_dp_bw_code_to_link_rate(link->request.test_link_rate);
return 0;
@@ -965,8 +965,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link)
if (channel_eq_done && clock_recovery_done)
return -EINVAL;
-
- return 0;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 1625328fa430..39bbabb5daf6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -6,14 +6,6 @@
#include "dsi.h"
#include "dsi_cfg.h"
-struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
-{
- if (!msm_dsi || !msm_dsi_device_connected(msm_dsi))
- return NULL;
-
- return msm_dsi->encoder;
-}
-
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host);
@@ -21,7 +13,7 @@ bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
return !(host_flags & MIPI_DSI_MODE_VIDEO);
}
-struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
+struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
return msm_dsi_host_get_dsc_config(msm_dsi->host);
}
@@ -220,7 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
struct msm_drm_private *priv;
- struct drm_bridge *ext_bridge;
int ret;
if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev))
@@ -254,26 +245,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
- /*
- * check if the dsi encoder output is connected to a panel or an
- * external bridge. We create a connector only if we're connected to a
- * drm_panel device. When we're connected to an external bridge, we
- * assume that the drm_bridge driver will create the connector itself.
- */
- ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host);
-
- if (ext_bridge)
- msm_dsi->connector =
- msm_dsi_manager_ext_bridge_init(msm_dsi->id);
- else
- msm_dsi->connector =
- msm_dsi_manager_connector_init(msm_dsi->id);
-
- if (IS_ERR(msm_dsi->connector)) {
- ret = PTR_ERR(msm_dsi->connector);
+ ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id);
+ if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
- msm_dsi->connector = NULL;
goto fail;
}
@@ -287,12 +262,6 @@ fail:
msm_dsi->bridge = NULL;
}
- /* don't destroy connector if we didn't make it */
- if (msm_dsi->connector && !msm_dsi->external_bridge)
- msm_dsi->connector->funcs->destroy(msm_dsi->connector);
-
- msm_dsi->connector = NULL;
-
return ret;
}
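
With msm_dsi_manager_connector_init() gone, the DSI encoder always expects a drm_bridge on its output; a bare drm_panel is wrapped in a panel-bridge by companion changes rather than handled as a special case here. Assuming the usual devm_drm_of_get_bridge() pattern (not shown in this excerpt), acquiring the downstream element looks roughly like this hypothetical helper:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <drm/drm_bridge.h>

	static int demo_get_output_bridge(struct platform_device *pdev,
					  struct drm_bridge **out)
	{
		struct drm_bridge *next;

		/* port 1, endpoint 0: the DSI output in this binding;
		 * a panel sitting there comes back as a panel-bridge */
		next = devm_drm_of_get_bridge(&pdev->dev, pdev->dev.of_node,
					      1, 0);
		if (IS_ERR(next))
			return PTR_ERR(next);

		*out = next;
		return 0;
	}
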
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 580a1e6358bf..2a96b4fe7839 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -12,7 +12,6 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include "msm_drv.h"
#include "disp/msm_disp_snapshot.h"
@@ -30,27 +29,12 @@ enum msm_dsi_phy_usecase {
MSM_DSI_PHY_SLAVE,
};
-#define DSI_DEV_REGULATOR_MAX 8
#define DSI_BUS_CLK_MAX 4
-/* Regulators for DSI devices */
-struct dsi_reg_entry {
- char name[32];
- int enable_load;
- int disable_load;
-};
-
-struct dsi_reg_config {
- int num;
- struct dsi_reg_entry regs[DSI_DEV_REGULATOR_MAX];
-};
-
struct msm_dsi {
struct drm_device *dev;
struct platform_device *pdev;
- /* connector managed by us when we're connected to a drm_panel */
- struct drm_connector *connector;
/* internal dsi bridge attached to MDP interface */
struct drm_bridge *bridge;
@@ -58,10 +42,8 @@ struct msm_dsi {
struct msm_dsi_phy *phy;
/*
- * panel/external_bridge connected to dsi bridge output, only one of the
- * two can be valid at a time
+ * external_bridge connected to dsi bridge output
*/
- struct drm_panel *panel;
struct drm_bridge *external_bridge;
struct device *phy_dev;
@@ -76,8 +58,7 @@ struct msm_dsi {
/* dsi manager */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id);
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge);
-struct drm_connector *msm_dsi_manager_connector_init(u8 id);
-struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id);
+int msm_dsi_manager_ext_bridge_init(u8 id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
@@ -87,11 +68,9 @@ void msm_dsi_manager_tpg_enable(void);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
{
- return msm_dsi->panel || msm_dsi->external_bridge;
+ return msm_dsi->external_bridge;
}
-struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
-
/* dsi host */
struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
@@ -116,9 +95,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
-struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host);
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
-struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
@@ -154,7 +131,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
-struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
+struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 2c23324a2296..7e97c239ed48 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -9,16 +9,16 @@ static const char * const dsi_v2_bus_clk_names[] = {
"core_mmss", "iface", "bus",
};
+static const struct regulator_bulk_data apq8064_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "avdd", .init_load_uA = 10000 }, /* 3.0 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config apq8064_dsi_cfg = {
.io_offset = 0,
- .reg_cfg = {
- .num = 3,
- .regs = {
- {"vdda", 100000, 100}, /* 1.2 V */
- {"avdd", 10000, 100}, /* 3.0 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = apq8064_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(apq8064_dsi_regulators),
.bus_clk_names = dsi_v2_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
.io_start = { 0x4700000, 0x5800000 },
@@ -29,16 +29,16 @@ static const char * const dsi_6g_bus_clk_names[] = {
"mdp_core", "iface", "bus", "core_mmss",
};
+static const struct regulator_bulk_data msm8974_apq8084_regulators[] = {
+ { .supply = "vdd", .init_load_uA = 150000 }, /* 3.0 V */
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 3,
- .regs = {
- {"vdd", 150000, 100}, /* 3.0 V */
- {"vdda", 100000, 100}, /* 1.2 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = msm8974_apq8084_regulators,
+ .num_regulators = ARRAY_SIZE(msm8974_apq8084_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = { 0xfd922800, 0xfd922b00 },
@@ -49,15 +49,15 @@ static const char * const dsi_8916_bus_clk_names[] = {
"mdp_core", "iface", "bus",
};
+static const struct regulator_bulk_data msm8916_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8916_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 100000, 100}, /* 1.2 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = msm8916_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8916_dsi_regulators),
.bus_clk_names = dsi_8916_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
.io_start = { 0x1a98000 },
@@ -68,34 +68,34 @@ static const char * const dsi_8976_bus_clk_names[] = {
"mdp_core", "iface", "bus",
};
+static const struct regulator_bulk_data msm8976_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8976_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 100000, 100}, /* 1.2 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = msm8976_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8976_dsi_regulators),
.bus_clk_names = dsi_8976_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names),
.io_start = { 0x1a94000, 0x1a96000 },
.num_dsi = 2,
};
+static const struct regulator_bulk_data msm8994_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 100000 }, /* 1.25 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
+ { .supply = "vdd", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "lab_reg", .init_load_uA = -1 },
+ { .supply = "ibb_reg", .init_load_uA = -1 },
+};
+
static const struct msm_dsi_config msm8994_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 6,
- .regs = {
- {"vdda", 100000, 100}, /* 1.25 V */
- {"vddio", 100000, 100}, /* 1.8 V */
- {"vcca", 10000, 100}, /* 1.0 V */
- {"vdd", 100000, 100}, /* 1.8 V */
- {"lab_reg", -1, -1},
- {"ibb_reg", -1, -1},
- },
- },
+ .regulator_data = msm8994_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8994_dsi_regulators),
.bus_clk_names = dsi_6g_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
.io_start = { 0xfd998000, 0xfd9a0000 },
@@ -106,16 +106,16 @@ static const char * const dsi_8996_bus_clk_names[] = {
"mdp_core", "iface", "bus", "core_mmss",
};
+static const struct regulator_bulk_data msm8996_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 18160 }, /* 1.25 V */
+ { .supply = "vcca", .init_load_uA = 17000 }, /* 0.925 V */
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
static const struct msm_dsi_config msm8996_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 18160, 1 }, /* 1.25 V */
- {"vcca", 17000, 32 }, /* 0.925 V */
- {"vddio", 100000, 100 },/* 1.8 V */
- },
- },
+ .regulator_data = msm8996_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8996_dsi_regulators),
.bus_clk_names = dsi_8996_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names),
.io_start = { 0x994000, 0x996000 },
@@ -126,15 +126,15 @@ static const char * const dsi_msm8998_bus_clk_names[] = {
"iface", "bus", "core",
};
+static const struct regulator_bulk_data msm8998_dsi_regulators[] = {
+ { .supply = "vdd", .init_load_uA = 367000 }, /* 0.9 V */
+ { .supply = "vdda", .init_load_uA = 62800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config msm8998_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdd", 367000, 16 }, /* 0.9 V */
- {"vdda", 62800, 2 }, /* 1.2 V */
- },
- },
+ .regulator_data = msm8998_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(msm8998_dsi_regulators),
.bus_clk_names = dsi_msm8998_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names),
.io_start = { 0xc994000, 0xc996000 },
@@ -145,14 +145,14 @@ static const char * const dsi_sdm660_bus_clk_names[] = {
"iface", "bus", "core", "core_mmss",
};
+static const struct regulator_bulk_data sdm660_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 12560 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sdm660_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vdda", 12560, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = sdm660_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sdm660_dsi_regulators),
.bus_clk_names = dsi_sdm660_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names),
.io_start = { 0xc994000, 0xc996000 },
@@ -167,28 +167,28 @@ static const char * const dsi_sc7180_bus_clk_names[] = {
"iface", "bus",
};
+static const struct regulator_bulk_data sdm845_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sdm845_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 21800, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = sdm845_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sdm845_dsi_regulators),
.bus_clk_names = dsi_sdm845_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names),
.io_start = { 0xae94000, 0xae96000 },
.num_dsi = 2,
};
+static const struct regulator_bulk_data sc7180_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sc7180_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 21800, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = sc7180_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sc7180_dsi_regulators),
.bus_clk_names = dsi_sc7180_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names),
.io_start = { 0xae94000 },
@@ -199,14 +199,14 @@ static const char * const dsi_sc7280_bus_clk_names[] = {
"iface", "bus",
};
+static const struct regulator_bulk_data sc7280_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config sc7280_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 8350, 0 }, /* 1.2 V */
- },
- },
+ .regulator_data = sc7280_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sc7280_dsi_regulators),
.bus_clk_names = dsi_sc7280_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
.io_start = { 0xae94000 },
@@ -217,14 +217,14 @@ static const char * const dsi_qcm2290_bus_clk_names[] = {
"iface", "bus",
};
+static const struct regulator_bulk_data qcm2290_dsi_cfg_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
+};
+
static const struct msm_dsi_config qcm2290_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdda", 21800, 4 }, /* 1.2 V */
- },
- },
+ .regulator_data = qcm2290_dsi_cfg_regulators,
+ .num_regulators = ARRAY_SIZE(qcm2290_dsi_cfg_regulators),
.bus_clk_names = dsi_qcm2290_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_qcm2290_bus_clk_names),
.io_start = { 0x5e94000 },
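
Every per-SoC table above now describes its supplies as a const regulator_bulk_data array with init_load_uA, letting the regulator core apply the load when the supply is acquired; the hand-rolled regulator_set_load() loops removed from dsi_host.c further down become unnecessary. A minimal consumer-side sketch, assuming a driver with two supplies (names hypothetical):

	#include <linux/kernel.h>
	#include <linux/regulator/consumer.h>

	static const struct regulator_bulk_data demo_supplies[] = {
		{ .supply = "vdda",  .init_load_uA = 100000 },
		{ .supply = "vddio", .init_load_uA = 100000 },
	};

	/* devm_regulator_bulk_get_const() keeps the table const and
	 * hands back a managed, mutable copy in "out" */
	static int demo_get_and_enable(struct device *dev)
	{
		struct regulator_bulk_data *out;
		int ret;

		ret = devm_regulator_bulk_get_const(dev,
						    ARRAY_SIZE(demo_supplies),
						    demo_supplies, &out);
		if (ret)
			return ret;

		return regulator_bulk_enable(ARRAY_SIZE(demo_supplies), out);
	}
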
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index fe54a999968b..8f04e685a74e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -32,7 +32,8 @@
struct msm_dsi_config {
u32 io_offset;
- struct dsi_reg_config reg_cfg;
+ const struct regulator_bulk_data *regulator_data;
+ int num_regulators;
const char * const *bus_clk_names;
const int num_bus_clks;
const resource_size_t io_start[DSI_MAX];
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index a34078497af1..7fbf391c024f 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -33,7 +33,7 @@
#define DSI_RESET_TOGGLE_DELAY_MS 20
-static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc);
+static int dsi_populate_dsc_params(struct drm_dsc_config *dsc);
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
@@ -108,7 +108,7 @@ struct msm_dsi_host {
void __iomem *ctrl_base;
phys_addr_t ctrl_size;
- struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+ struct regulator_bulk_data *supplies;
int num_bus_clks;
struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX];
@@ -144,7 +144,6 @@ struct msm_dsi_host {
u32 err_work_state;
struct work_struct err_work;
- struct work_struct hpd_work;
struct workqueue_struct *workqueue;
/* DSI 6G TX buffer*/
@@ -161,10 +160,9 @@ struct msm_dsi_host {
struct regmap *sfpb;
struct drm_display_mode *mode;
- struct msm_display_dsc_config *dsc;
+ struct drm_dsc_config *dsc;
/* connected device info */
- struct device_node *device_node;
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
@@ -205,9 +203,6 @@ static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
msm_writel(data, msm_host->ctrl_base + reg);
}
-static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
-static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
-
static const struct msm_dsi_cfg_handler *dsi_get_config(
struct msm_dsi_host *msm_host)
{
@@ -258,76 +253,6 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
return container_of(host, struct msm_dsi_host, base);
}
-static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
-{
- struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
- int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
- int i;
-
- DBG("");
- for (i = num - 1; i >= 0; i--)
- if (regs[i].disable_load >= 0)
- regulator_set_load(s[i].consumer,
- regs[i].disable_load);
-
- regulator_bulk_disable(num, s);
-}
-
-static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
-{
- struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
- int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
- int ret, i;
-
- DBG("");
- for (i = 0; i < num; i++) {
- if (regs[i].enable_load >= 0) {
- ret = regulator_set_load(s[i].consumer,
- regs[i].enable_load);
- if (ret < 0) {
- pr_err("regulator %d set op mode failed, %d\n",
- i, ret);
- goto fail;
- }
- }
- }
-
- ret = regulator_bulk_enable(num, s);
- if (ret < 0) {
- pr_err("regulator enable failed, %d\n", ret);
- goto fail;
- }
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
- return ret;
-}
-
-static int dsi_regulator_init(struct msm_dsi_host *msm_host)
-{
- struct regulator_bulk_data *s = msm_host->supplies;
- const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
- int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
- int i, ret;
-
- for (i = 0; i < num; i++)
- s[i].supply = regs[i].name;
-
- ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
- if (ret < 0) {
- pr_err("%s: failed to init regulator, ret=%d\n",
- __func__, ret);
- return ret;
- }
-
- return 0;
-}
-
int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -916,7 +841,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
{
- struct msm_display_dsc_config *dsc = msm_host->dsc;
+ struct drm_dsc_config *dsc = msm_host->dsc;
u32 reg, intf_width, reg_ctrl, reg_ctrl2;
u32 slice_per_intf, total_bytes_per_intf;
u32 pkt_per_line;
@@ -927,24 +852,24 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
* compress mode registers
*/
intf_width = hdisplay;
- slice_per_intf = DIV_ROUND_UP(intf_width, dsc->drm->slice_width);
+ slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
/* If slice_per_pkt is greater than slice_per_intf
* then default to 1. This can happen during partial
* update.
*/
- if (slice_per_intf > dsc->drm->slice_count)
- dsc->drm->slice_count = 1;
+ if (slice_per_intf > dsc->slice_count)
+ dsc->slice_count = 1;
- slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->drm->slice_width);
- bytes_in_slice = DIV_ROUND_UP(dsc->drm->slice_width * dsc->drm->bits_per_pixel, 8);
+ slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
+ bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bits_per_pixel, 8);
- dsc->drm->slice_chunk_size = bytes_in_slice;
+ dsc->slice_chunk_size = bytes_in_slice;
total_bytes_per_intf = bytes_in_slice * slice_per_intf;
eol_byte_num = total_bytes_per_intf % 3;
- pkt_per_line = slice_per_intf / dsc->drm->slice_count;
+ pkt_per_line = slice_per_intf / dsc->slice_count;
if (is_cmd_mode) /* packet data type */
reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
@@ -1009,7 +934,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
}
if (msm_host->dsc) {
- struct msm_display_dsc_config *dsc = msm_host->dsc;
+ struct drm_dsc_config *dsc = msm_host->dsc;
/* update dsc params with timing params */
if (!dsc || !mode->hdisplay || !mode->vdisplay) {
@@ -1018,9 +943,9 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
return;
}
- dsc->drm->pic_width = mode->hdisplay;
- dsc->drm->pic_height = mode->vdisplay;
- DBG("Mode %dx%d\n", dsc->drm->pic_width, dsc->drm->pic_height);
+ dsc->pic_width = mode->hdisplay;
+ dsc->pic_height = mode->vdisplay;
+ DBG("Mode %dx%d\n", dsc->pic_width, dsc->pic_height);
/* we do the calculations for dsc parameters here so that
* panel can use these parameters
@@ -1500,14 +1425,6 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
return len;
}
-static void dsi_hpd_worker(struct work_struct *work)
-{
- struct msm_dsi_host *msm_host =
- container_of(work, struct msm_dsi_host, hpd_work);
-
- drm_helper_hpd_irq_event(msm_host->dev);
-}
-
static void dsi_err_worker(struct work_struct *work)
{
struct msm_dsi_host *msm_host =
@@ -1686,6 +1603,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
msm_host->lanes = dsi->lanes;
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
+ if (dsi->dsc)
+ msm_host->dsc = dsi->dsc;
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
@@ -1697,8 +1616,6 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
return ret;
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
- queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1710,11 +1627,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
dsi_dev_detach(msm_host->pdev);
- msm_host->device_node = NULL;
-
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
- queue_work(msm_host->workqueue, &msm_host->hpd_work);
return 0;
}
@@ -1841,7 +1754,7 @@ static char bpg_offset[DSC_NUM_BUF_RANGES] = {
2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
};
-static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc)
+static int dsi_populate_dsc_params(struct drm_dsc_config *dsc)
{
int mux_words_size;
int groups_per_line, groups_total;
@@ -1854,98 +1767,98 @@ static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc)
int final_value, final_scale;
int i;
- dsc->drm->rc_model_size = 8192;
- dsc->drm->first_line_bpg_offset = 12;
- dsc->drm->rc_edge_factor = 6;
- dsc->drm->rc_tgt_offset_high = 3;
- dsc->drm->rc_tgt_offset_low = 3;
- dsc->drm->simple_422 = 0;
- dsc->drm->convert_rgb = 1;
- dsc->drm->vbr_enable = 0;
+ dsc->rc_model_size = 8192;
+ dsc->first_line_bpg_offset = 12;
+ dsc->rc_edge_factor = 6;
+ dsc->rc_tgt_offset_high = 3;
+ dsc->rc_tgt_offset_low = 3;
+ dsc->simple_422 = 0;
+ dsc->convert_rgb = 1;
+ dsc->vbr_enable = 0;
/* handle only bpp = bpc = 8 */
for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++)
- dsc->drm->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
+ dsc->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
- dsc->drm->rc_range_params[i].range_min_qp = min_qp[i];
- dsc->drm->rc_range_params[i].range_max_qp = max_qp[i];
- dsc->drm->rc_range_params[i].range_bpg_offset = bpg_offset[i];
+ dsc->rc_range_params[i].range_min_qp = min_qp[i];
+ dsc->rc_range_params[i].range_max_qp = max_qp[i];
+ dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i];
}
- dsc->drm->initial_offset = 6144; /* Not bpp 12 */
- if (dsc->drm->bits_per_pixel != 8)
- dsc->drm->initial_offset = 2048; /* bpp = 12 */
+ dsc->initial_offset = 6144; /* Not bpp 12 */
+ if (dsc->bits_per_pixel != 8)
+ dsc->initial_offset = 2048; /* bpp = 12 */
mux_words_size = 48; /* bpc == 8/10 */
- if (dsc->drm->bits_per_component == 12)
+ if (dsc->bits_per_component == 12)
mux_words_size = 64;
- dsc->drm->initial_xmit_delay = 512;
- dsc->drm->initial_scale_value = 32;
- dsc->drm->first_line_bpg_offset = 12;
- dsc->drm->line_buf_depth = dsc->drm->bits_per_component + 1;
+ dsc->initial_xmit_delay = 512;
+ dsc->initial_scale_value = 32;
+ dsc->first_line_bpg_offset = 12;
+ dsc->line_buf_depth = dsc->bits_per_component + 1;
/* bpc 8 */
- dsc->drm->flatness_min_qp = 3;
- dsc->drm->flatness_max_qp = 12;
- dsc->drm->rc_quant_incr_limit0 = 11;
- dsc->drm->rc_quant_incr_limit1 = 11;
- dsc->drm->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+ dsc->flatness_min_qp = 3;
+ dsc->flatness_max_qp = 12;
+ dsc->rc_quant_incr_limit0 = 11;
+ dsc->rc_quant_incr_limit1 = 11;
+ dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
/* FIXME: need to call drm_dsc_compute_rc_parameters() so that rest of
* params are calculated
*/
- groups_per_line = DIV_ROUND_UP(dsc->drm->slice_width, 3);
- dsc->drm->slice_chunk_size = dsc->drm->slice_width * dsc->drm->bits_per_pixel / 8;
- if ((dsc->drm->slice_width * dsc->drm->bits_per_pixel) % 8)
- dsc->drm->slice_chunk_size++;
+ groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3);
+ dsc->slice_chunk_size = dsc->slice_width * dsc->bits_per_pixel / 8;
+ if ((dsc->slice_width * dsc->bits_per_pixel) % 8)
+ dsc->slice_chunk_size++;
/* rbs-min */
- min_rate_buffer_size = dsc->drm->rc_model_size - dsc->drm->initial_offset +
- dsc->drm->initial_xmit_delay * dsc->drm->bits_per_pixel +
- groups_per_line * dsc->drm->first_line_bpg_offset;
+ min_rate_buffer_size = dsc->rc_model_size - dsc->initial_offset +
+ dsc->initial_xmit_delay * dsc->bits_per_pixel +
+ groups_per_line * dsc->first_line_bpg_offset;
- hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->drm->bits_per_pixel);
+ hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->bits_per_pixel);
- dsc->drm->initial_dec_delay = hrd_delay - dsc->drm->initial_xmit_delay;
+ dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay;
- dsc->drm->initial_scale_value = 8 * dsc->drm->rc_model_size /
- (dsc->drm->rc_model_size - dsc->drm->initial_offset);
+ dsc->initial_scale_value = 8 * dsc->rc_model_size /
+ (dsc->rc_model_size - dsc->initial_offset);
- slice_bits = 8 * dsc->drm->slice_chunk_size * dsc->drm->slice_height;
+ slice_bits = 8 * dsc->slice_chunk_size * dsc->slice_height;
- groups_total = groups_per_line * dsc->drm->slice_height;
+ groups_total = groups_per_line * dsc->slice_height;
- data = dsc->drm->first_line_bpg_offset * 2048;
+ data = dsc->first_line_bpg_offset * 2048;
- dsc->drm->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->drm->slice_height - 1));
+ dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1));
- pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->drm->bits_per_component + 4) - 2);
+ pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->bits_per_component + 4) - 2);
num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
- data = 2048 * (dsc->drm->rc_model_size - dsc->drm->initial_offset + num_extra_mux_bits);
- dsc->drm->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
+ data = 2048 * (dsc->rc_model_size - dsc->initial_offset + num_extra_mux_bits);
+ dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
/* bpp * 16 + 0.5 */
- data = dsc->drm->bits_per_pixel * 16;
+ data = dsc->bits_per_pixel * 16;
data *= 2;
data++;
data /= 2;
target_bpp_x16 = data;
- data = (dsc->drm->initial_xmit_delay * target_bpp_x16) / 16;
- final_value = dsc->drm->rc_model_size - data + num_extra_mux_bits;
- dsc->drm->final_offset = final_value;
+ data = (dsc->initial_xmit_delay * target_bpp_x16) / 16;
+ final_value = dsc->rc_model_size - data + num_extra_mux_bits;
+ dsc->final_offset = final_value;
- final_scale = 8 * dsc->drm->rc_model_size / (dsc->drm->rc_model_size - final_value);
+ final_scale = 8 * dsc->rc_model_size / (dsc->rc_model_size - final_value);
- data = (final_scale - 9) * (dsc->drm->nfl_bpg_offset + dsc->drm->slice_bpg_offset);
- dsc->drm->scale_increment_interval = (2048 * dsc->drm->final_offset) / data;
+ data = (final_scale - 9) * (dsc->nfl_bpg_offset + dsc->slice_bpg_offset);
+ dsc->scale_increment_interval = (2048 * dsc->final_offset) / data;
- dsc->drm->scale_decrement_interval = groups_per_line / (dsc->drm->initial_scale_value - 8);
+ dsc->scale_decrement_interval = groups_per_line / (dsc->initial_scale_value - 8);
return 0;
}
@@ -1954,7 +1867,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
struct device_node *np = dev->of_node;
- struct device_node *endpoint, *device_node;
+ struct device_node *endpoint;
int ret = 0;
/*
@@ -1977,16 +1890,6 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
goto err;
}
- /* Get panel node from the output port's endpoint data */
- device_node = of_graph_get_remote_node(np, 1, 0);
- if (!device_node) {
- DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
- ret = -ENODEV;
- goto err;
- }
-
- msm_host->device_node = device_node;
-
if (of_property_read_bool(np, "syscon-sfpb")) {
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
@@ -1997,8 +1900,6 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
}
}
- of_node_put(device_node);
-
err:
of_node_put(endpoint);
@@ -2028,6 +1929,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
{
struct msm_dsi_host *msm_host = NULL;
struct platform_device *pdev = msm_dsi->pdev;
+ const struct msm_dsi_config *cfg;
int ret;
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
@@ -2060,6 +1962,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
pr_err("%s: get config failed\n", __func__);
goto fail;
}
+ cfg = msm_host->cfg_hnd->cfg;
msm_host->id = dsi_host_get_id(msm_host);
if (msm_host->id < 0) {
@@ -2069,13 +1972,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
}
/* fixup base address by io offset */
- msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
+ msm_host->ctrl_base += cfg->io_offset;
- ret = dsi_regulator_init(msm_host);
- if (ret) {
- pr_err("%s: regulator init failed\n", __func__);
+ ret = devm_regulator_bulk_get_const(&pdev->dev, cfg->num_regulators,
+ cfg->regulator_data,
+ &msm_host->supplies);
+ if (ret)
goto fail;
- }
ret = dsi_clk_init(msm_host);
if (ret) {
@@ -2126,7 +2029,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
INIT_WORK(&msm_host->err_work, dsi_err_worker);
- INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
msm_dsi->id = msm_host->id;
@@ -2159,23 +2061,9 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- struct drm_panel *panel;
int ret;
msm_host->dev = dev;
- panel = msm_dsi_host_get_panel(&msm_host->base);
-
- if (!IS_ERR(panel) && panel->dsc) {
- struct msm_display_dsc_config *dsc = msm_host->dsc;
-
- if (!dsc) {
- dsc = devm_kzalloc(&msm_host->pdev->dev, sizeof(*dsc), GFP_KERNEL);
- if (!dsc)
- return -ENOMEM;
- dsc->drm = panel->dsc;
- msm_host->dsc = dsc;
- }
- }
ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
@@ -2556,7 +2444,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
msm_dsi_sfpb_config(msm_host, true);
- ret = dsi_host_regulator_enable(msm_host);
+ ret = regulator_bulk_enable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
if (ret) {
pr_err("%s:Failed to enable vregs.ret=%d\n",
__func__, ret);
@@ -2596,7 +2485,8 @@ fail_disable_clk:
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
fail_disable_reg:
- dsi_host_regulator_disable(msm_host);
+ regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
unlock_ret:
mutex_unlock(&msm_host->dev_mutex);
return ret;
@@ -2623,7 +2513,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put(&msm_host->pdev->dev);
- dsi_host_regulator_disable(msm_host);
+ regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators,
+ msm_host->supplies);
msm_dsi_sfpb_config(msm_host, false);
@@ -2659,45 +2550,33 @@ enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
const struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- struct msm_display_dsc_config *dsc = msm_host->dsc;
+ struct drm_dsc_config *dsc = msm_host->dsc;
int pic_width = mode->hdisplay;
int pic_height = mode->vdisplay;
if (!msm_host->dsc)
return MODE_OK;
- if (pic_width % dsc->drm->slice_width) {
+ if (pic_width % dsc->slice_width) {
pr_err("DSI: pic_width %d has to be multiple of slice %d\n",
- pic_width, dsc->drm->slice_width);
+ pic_width, dsc->slice_width);
return MODE_H_ILLEGAL;
}
- if (pic_height % dsc->drm->slice_height) {
+ if (pic_height % dsc->slice_height) {
pr_err("DSI: pic_height %d has to be multiple of slice %d\n",
- pic_height, dsc->drm->slice_height);
+ pic_height, dsc->slice_height);
return MODE_V_ILLEGAL;
}
return MODE_OK;
}
-struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host)
-{
- return of_drm_find_panel(to_msm_dsi_host(host)->device_node);
-}
-
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host)
{
return to_msm_dsi_host(host)->mode_flags;
}
-struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
-{
- struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-
- return of_drm_find_bridge(msm_host->device_node);
-}
-
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
@@ -2771,7 +2650,7 @@ void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host)
DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER);
}
-struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
+struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
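
The dsi_host.c conversion drops the msm_display_dsc_config wrapper in favor of writing struct drm_dsc_config directly, and the DSC hand-off moves from OF-graph panel lookup to the attach path: a DSC-capable peripheral sets mipi_dsi_device::dsc before attaching, and the host simply caches the pointer. Restated as a minimal sketch of the attach-side logic shown in the hunks above:

	/* sketch: cache the peripheral's DSC config at attach time;
	 * to_msm_dsi_host() and msm_host->dsc are as in the patch */
	static int example_attach(struct mipi_dsi_host *host,
				  struct mipi_dsi_device *dsi)
	{
		struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

		if (dsi->dsc)
			msm_host->dsc = dsi->dsc;

		return 0;
	}
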
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index cb84d185d73a..3a1417397283 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -141,14 +141,11 @@ static int enable_phy(struct msm_dsi *msm_dsi,
struct msm_dsi_phy_shared_timings *shared_timings)
{
struct msm_dsi_phy_clk_request clk_req;
- int ret;
bool is_bonded_dsi = IS_BONDED_DSI();
msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi);
- ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
-
- return ret;
+ return msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings);
}
static int
@@ -214,39 +211,26 @@ static void dsi_mgr_phy_disable(int id)
}
}
-struct dsi_connector {
- struct drm_connector base;
- int id;
-};
-
struct dsi_bridge {
struct drm_bridge base;
int id;
};
-#define to_dsi_connector(x) container_of(x, struct dsi_connector, base)
#define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base)
-static inline int dsi_mgr_connector_get_id(struct drm_connector *connector)
-{
- struct dsi_connector *dsi_connector = to_dsi_connector(connector);
- return dsi_connector->id;
-}
-
static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge)
{
struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge);
return dsi_bridge->id;
}
-static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
+static void msm_dsi_manager_set_split_display(u8 id)
{
- struct msm_drm_private *priv = conn->dev->dev_private;
- struct msm_kms *kms = priv->kms;
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+ struct msm_drm_private *priv = msm_dsi->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
struct msm_dsi *master_dsi, *slave_dsi;
- struct drm_panel *panel;
if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) {
master_dsi = other_dsi;
@@ -256,89 +240,18 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id)
slave_dsi = other_dsi;
}
- /*
- * There is only 1 panel in the global panel list for bonded DSI mode.
- * Therefore slave dsi should get the drm_panel instance from master
- * dsi.
- */
- panel = msm_dsi_host_get_panel(master_dsi->host);
- if (IS_ERR(panel)) {
- DRM_ERROR("Could not find panel for %u (%ld)\n", msm_dsi->id,
- PTR_ERR(panel));
- return PTR_ERR(panel);
- }
-
- if (!panel || !IS_BONDED_DSI())
- goto out;
-
- drm_object_attach_property(&conn->base,
- conn->dev->mode_config.tile_property, 0);
+ if (!msm_dsi->external_bridge || !IS_BONDED_DSI())
+ return;
/*
* Set split display info to kms once bonded DSI panel is connected to
* both hosts.
*/
- if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) {
+ if (other_dsi && other_dsi->external_bridge && kms->funcs->set_split_display) {
kms->funcs->set_split_display(kms, master_dsi->encoder,
slave_dsi->encoder,
msm_dsi_is_cmd_mode(msm_dsi));
}
-
-out:
- msm_dsi->panel = panel;
- return 0;
-}
-
-static enum drm_connector_status dsi_mgr_connector_detect(
- struct drm_connector *connector, bool force)
-{
- int id = dsi_mgr_connector_get_id(connector);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
-
- return msm_dsi->panel ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static void dsi_mgr_connector_destroy(struct drm_connector *connector)
-{
- struct dsi_connector *dsi_connector = to_dsi_connector(connector);
-
- DBG("");
-
- drm_connector_cleanup(connector);
-
- kfree(dsi_connector);
-}
-
-static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
-{
- int id = dsi_mgr_connector_get_id(connector);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_panel *panel = msm_dsi->panel;
- int num;
-
- if (!panel)
- return 0;
-
- /*
- * In bonded DSI mode, we have one connector that can be
- * attached to the drm_panel.
- */
- num = drm_panel_get_modes(panel, connector);
- if (!num)
- return 0;
-
- return num;
-}
-
-static struct drm_encoder *
-dsi_mgr_connector_best_encoder(struct drm_connector *connector)
-{
- int id = dsi_mgr_connector_get_id(connector);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
-
- DBG("");
- return msm_dsi_get_encoder(msm_dsi);
}
static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge)
@@ -403,7 +316,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
- struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
@@ -418,18 +330,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (!dsi_mgr_power_on_early(bridge))
dsi_mgr_bridge_power_on(bridge);
- /* Always call panel functions once, because even for dual panels,
- * there is only one drm_panel instance.
- */
- if (panel) {
- ret = drm_panel_prepare(panel);
- if (ret) {
- pr_err("%s: prepare panel %d failed, %d\n", __func__,
- id, ret);
- goto panel_prep_fail;
- }
- }
-
ret = msm_dsi_host_enable(host);
if (ret) {
pr_err("%s: enable host %d failed, %d\n", __func__, id, ret);
@@ -449,9 +349,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
host1_en_fail:
msm_dsi_host_disable(host);
host_en_fail:
- if (panel)
- drm_panel_unprepare(panel);
-panel_prep_fail:
return;
}
@@ -469,62 +366,12 @@ void msm_dsi_manager_tpg_enable(void)
}
}
-static void dsi_mgr_bridge_enable(struct drm_bridge *bridge)
-{
- int id = dsi_mgr_bridge_get_id(bridge);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_panel *panel = msm_dsi->panel;
- bool is_bonded_dsi = IS_BONDED_DSI();
- int ret;
-
- DBG("id=%d", id);
- if (!msm_dsi_device_connected(msm_dsi))
- return;
-
- /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
- if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
- return;
-
- if (panel) {
- ret = drm_panel_enable(panel);
- if (ret) {
- pr_err("%s: enable panel %d failed, %d\n", __func__, id,
- ret);
- }
- }
-}
-
-static void dsi_mgr_bridge_disable(struct drm_bridge *bridge)
-{
- int id = dsi_mgr_bridge_get_id(bridge);
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_panel *panel = msm_dsi->panel;
- bool is_bonded_dsi = IS_BONDED_DSI();
- int ret;
-
- DBG("id=%d", id);
- if (!msm_dsi_device_connected(msm_dsi))
- return;
-
- /* Do nothing with the host if it is slave-DSI in case of bonded DSI */
- if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id))
- return;
-
- if (panel) {
- ret = drm_panel_disable(panel);
- if (ret)
- pr_err("%s: Panel %d OFF failed, %d\n", __func__, id,
- ret);
- }
-}
-
static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
{
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
- struct drm_panel *panel = msm_dsi->panel;
bool is_bonded_dsi = IS_BONDED_DSI();
int ret;
@@ -551,13 +398,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
pr_err("%s: host1 disable failed, %d\n", __func__, ret);
}
- if (panel) {
- ret = drm_panel_unprepare(panel);
- if (ret)
- pr_err("%s: Panel %d unprepare failed,%d\n", __func__,
- id, ret);
- }
-
msm_dsi_host_disable_irq(host);
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_disable_irq(msm_dsi1->host);
@@ -614,76 +454,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
return msm_dsi_host_check_dsc(host, mode);
}
-static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
- .detect = dsi_mgr_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = dsi_mgr_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = {
- .get_modes = dsi_mgr_connector_get_modes,
- .best_encoder = dsi_mgr_connector_best_encoder,
-};
-
static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
.pre_enable = dsi_mgr_bridge_pre_enable,
- .enable = dsi_mgr_bridge_enable,
- .disable = dsi_mgr_bridge_disable,
.post_disable = dsi_mgr_bridge_post_disable,
.mode_set = dsi_mgr_bridge_mode_set,
.mode_valid = dsi_mgr_bridge_mode_valid,
};
-/* initialize connector when we're connected to a drm_panel */
-struct drm_connector *msm_dsi_manager_connector_init(u8 id)
-{
- struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_connector *connector = NULL;
- struct dsi_connector *dsi_connector;
- int ret;
-
- dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
- if (!dsi_connector)
- return ERR_PTR(-ENOMEM);
-
- dsi_connector->id = id;
-
- connector = &dsi_connector->base;
-
- ret = drm_connector_init(msm_dsi->dev, connector,
- &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
- if (ret)
- return ERR_PTR(ret);
-
- drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
-
- /* Enable HPD to let hpd event is handled
- * when panel is attached to the host.
- */
- connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- /* Display driver doesn't support interlace now. */
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- drm_connector_attach_encoder(connector, msm_dsi->encoder);
-
- ret = msm_dsi_manager_panel_init(connector, id);
- if (ret) {
- DRM_DEV_ERROR(msm_dsi->dev->dev, "init panel failed %d\n", ret);
- goto fail;
- }
-
- return connector;
-
-fail:
- connector->funcs->destroy(connector);
- return ERR_PTR(ret);
-}
-
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
@@ -722,18 +499,21 @@ fail:
return ERR_PTR(ret);
}
-struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
+int msm_dsi_manager_ext_bridge_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_device *dev = msm_dsi->dev;
- struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge *int_bridge, *ext_bridge;
int ret;
int_bridge = msm_dsi->bridge;
- ext_bridge = msm_dsi->external_bridge =
- msm_dsi_host_get_bridge(msm_dsi->host);
+ ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
+ msm_dsi->pdev->dev.of_node, 1, 0);
+ if (IS_ERR(ext_bridge))
+ return PTR_ERR(ext_bridge);
+
+ msm_dsi->external_bridge = ext_bridge;
encoder = msm_dsi->encoder;
@@ -745,36 +525,32 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret == -EINVAL) {
- struct drm_connector *connector;
- struct list_head *connector_list;
-
- /* link the internal dsi bridge to the external bridge */
- drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
-
/*
- * we need the drm_connector created by the external bridge
- * driver (or someone else) to feed it to our driver's
- * priv->connector[] list, mainly for msm_fbdev_init()
+	 * Link the internal DSI bridge to the external bridge; the
+	 * connector is created by the next bridge.
*/
- connector_list = &dev->mode_config.connector_list;
+ ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
+ if (ret < 0)
+ return ret;
+ } else {
+ struct drm_connector *connector;
- list_for_each_entry(connector, connector_list, head) {
- if (drm_connector_has_possible_encoder(connector, encoder))
- return connector;
+		/* We are in charge of the connector; create one now. */
+ connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ return PTR_ERR(connector);
}
- return ERR_PTR(-ENODEV);
- }
-
- connector = drm_bridge_connector_init(dev, encoder);
- if (IS_ERR(connector)) {
- DRM_ERROR("Unable to create bridge connector\n");
- return ERR_CAST(connector);
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0)
+ return ret;
}
- drm_connector_attach_encoder(connector, encoder);
+	/* The pipeline is ready; ping encoders if necessary. */
+ msm_dsi_manager_set_split_display(id);
- return connector;
+ return 0;
}
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
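For readers following the connector rework above, here is a condensed sketch of the flow msm_dsi_manager_ext_bridge_init() now implements (error handling trimmed; variable names as in the function):

	/* Fetch the next bridge from the DT graph (port 1, endpoint 0). */
	ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev,
					    msm_dsi->pdev->dev.of_node, 1, 0);

	/* Preferred path: this driver owns the connector. */
	ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret == -EINVAL) {
		/* Bridge can't do NO_CONNECTOR: let it create the connector. */
		ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
	} else {
		connector = drm_bridge_connector_init(dev, encoder);
		ret = drm_connector_attach_encoder(connector, encoder);
	}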
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index a39de3bdc7fa..7fc0975cb869 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
- timing->shared_timings.clk_pre_inc_by_2 = 0;
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->ta_go = 3;
@@ -507,82 +507,6 @@ int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
-static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- struct device *dev = &phy->pdev->dev;
- int num = phy->cfg->reg_cfg.num;
- int i, ret;
-
- for (i = 0; i < num; i++)
- s[i].supply = regs[i].name;
-
- ret = devm_regulator_bulk_get(dev, num, s);
- if (ret < 0) {
- if (ret != -EPROBE_DEFER) {
- DRM_DEV_ERROR(dev,
- "%s: failed to init regulator, ret=%d\n",
- __func__, ret);
- }
-
- return ret;
- }
-
- return 0;
-}
-
-static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- int num = phy->cfg->reg_cfg.num;
- int i;
-
- DBG("");
- for (i = num - 1; i >= 0; i--)
- if (regs[i].disable_load >= 0)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
-
- regulator_bulk_disable(num, s);
-}
-
-static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
-{
- struct regulator_bulk_data *s = phy->supplies;
- const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
- struct device *dev = &phy->pdev->dev;
- int num = phy->cfg->reg_cfg.num;
- int ret, i;
-
- DBG("");
- for (i = 0; i < num; i++) {
- if (regs[i].enable_load >= 0) {
- ret = regulator_set_load(s[i].consumer,
- regs[i].enable_load);
- if (ret < 0) {
- DRM_DEV_ERROR(dev,
- "regulator %d set op mode failed, %d\n",
- i, ret);
- goto fail;
- }
- }
- }
-
- ret = regulator_bulk_enable(num, s);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
- goto fail;
- }
-
- return 0;
-
-fail:
- for (i--; i >= 0; i--)
- regulator_set_load(s[i].consumer, regs[i].disable_load);
- return ret;
-}
-
static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
struct device *dev = &phy->pdev->dev;
@@ -697,12 +621,9 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pdev = pdev;
phy->id = dsi_phy_get_id(phy);
- if (phy->id < 0) {
- ret = phy->id;
- DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
- __func__, ret);
- goto fail;
- }
+ if (phy->id < 0)
+ return dev_err_probe(dev, phy->id,
+ "Couldn't identify PHY index\n");
phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
"qcom,dsi-phy-regulator-ldo-mode");
@@ -710,86 +631,71 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->cphy_mode = (phy_type == PHY_TYPE_CPHY);
phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size);
- if (IS_ERR(phy->base)) {
- DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->base))
+ return dev_err_probe(dev, PTR_ERR(phy->base),
+ "Failed to map phy base\n");
phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size);
- if (IS_ERR(phy->pll_base)) {
- DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->pll_base))
+ return dev_err_probe(dev, PTR_ERR(phy->pll_base),
+ "Failed to map pll base\n");
if (phy->cfg->has_phy_lane) {
phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size);
- if (IS_ERR(phy->lane_base)) {
- DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->lane_base))
+ return dev_err_probe(dev, PTR_ERR(phy->lane_base),
+ "Failed to map phy lane base\n");
}
if (phy->cfg->has_phy_regulator) {
phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size);
- if (IS_ERR(phy->reg_base)) {
- DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
- ret = -ENOMEM;
- goto fail;
- }
+ if (IS_ERR(phy->reg_base))
+ return dev_err_probe(dev, PTR_ERR(phy->reg_base),
+ "Failed to map phy regulator base\n");
}
if (phy->cfg->ops.parse_dt_properties) {
ret = phy->cfg->ops.parse_dt_properties(phy);
if (ret)
- goto fail;
+ return ret;
}
- ret = dsi_phy_regulator_init(phy);
+ ret = devm_regulator_bulk_get_const(dev, phy->cfg->num_regulators,
+ phy->cfg->regulator_data,
+ &phy->supplies);
if (ret)
- goto fail;
+ return ret;
phy->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(phy->ahb_clk)) {
- DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
- ret = PTR_ERR(phy->ahb_clk);
- goto fail;
- }
+ if (IS_ERR(phy->ahb_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
+ "Unable to get ahb clk\n");
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
ret = dsi_phy_enable_resource(phy);
if (ret)
- goto fail;
+ return ret;
if (phy->cfg->ops.pll_init) {
ret = phy->cfg->ops.pll_init(phy);
- if (ret) {
- DRM_DEV_INFO(dev,
- "%s: pll init failed: %d, need separate pll clk driver\n",
- __func__, ret);
- goto fail;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "PLL init failed; need separate clk driver\n");
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
phy->provided_clocks);
- if (ret) {
- DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
- goto fail;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register clk provider\n");
dsi_phy_disable_resource(phy);
platform_set_drvdata(pdev, phy);
return 0;
-
-fail:
- return ret;
}
static struct platform_driver dsi_phy_platform_driver = {
@@ -829,7 +735,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
goto res_en_fail;
}
- ret = dsi_phy_regulator_enable(phy);
+ ret = regulator_bulk_enable(phy->cfg->num_regulators, phy->supplies);
if (ret) {
DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
@@ -866,7 +772,7 @@ pll_restor_fail:
if (phy->cfg->ops.disable)
phy->cfg->ops.disable(phy);
phy_en_fail:
- dsi_phy_regulator_disable(phy);
+ regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
reg_en_fail:
dsi_phy_disable_resource(phy);
res_en_fail:
@@ -880,7 +786,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
phy->cfg->ops.disable(phy);
- dsi_phy_regulator_disable(phy);
+ regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
dsi_phy_disable_resource(phy);
}
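Two probe-path conventions drive the simplification above. A minimal sketch with illustrative names (example_probe_step is not part of the driver):

static int example_probe_step(struct device *dev,
			      const struct msm_dsi_phy_cfg *cfg,
			      struct regulator_bulk_data **supplies)
{
	int ret;

	/*
	 * One call fetches all supplies from a shared const table and
	 * applies each entry's init_load_uA, replacing the open-coded
	 * dsi_phy_regulator_init()/_enable() pair removed above.
	 */
	ret = devm_regulator_bulk_get_const(dev, cfg->num_regulators,
					    cfg->regulator_data, supplies);
	if (ret)
		/*
		 * dev_err_probe() logs the failure (quietly for
		 * -EPROBE_DEFER) and returns the error in one statement,
		 * which is why the "goto fail" labels could go away.
		 */
		return dev_err_probe(dev, ret, "Failed to get supplies\n");

	return 0;
}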
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index dc91b43d5a38..60a99c6525b2 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -29,7 +29,8 @@ struct msm_dsi_phy_ops {
};
struct msm_dsi_phy_cfg {
- struct dsi_reg_config reg_cfg;
+ const struct regulator_bulk_data *regulator_data;
+ int num_regulators;
struct msm_dsi_phy_ops ops;
unsigned long min_pll_rate;
@@ -98,7 +99,7 @@ struct msm_dsi_phy {
int id;
struct clk *ahb_clk;
- struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
+ struct regulator_bulk_data *supplies;
struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;
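With the struct change above, each PHY variant describes its supplies as data. An illustrative (not real) variant table, following the shape the real cfgs take below:

static const struct regulator_bulk_data example_phy_regulators[] = {
	{ .supply = "vdda", .init_load_uA = 21800 },	/* illustrative */
};

static const struct msm_dsi_phy_cfg example_phy_cfgs = {
	.regulator_data = example_phy_regulators,
	.num_regulators = ARRAY_SIZE(example_phy_regulators),
	/* .ops, PLL rates, .io_start as in the real cfgs below */
};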
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index 08b015ea1b1e..27b592c776a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -188,19 +188,19 @@ static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
- config->ssc_stepsize & 0xff);
+ config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
- config->ssc_stepsize >> 8);
+ config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
- config->ssc_div_per & 0xff);
+ config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
- config->ssc_div_per >> 8);
+ config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
- config->ssc_adj_per & 0xff);
+ config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
- config->ssc_adj_per >> 8);
+ config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
- SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+ SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
}
}
@@ -215,16 +215,19 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
- 0xba);
- dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ 0xba);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE,
+ 0x0c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
- dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO,
+ 0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
- dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1,
+ 0xfa);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
- 0x4c);
+ 0x4c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
@@ -236,18 +239,18 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *conf
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
- config->decimal_div_start);
+ config->decimal_div_start);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
- config->frac_div_start & 0xff);
+ config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
- (config->frac_div_start & 0xff00) >> 8);
+ (config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
- (config->frac_div_start & 0x30000) >> 16);
+ (config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
- config->pll_clock_inverters);
+ config->pll_clock_inverters);
}
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -306,7 +309,7 @@ static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
- data & ~BIT(5));
+ data & ~BIT(5));
ndelay(250);
}
@@ -315,7 +318,7 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
- data | BIT(5));
+ data | BIT(5));
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
ndelay(250);
}
@@ -326,7 +329,7 @@ static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
- data & ~BIT(5));
+ data & ~BIT(5));
}
static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
@@ -335,7 +338,7 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
- data | BIT(5));
+ data | BIT(5));
}
static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
@@ -356,7 +359,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
/* Start PLL */
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
- 0x01);
+ 0x01);
/*
* ensure all PLL configurations are written prior to checking
@@ -378,10 +381,10 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
dsi_pll_enable_global_clk(pll_10nm->slave);
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
- 0x01);
+ 0x01);
if (pll_10nm->slave)
dsi_phy_write(pll_10nm->slave->phy->base +
- REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+ REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
error:
return rc;
@@ -486,7 +489,7 @@ static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
- REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
@@ -515,7 +518,7 @@ static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
- cached->bit_clk_div | (cached->pix_clk_div << 4));
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
@@ -571,64 +574,59 @@ static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
*/
static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent[32], vco_name[32];
- char parent2[32], parent3[32], parent4[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_10nm_vco,
};
struct device *dev = &pll_10nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
+ struct clk_hw *pll_post_out_div, *pclk_mux;
int ret;
DBG("DSI%d", pll_10nm->phy->id);
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id);
pll_10nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent, CLK_SET_RATE_PARENT,
- pll_10nm->phy->pll_base +
- REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
- 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_10nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_10nm->phy->pll_base +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(pll_out_div)) {
+ ret = PTR_ERR(pll_out_div);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id);
/* BIT CLK: DIV_CTRL_3_0 */
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- CLK_SET_RATE_PARENT,
- pll_10nm->phy->base +
- REG_DSI_10nm_PHY_CMN_CLK_CFG0,
- 0, 4, CLK_DIVIDER_ONE_BASED,
- &pll_10nm->postdiv_lock);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ pll_out_div, CLK_SET_RATE_PARENT,
+ pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
+ if (IS_ERR(pll_bit)) {
+ ret = PTR_ERR(pll_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1, 8);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ pll_bit, CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -636,52 +634,45 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 2);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_bit, 0, 1, 2);
+ if (IS_ERR(pll_by_2_bit)) {
+ ret = PTR_ERR(pll_by_2_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 4);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_out_div, 0, 1, 4);
+ if (IS_ERR(pll_post_out_div)) {
+ ret = PTR_ERR(pll_post_out_div);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
- snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
- snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
- snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
-
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent, parent2, parent3, parent4
- }), 4, 0, pll_10nm->phy->base +
- REG_DSI_10nm_PHY_CMN_CLK_CFG1,
- 0, 2, 0, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id);
+
+ pclk_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ pll_bit,
+ pll_by_2_bit,
+ pll_out_div,
+ pll_post_out_div,
+ }), 4, 0, pll_10nm->phy->base +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL);
+ if (IS_ERR(pclk_mux)) {
+ ret = PTR_ERR(pclk_mux);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
- snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- 0, pll_10nm->phy->base +
- REG_DSI_10nm_PHY_CMN_CLK_CFG0,
- 4, 4, CLK_DIVIDER_ONE_BASED,
- &pll_10nm->postdiv_lock);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux,
+ 0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -1028,14 +1019,14 @@ static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy)
return 0;
}
+static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_10nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,
@@ -1052,12 +1043,8 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_10nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators),
.ops = {
.enable = dsi_10nm_phy_enable,
.disable = dsi_10nm_phy_disable,
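The clock-tree rework in this file follows one pattern throughout: pass parents as struct clk_hw pointers rather than snprintf'd name strings. A trimmed sketch, with a placeholder register offset:

	struct clk_hw *div, *byte;

	/* Child wired to its parent by pointer; no name lookup needed. */
	div = devm_clk_hw_register_divider_parent_hw(dev, "example_div",
			vco_hw, CLK_SET_RATE_PARENT,
			base + 0x0 /* placeholder */,
			0, 4, CLK_DIVIDER_ONE_BASED, lock);
	if (IS_ERR(div))
		return PTR_ERR(div);

	byte = devm_clk_hw_register_fixed_factor_parent_hw(dev,
			"example_byte", div, CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(byte))
		return PTR_ERR(byte);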
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 8199c53567f4..0f8f4ca46429 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -711,7 +711,7 @@ static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy)
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev,
- "restore vco rate failed. ret=%d\n", ret);
+ "restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -764,14 +764,14 @@ static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy)
static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
const char *name,
- const char *parent_name,
+ const struct clk_hw *parent_hw,
unsigned long flags,
u8 shift)
{
struct dsi_pll_14nm_postdiv *pll_postdiv;
struct device *dev = &pll_14nm->phy->pdev->dev;
struct clk_init_data postdiv_init = {
- .parent_names = (const char *[]) { parent_name },
+ .parent_hws = (const struct clk_hw *[]) { parent_hw },
.num_parents = 1,
.name = name,
.flags = flags,
@@ -800,72 +800,70 @@ static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent[32], vco_name[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_14nm_vco,
};
struct device *dev = &pll_14nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *n1_postdiv, *n1_postdivby2;
int ret;
DBG("DSI%d", pll_14nm->phy->id);
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_14nm->phy->id);
pll_14nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
- hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
- CLK_SET_RATE_PARENT, 0);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ n1_postdiv = pll_14nm_postdiv_register(pll_14nm, clk_name,
+ &pll_14nm->clk_hw, CLK_SET_RATE_PARENT, 0);
+ if (IS_ERR(n1_postdiv))
+ return PTR_ERR(n1_postdiv);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_14nm->phy->id);
/* DSI Byte clock = VCO_CLK / N1 / 8 */
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1, 8);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ n1_postdiv, CLK_SET_RATE_PARENT, 1, 8);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
/*
* Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider
* on the way. Don't let it set parent.
*/
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ n1_postdivby2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, n1_postdiv, 0, 1, 2);
+ if (IS_ERR(n1_postdivby2))
+ return PTR_ERR(n1_postdivby2);
- snprintf(clk_name, 32, "dsi%dpll", pll_14nm->phy->id);
- snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_14nm->phy->id);
/* DSI pixel clock = VCO_CLK / N1 / 2 / N2
* This is the output of N2 post-divider, bits 4-7 in
* REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
*/
- hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
+ hw = pll_14nm_postdiv_register(pll_14nm, clk_name, n1_postdivby2,
+ 0, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
- provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+ provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
return 0;
}
@@ -952,7 +950,8 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
return -EINVAL;
}
@@ -1005,7 +1004,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
ret = dsi_14nm_set_usecase(phy);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
- __func__, ret);
+ __func__, ret);
return ret;
}
@@ -1024,14 +1023,18 @@ static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
+static const struct regulator_bulk_data dsi_phy_14nm_17mA_regulators[] = {
+ { .supply = "vcca", .init_load_uA = 17000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
+ { .supply = "vcca", .init_load_uA = 73400 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vcca", 17000, 32},
- },
- },
+ .regulator_data = dsi_phy_14nm_17mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
@@ -1047,12 +1050,8 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vcca", 73400, 32},
- },
- },
+ .regulator_data = dsi_phy_14nm_73p4mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_73p4mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
@@ -1068,12 +1067,8 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vcca", 17000, 32},
- },
- },
+ .regulator_data = dsi_phy_14nm_17mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
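Custom clocks get the same treatment through clk_init_data, as in pll_14nm_postdiv_register() above. A sketch with assumed ops and struct names:

	struct clk_init_data init = {
		.name = name,
		.parent_hws = (const struct clk_hw *[]) { parent_hw },
		.num_parents = 1,
		.flags = flags,
		.ops = &example_postdiv_ops,	/* assumed to exist */
	};

	postdiv->hw.init = &init;
	ret = devm_clk_hw_register(dev, &postdiv->hw);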
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index ee7c418a1c29..c9752b991744 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -129,15 +129,15 @@ static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
dsi_20nm_phy_regulator_ctrl(phy, false);
}
+static const struct regulator_bulk_data dsi_phy_20nm_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+ { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */
+};
+
const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 2,
- .regs = {
- {"vddio", 100000, 100}, /* 1.8 V */
- {"vcca", 10000, 100}, /* 1.0 V */
- },
- },
+ .regulator_data = dsi_phy_20nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_20nm_regulators),
.ops = {
.enable = dsi_20nm_phy_enable,
.disable = dsi_20nm_phy_disable,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 48eab80b548e..4c1bf55c5f38 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -104,7 +104,7 @@ static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
* reset bit off and back on.
*/
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
- DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
+ DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}
@@ -201,9 +201,9 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
- DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
+ DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
- DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
+ DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
/* Add hardware recommended delay for correct PLL configuration */
@@ -316,12 +316,12 @@ static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
for (i = 0; i < 2; i++) {
/* DSI Uniphy lock detect setting */
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
- 0x0c, 100);
+ 0x0c, 100);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
/* poll for PLL ready status */
- locked = pll_28nm_poll_for_ready(pll_28nm,
- max_reads, timeout_us);
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
+ timeout_us);
if (locked)
break;
@@ -508,28 +508,28 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
}
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
- cached_state->postdiv3);
+ cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
- cached_state->postdiv1);
+ cached_state->postdiv1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
- cached_state->byte_mux);
+ cached_state->byte_mux);
return 0;
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent1[32], parent2[32], vco_name[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref", .name = "xo",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
};
struct device *dev = &pll_28nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux;
int ret;
DBG("%d", pll_28nm->phy->id);
@@ -539,55 +539,49 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
else
vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
pll_28nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent1, CLK_SET_RATE_PARENT,
+ snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
+ analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, CLK_SET_RATE_PARENT,
pll_28nm->phy->pll_base +
- REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
0, 4, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
- parent1, CLK_SET_RATE_PARENT,
- 1, 2);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent1, 0, pll_28nm->phy->pll_base +
+ if (IS_ERR(analog_postdiv))
+ return PTR_ERR(analog_postdiv);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
+ indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2);
+ if (IS_ERR(indirect_path_div2))
+ return PTR_ERR(indirect_path_div2);
+
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
- 0, 8, 0, NULL);
+ 0, 8, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent1, parent2
+ snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id);
+ byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ &pll_28nm->clk_hw,
+ indirect_path_div2,
}), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
- REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
+ if (IS_ERR(byte_mux))
+ return PTR_ERR(byte_mux);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
- snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
- parent1, CLK_SET_RATE_PARENT, 1, 4);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ byte_mux, CLK_SET_RATE_PARENT, 1, 4);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
@@ -627,31 +621,31 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
- DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
- DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
- DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
if (timing->clk_zero & BIT(8))
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
- DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
+ DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
- DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
- DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
- DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
- DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
- DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
- DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
- DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
- DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
- DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+ DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
@@ -713,7 +707,8 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
return -EINVAL;
}
@@ -769,14 +764,14 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
+static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100},
- },
- },
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
@@ -792,12 +787,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100},
- },
- },
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
@@ -813,12 +804,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = dsi_phy_28nm_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index fc56cdcc9ad6..26c08047e20c 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -104,29 +104,29 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
fb_divider = fb_divider / 2 - 1;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
- fb_divider & 0xff);
+ fb_divider & 0xff);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
val |= (fb_divider >> 8) & 0x07;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
- val);
+ val);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
- val);
+ val);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
- 0xf);
+ 0xf);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val |= 0x7 << 4;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
- val);
+ val);
return 0;
}
@@ -206,7 +206,7 @@ static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
/* enable the PLL */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
- DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
+ DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
@@ -367,23 +367,23 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
cached_state->vco_rate, 0);
if (ret) {
DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
- "restore vco rate failed. ret=%d\n", ret);
+ "restore vco rate failed. ret=%d\n", ret);
return ret;
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
- cached_state->postdiv3);
+ cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
- cached_state->postdiv2);
+ cached_state->postdiv2);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
- cached_state->postdiv1);
+ cached_state->postdiv1);
return 0;
}
static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
{
- char *clk_name, *parent_name, *vco_name;
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
@@ -404,20 +404,8 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
if (!bytediv)
return -ENOMEM;
- vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
- if (!vco_name)
- return -ENOMEM;
-
- parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
- if (!parent_name)
- return -ENOMEM;
-
- clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
- if (!clk_name)
- return -ENOMEM;
-
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- vco_init.name = vco_name;
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id);
+ vco_init.name = clk_name;
pll_28nm->clk_hw.init = &vco_init;
@@ -429,13 +417,14 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
bytediv->hw.init = &bytediv_init;
bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
- snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id + 1);
bytediv_init.name = clk_name;
bytediv_init.ops = &clk_bytediv_ops;
bytediv_init.flags = CLK_SET_RATE_PARENT;
- bytediv_init.parent_names = (const char * const *) &parent_name;
+ bytediv_init.parent_hws = (const struct clk_hw*[]){
+ &pll_28nm->clk_hw,
+ };
bytediv_init.num_parents = 1;
/* DIV2 */
@@ -444,12 +433,12 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
return ret;
provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
- snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id + 1);
/* DIV3 */
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent_name, 0, pll_28nm->phy->pll_base +
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base +
REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
- 0, 8, 0, NULL);
+ 0, 8, 0, NULL);
if (IS_ERR(hw))
return PTR_ERR(hw);
provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
@@ -489,29 +478,29 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
- DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
+ DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
- DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
+ DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
- DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
+ DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
- DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
+ DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
- DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
+ DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
- DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
+ DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
- DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
+ DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
- DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
+ DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
- DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
- DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
+ DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
- DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
+ DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
- DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
+ DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
}
static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
@@ -523,7 +512,7 @@ static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
- 0x100);
+ 0x100);
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
@@ -544,7 +533,7 @@ static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
int i = 5000;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
- 0x3);
+ 0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
@@ -577,11 +566,11 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
- 0x00);
+ 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
- 0x01);
+ 0x01);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
- 0x66);
+ 0x66);
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
@@ -602,7 +591,8 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: D-PHY timing calculation failed\n", __func__);
+ "%s: D-PHY timing calculation failed\n",
+ __func__);
return -EINVAL;
}
@@ -648,14 +638,14 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
wmb();
}
+static const struct regulator_bulk_data dsi_phy_28nm_8960_regulators[] = {
+ { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */
+};
+
const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.has_phy_regulator = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vddio", 100000, 100}, /* 1.8 V */
- },
- },
+ .regulator_data = dsi_phy_28nm_8960_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_28nm_8960_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
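A note on the single clk_name buffer used by every pll_*_register() above: the clk core duplicates the name at registration, so one stack buffer can be re-snprintf'd for each child, and the three devm_kzalloc'd name buffers removed from this file were never needed. Sketch:

	char clk_name[32];

	snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", id + 1);
	bytediv_init.name = clk_name;	/* copied by the clk core */
	ret = devm_clk_hw_register(dev, &bytediv->hw);

	snprintf(clk_name, sizeof(clk_name), "dsi%dpll", id + 1); /* reuse */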
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 66ed1919a1db..9e7fa7d88ead 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -176,19 +176,19 @@ static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *c
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
- config->ssc_stepsize & 0xff);
+ config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
- config->ssc_stepsize >> 8);
+ config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
- config->ssc_div_per & 0xff);
+ config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
- config->ssc_div_per >> 8);
+ config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
- config->ssc_adj_per & 0xff);
+ config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
- config->ssc_adj_per >> 8);
+ config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
- SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+ SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
}
}
@@ -208,7 +208,7 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
}
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
- analog_controls_five_1);
+ analog_controls_five_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
@@ -245,17 +245,20 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
void __iomem *base = pll->phy->pll_base;
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, config->decimal_div_start);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1,
+ config->decimal_div_start);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,
- config->frac_div_start & 0xff);
+ config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,
- (config->frac_div_start & 0xff00) >> 8);
+ (config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
- (config->frac_div_start & 0x30000) >> 16);
+ (config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, pll->phy->cphy_mode ? 0x00 : 0x10);
- dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, config->pll_clock_inverters);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1,
+ pll->phy->cphy_mode ? 0x00 : 0x10);
+ dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS,
+ config->pll_clock_inverters);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -341,7 +344,7 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- data | BIT(5) | BIT(4));
+ data | BIT(5) | BIT(4));
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
@@ -500,7 +503,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base +
- REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
@@ -529,7 +532,7 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
dsi_phy_write(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
- cached->bit_clk_div | (cached->pix_clk_div << 4));
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
val = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
@@ -585,65 +588,60 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
*/
static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks)
{
- char clk_name[32], parent[32], vco_name[32];
- char parent2[32];
+ char clk_name[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
- .name = vco_name,
+ .name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_7nm_vco,
};
struct device *dev = &pll_7nm->phy->pdev->dev;
- struct clk_hw *hw;
+ struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit;
+ struct clk_hw *pll_post_out_div, *phy_pll_out_dsi_parent;
int ret;
DBG("DSI%d", pll_7nm->phy->id);
- snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_7nm->phy->id);
pll_7nm->clk_hw.init = &vco_init;
ret = devm_clk_hw_register(dev, &pll_7nm->clk_hw);
if (ret)
return ret;
- snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- hw = devm_clk_hw_register_divider(dev, clk_name,
- parent, CLK_SET_RATE_PARENT,
- pll_7nm->phy->pll_base +
- REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
- 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ &pll_7nm->clk_hw, CLK_SET_RATE_PARENT,
+ pll_7nm->phy->pll_base +
+ REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(pll_out_div)) {
+ ret = PTR_ERR(pll_out_div);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_7nm->phy->id);
/* BIT CLK: DIV_CTRL_3_0 */
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- CLK_SET_RATE_PARENT,
- pll_7nm->phy->base +
- REG_DSI_7nm_PHY_CMN_CLK_CFG0,
- 0, 4, CLK_DIVIDER_ONE_BASED,
- &pll_7nm->postdiv_lock);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ pll_out_div, CLK_SET_RATE_PARENT,
+ pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
+ if (IS_ERR(pll_bit)) {
+ ret = PTR_ERR(pll_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- CLK_SET_RATE_PARENT, 1,
- pll_7nm->phy->cphy_mode ? 7 : 8);
+ hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
+ pll_bit, CLK_SET_RATE_PARENT, 1,
+ pll_7nm->phy->cphy_mode ? 7 : 8);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -651,25 +649,25 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
provided_clocks[DSI_BYTE_PLL_CLK] = hw;
- snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
- 0, 1, 2);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev,
+ clk_name, pll_bit, 0, 1, 2);
+ if (IS_ERR(pll_by_2_bit)) {
+ ret = PTR_ERR(pll_by_2_bit);
goto fail;
}
- snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
if (pll_7nm->phy->cphy_mode)
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 2, 7);
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
+ dev, clk_name, pll_out_div, 0, 2, 7);
else
- hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 4);
- if (IS_ERR(hw)) {
- ret = PTR_ERR(hw);
+ pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
+ dev, clk_name, pll_out_div, 0, 1, 4);
+ if (IS_ERR(pll_post_out_div)) {
+ ret = PTR_ERR(pll_post_out_div);
goto fail;
}
@@ -682,34 +680,32 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3);
- snprintf(parent, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+ phy_pll_out_dsi_parent = pll_post_out_div;
} else {
- snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
- snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
- snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
-
- hw = devm_clk_hw_register_mux(dev, clk_name,
- ((const char *[]){
- parent, parent2,
- }), 2, 0, pll_7nm->phy->base +
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
+
+ hw = devm_clk_hw_register_mux_parent_hws(dev, clk_name,
+ ((const struct clk_hw *[]){
+ pll_bit,
+ pll_by_2_bit,
+ }), 2, 0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- 0, 1, 0, NULL);
+ 0, 1, 0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
}
- snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
+ phy_pll_out_dsi_parent = hw;
}
- snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
+ snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
- hw = devm_clk_hw_register_divider(dev, clk_name, parent,
- 0, pll_7nm->phy->base +
- REG_DSI_7nm_PHY_CMN_CLK_CFG0,
- 4, 4, CLK_DIVIDER_ONE_BASED,
- &pll_7nm->postdiv_lock);
+ hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name,
+ phy_pll_out_dsi_parent, 0,
+ pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -841,7 +837,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev,
- "%s: PHY timing calculation failed\n", __func__);
+ "%s: PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -960,10 +956,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5,
- timing->shared_timings.clk_pre);
+ timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7,
- timing->shared_timings.clk_post);
+ timing->shared_timings.clk_post);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
@@ -982,9 +978,9 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
- timing->shared_timings.clk_pre);
+ timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
- timing->shared_timings.clk_post);
+ timing->shared_timings.clk_post);
}
/* DSI lane settings */
@@ -1036,14 +1032,18 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
DBG("DSI%d PHY disabled", phy->id);
}
+static const struct regulator_bulk_data dsi_phy_7nm_36mA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 36000 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_37550uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 37550 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
@@ -1065,12 +1065,8 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 36000, 32},
- },
- },
+ .regulator_data = dsi_phy_7nm_36mA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
@@ -1087,12 +1083,8 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
.has_phy_lane = true,
- .reg_cfg = {
- .num = 1,
- .regs = {
- {"vdds", 37550, 0},
- },
- },
+ .regulator_data = dsi_phy_7nm_37550uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_37550uA_regulators),
.ops = {
.enable = dsi_7nm_phy_enable,
.disable = dsi_7nm_phy_disable,
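
The hunks above replace every snprintf()-formatted parent-name string with a direct struct clk_hw reference. A minimal sketch of the new registration pattern, with a hypothetical clock name and register layout (only the API shape is taken from the driver):

#include <linux/clk-provider.h>

/*
 * Hedged sketch: "example_div" and the shift/width values are
 * placeholders. The parent is handed over as a clk_hw pointer, so no
 * global name lookup happens and a single scratch buffer can be
 * reused for every child clock's name, as pll_7nm_register() now does.
 */
static struct clk_hw *example_register_div(struct device *dev,
					   struct clk_hw *parent,
					   void __iomem *reg,
					   spinlock_t *lock)
{
	return devm_clk_hw_register_divider_parent_hw(dev, "example_div",
			parent, CLK_SET_RATE_PARENT, reg,
			0, 4, CLK_DIVIDER_ONE_BASED, lock);
}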
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index b06d9d25a189..4dd055416620 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -691,15 +691,13 @@ static const struct clk_ops hdmi_8996_pll_ops = {
.is_enabled = hdmi_8996_pll_is_enabled,
};
-static const char * const hdmi_pll_parents[] = {
- "xo",
-};
-
static const struct clk_init_data pll_init = {
.name = "hdmipll",
.ops = &hdmi_8996_pll_ops,
- .parent_names = hdmi_pll_parents,
- .num_parents = ARRAY_SIZE(hdmi_pll_parents),
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
.flags = CLK_IGNORE_UNUSED,
};
@@ -707,8 +705,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdmi_pll_8996 *pll;
- struct clk *clk;
- int i;
+ int i, ret;
pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
if (!pll)
@@ -735,10 +732,16 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
}
pll->clk_hw.init = &pll_init;
- clk = devm_clk_register(dev, &pll->clk_hw);
- if (IS_ERR(clk)) {
+ ret = devm_clk_hw_register(dev, &pll->clk_hw);
+ if (ret) {
DRM_DEV_ERROR(dev, "failed to register pll clock\n");
- return -EINVAL;
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
+ return ret;
}
return 0;
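
The conversion above drops the global parent_names lookup in favour of clk_parent_data, and registers the PLL as a devicetree clock provider via devm_of_clk_add_hw_provider(), so consumers can reference it by phandle. A sketch of the parent_data semantics (the parent fields are copied from the hunk; the surrounding struct is illustrative):

#include <linux/clk-provider.h>

/*
 * .fw_name is resolved through the consumer's "clock-names" DT
 * property ("xo"); .name is only a legacy fallback matching a
 * globally registered clock ("xo_board") on DTs that lack the
 * clocks property. The ops pointer would be the driver's
 * hdmi_8996_pll_ops from above.
 */
static const struct clk_init_data example_pll_init = {
	.name = "examplepll",
	.ops = &hdmi_8996_pll_ops,
	.parent_data = (const struct clk_parent_data[]){
		{ .fw_name = "xo", .name = "xo_board" },
	},
	.num_parents = 1,
	.flags = CLK_IGNORE_UNUSED,
};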
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 7d2dab260f86..95f4374ae21c 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -7,6 +7,7 @@
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
+#include <linux/fault-inject.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
@@ -326,6 +327,13 @@ void msm_debugfs_init(struct drm_minor *minor)
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
+ &fail_gem_alloc);
+ fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
+ &fail_gem_iova);
+#endif
}
#endif
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 226d8d4629d2..28034c21f6bc 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -6,6 +6,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/fault-inject.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@@ -78,6 +79,11 @@ static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
+#ifdef CONFIG_FAULT_INJECTION
+DECLARE_FAULT_ATTR(fail_gem_alloc);
+DECLARE_FAULT_ATTR(fail_gem_iova);
+#endif
+
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -418,14 +424,18 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
- INIT_LIST_HEAD(&priv->inactive_willneed);
- INIT_LIST_HEAD(&priv->inactive_dontneed);
- INIT_LIST_HEAD(&priv->inactive_unpinned);
- mutex_init(&priv->mm_lock);
+ /*
+ * Initialize the LRUs:
+ */
+ mutex_init(&priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
+ drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
- might_lock(&priv->mm_lock);
+ might_lock(&priv->lru.lock);
fs_reclaim_release(GFP_KERNEL);
drm_mode_config_init(ddev);
@@ -469,6 +479,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
}
}
+ drm_helper_move_panel_connectors_to_head(ddev);
+
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
@@ -697,6 +709,9 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
flags |= MSM_BO_WC;
}
+ if (should_fail(&fail_gem_alloc, args->size))
+ return -ENOMEM;
+
return msm_gem_new_handle(dev, file, args->size,
args->flags, &args->handle, NULL);
}
@@ -758,6 +773,9 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
if (!priv->gpu)
return -EINVAL;
+ if (should_fail(&fail_gem_iova, obj->size))
+ return -ENOMEM;
+
/*
* Don't pin the memory here - just get an address so that userspace can
* be productive
@@ -779,6 +797,9 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
if (priv->gpu->aspace == ctx->aspace)
return -EOPNOTSUPP;
+ if (should_fail(&fail_gem_iova, obj->size))
+ return -ENOMEM;
+
return msm_gem_set_iova(obj, ctx->aspace, iova);
}
@@ -883,13 +904,13 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
* retired, so if the fence is not found it means there is nothing
* to wait for
*/
- ret = mutex_lock_interruptible(&queue->lock);
+ ret = mutex_lock_interruptible(&queue->idr_lock);
if (ret)
return ret;
fence = idr_find(&queue->fence_idr, fence_id);
if (fence)
fence = dma_fence_get_rcu(fence);
- mutex_unlock(&queue->lock);
+ mutex_unlock(&queue->idr_lock);
if (!fence)
return 0;
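
Taken together, the msm_debugfs.c and msm_drv.c hunks wire up standard kernel fault injection: the attrs are declared once, consulted on the GEM allocation and iova paths, and exposed under debugfs where the usual knobs (probability, interval, times, verbose) can be tuned. A condensed sketch of the pattern (helper names are illustrative):

#include <linux/fault-inject.h>
#include <linux/debugfs.h>

DECLARE_FAULT_ATTR(fail_gem_alloc);

static int example_gem_alloc(size_t size)
{
	/* returns true when the configured fault should fire */
	if (should_fail(&fail_gem_alloc, size))
		return -ENOMEM;
	return 0;	/* normal allocation path */
}

static void example_debugfs_init(struct dentry *root)
{
	/* creates a fail_gem_alloc/ dir with the standard tuning knobs */
	fault_create_debugfs_attr("fail_gem_alloc", root, &fail_gem_alloc);
}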
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 7d5fb0fc22dd..b2ea262296a4 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -33,6 +33,13 @@
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
+#ifdef CONFIG_FAULT_INJECTION
+extern struct fault_attr fail_gem_alloc;
+extern struct fault_attr fail_gem_iova;
+#else
+# define should_fail(attr, size) 0
+#endif
+
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
@@ -95,11 +102,6 @@ struct msm_drm_thread {
struct kthread_worker *worker;
};
-/* DSC config */
-struct msm_display_dsc_config {
- struct drm_dsc_config *drm;
-};
-
struct msm_drm_private {
struct drm_device *dev;
@@ -141,28 +143,60 @@ struct msm_drm_private {
struct mutex obj_lock;
/**
- * LRUs of inactive GEM objects. Every bo is either in one of the
- * inactive lists (depending on whether or not it is shrinkable) or
- * gpu->active_list (for the gpu it is active on[1]), or transiently
- * on a temporary list as the shrinker is running.
+ * lru:
*
- * Note that inactive_willneed also contains pinned and vmap'd bos,
- * but the number of pinned-but-not-active objects is small (scanout
- * buffers, ringbuffer, etc).
+ * The various LRUs a GEM object moves through during its
+ * lifetime. Objects start out in the unbacked LRU. When pinned
+ * (for scanout, or for permanently mapped GPU buffers such as
+ * the ringbuffer, memptrs, fw, etc.) an object moves to the
+ * pinned LRU. When unpinned, it moves into the willneed or
+ * dontneed LRU depending on madvise state. When its backing
+ * pages are evicted (willneed) or purged (dontneed) it moves
+ * back into the unbacked LRU.
*
- * These lists are protected by mm_lock (which should be acquired
- * before per GEM object lock). One should *not* hold mm_lock in
- * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
- *
- * [1] if someone ever added support for the old 2d cores, there could be
- * more than one gpu object
+ * The dontneed LRU is considered by the shrinker for objects
+ * that are candidates for purging, and the willneed LRU is
+ * considered for objects that could be evicted.
*/
- struct list_head inactive_willneed; /* inactive + potentially unpin/evictable */
- struct list_head inactive_dontneed; /* inactive + shrinkable */
- struct list_head inactive_unpinned; /* inactive + purged or unpinned */
- long shrinkable_count; /* write access under mm_lock */
- long evictable_count; /* write access under mm_lock */
- struct mutex mm_lock;
+ struct {
+ /**
+ * unbacked:
+ *
+ * The LRU for GEM objects without backing pages allocated.
+ * This mostly exists so that objects are always in one
+ * LRU.
+ */
+ struct drm_gem_lru unbacked;
+
+ /**
+ * pinned:
+ *
+ * The LRU for pinned GEM objects
+ */
+ struct drm_gem_lru pinned;
+
+ /**
+ * willneed:
+ *
+ * The LRU for unpinned GEM objects which are in madvise
+ * WILLNEED state (ie. can be evicted)
+ */
+ struct drm_gem_lru willneed;
+
+ /**
+ * dontneed:
+ *
+ * The LRU for unpinned GEM objects which are in madvise
+ * DONTNEED state (ie. can be purged)
+ */
+ struct drm_gem_lru dontneed;
+
+ /**
+ * lock:
+ *
+ * Protects manipulation of all of the LRUs.
+ */
+ struct mutex lock;
+ } lru;
struct workqueue_struct *wq;
@@ -289,7 +323,7 @@ void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
-struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
+struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
{
@@ -319,7 +353,7 @@ static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
return false;
}
-static inline struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
+static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
return NULL;
}
@@ -432,6 +466,8 @@ void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
phys_addr_t *size);
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
+struct icc_path *msm_icc_get(struct device *dev, const char *name);
+
#define msm_writel(data, addr) writel((data), (addr))
#define msm_readl(addr) readl((addr))
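
The lifecycle in the lru comment above condenses to one decision ladder. A simplified stand-in for the driver's update path, assuming the four LRUs and shared lock declared in the struct (the predicate arguments are illustrative):

#include <drm/drm_gem.h>

static void example_track(struct msm_drm_private *priv,
			  struct drm_gem_object *obj,
			  bool backed, bool pinned, bool willneed)
{
	if (!backed)		/* no pages yet, or purged/evicted */
		drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
	else if (pinned)	/* scanout, ringbuffer, vmap'd, ... */
		drm_gem_lru_move_tail(&priv->lru.pinned, obj);
	else if (willneed)	/* unpinned and evictable */
		drm_gem_lru_move_tail(&priv->lru.willneed, obj);
	else			/* unpinned and purgeable */
		drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
}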
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8ddbd2e001d4..1dee0d18abbb 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -19,7 +19,7 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
-static void update_inactive(struct msm_gem_object *msm_obj);
+static void update_lru(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
@@ -97,7 +97,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
@@ -132,7 +132,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
- update_inactive(msm_obj);
+ update_lru(obj);
}
return msm_obj->pages;
@@ -174,40 +174,45 @@ static void put_pages(struct drm_gem_object *obj)
put_pages_vram(obj);
msm_obj->pages = NULL;
+ update_lru(obj);
}
}
-struct page **msm_gem_get_pages(struct drm_gem_object *obj)
+static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
- msm_gem_lock(obj);
+ msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- msm_gem_unlock(obj);
return ERR_PTR(-EBUSY);
}
p = get_pages(obj);
-
if (!IS_ERR(p)) {
- msm_obj->pin_count++;
- update_inactive(msm_obj);
+ to_msm_bo(obj)->pin_count++;
+ update_lru(obj);
}
- msm_gem_unlock(obj);
return p;
}
-void msm_gem_put_pages(struct drm_gem_object *obj)
+struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **p;
msm_gem_lock(obj);
- msm_obj->pin_count--;
- GEM_WARN_ON(msm_obj->pin_count < 0);
- update_inactive(msm_obj);
+ p = msm_gem_pin_pages_locked(obj);
+ msm_gem_unlock(obj);
+
+ return p;
+}
+
+void msm_gem_unpin_pages(struct drm_gem_object *obj)
+{
+ msm_gem_lock(obj);
+ msm_gem_unpin_locked(obj);
msm_gem_unlock(obj);
}
@@ -273,7 +278,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
int ret;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
@@ -302,7 +307,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
@@ -321,7 +326,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace == aspace)
@@ -352,7 +357,7 @@ put_iova_spaces(struct drm_gem_object *obj, bool close)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) {
@@ -370,7 +375,7 @@ put_iova_vmas(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma, *tmp;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
del_vma(vma);
@@ -383,7 +388,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
{
struct msm_gem_vma *vma;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = lookup_vma(obj, aspace);
@@ -423,19 +428,18 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
return -EBUSY;
- pages = get_pages(obj);
+ pages = msm_gem_pin_pages_locked(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
-
- if (!ret)
- msm_obj->pin_count++;
+ if (ret)
+ msm_gem_unpin_locked(obj);
return ret;
}
@@ -444,12 +448,12 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
- update_inactive(msm_obj);
+ update_lru(obj);
}
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
@@ -465,7 +469,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
struct msm_gem_vma *vma;
int ret;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
vma = get_vma_locked(obj, aspace, range_start, range_end);
if (IS_ERR(vma))
@@ -626,7 +630,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (obj->import_attach)
return ERR_PTR(-ENODEV);
@@ -658,7 +662,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
goto fail;
}
- update_inactive(msm_obj);
+ update_lru(obj);
}
return msm_obj->vaddr;
@@ -699,7 +703,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
@@ -729,8 +733,7 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
/* If the obj is inactive, we might need to move it
* between inactive lists
*/
- if (msm_obj->active_count == 0)
- update_inactive(msm_obj);
+ update_lru(obj);
msm_gem_unlock(obj);
@@ -742,7 +745,7 @@ void msm_gem_purge(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
@@ -757,7 +760,6 @@ void msm_gem_purge(struct drm_gem_object *obj)
put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED;
- update_inactive(msm_obj);
drm_gem_free_mmap_offset(obj);
@@ -780,10 +782,8 @@ void msm_gem_evict(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
GEM_WARN_ON(is_unevictable(msm_obj));
- GEM_WARN_ON(!msm_obj->evictable);
- GEM_WARN_ON(msm_obj->active_count);
/* Get rid of any iommu mapping(s): */
put_iova_spaces(obj, false);
@@ -791,15 +791,13 @@ void msm_gem_evict(struct drm_gem_object *obj)
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
put_pages(obj);
-
- update_inactive(msm_obj);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(obj);
if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
return;
@@ -808,66 +806,37 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
msm_obj->vaddr = NULL;
}
-void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
+static void update_lru(struct drm_gem_object *obj)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
-
- might_sleep();
- GEM_WARN_ON(!msm_gem_is_locked(obj));
- GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
- GEM_WARN_ON(msm_obj->dontneed);
-
- if (msm_obj->active_count++ == 0) {
- mutex_lock(&priv->mm_lock);
- if (msm_obj->evictable)
- mark_unevictable(msm_obj);
- list_move_tail(&msm_obj->mm_list, &gpu->active_list);
- mutex_unlock(&priv->mm_lock);
- }
-}
-
-void msm_gem_active_put(struct drm_gem_object *obj)
-{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- might_sleep();
- GEM_WARN_ON(!msm_gem_is_locked(obj));
+ msm_gem_assert_locked(&msm_obj->base);
- if (--msm_obj->active_count == 0) {
- update_inactive(msm_obj);
+ if (!msm_obj->pages) {
+ GEM_WARN_ON(msm_obj->pin_count);
+ GEM_WARN_ON(msm_obj->vmap_count);
+
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
+ } else if (msm_obj->pin_count || msm_obj->vmap_count) {
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
+ } else if (msm_obj->madv == MSM_MADV_WILLNEED) {
+ drm_gem_lru_move_tail(&priv->lru.willneed, obj);
+ } else {
+ GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
+
+ drm_gem_lru_move_tail(&priv->lru.dontneed, obj);
}
}
-static void update_inactive(struct msm_gem_object *msm_obj)
+bool msm_gem_active(struct drm_gem_object *obj)
{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+ msm_gem_assert_locked(obj);
- if (msm_obj->active_count != 0)
- return;
-
- mutex_lock(&priv->mm_lock);
-
- if (msm_obj->dontneed)
- mark_unpurgeable(msm_obj);
- if (msm_obj->evictable)
- mark_unevictable(msm_obj);
-
- list_del(&msm_obj->mm_list);
- if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
- list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
- mark_evictable(msm_obj);
- } else if (msm_obj->madv == MSM_MADV_DONTNEED) {
- list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
- mark_purgeable(msm_obj);
- } else {
- GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- }
+ if (to_msm_bo(obj)->pin_count)
+ return true;
- mutex_unlock(&priv->mm_lock);
+ return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -910,7 +879,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
stats->all.count++;
stats->all.size += obj->size;
- if (is_active(msm_obj)) {
+ if (msm_gem_active(obj)) {
stats->active.count++;
stats->active.size += obj->size;
}
@@ -938,7 +907,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
- msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
+ msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
@@ -1015,15 +984,6 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
- mutex_lock(&priv->mm_lock);
- if (msm_obj->dontneed)
- mark_unpurgeable(msm_obj);
- list_del(&msm_obj->mm_list);
- mutex_unlock(&priv->mm_lock);
-
- /* object should not be on active list: */
- GEM_WARN_ON(is_active(msm_obj));
-
put_iova_spaces(obj, true);
if (obj->import_attach) {
@@ -1183,13 +1143,6 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
to_msm_bo(obj)->vram_node = &vma->node;
- /* Call chain get_pages() -> update_inactive() tries to
- * access msm_obj->mm_list, but it is not initialized yet.
- * To avoid NULL pointer dereference error, initialize
- * mm_list to be empty.
- */
- INIT_LIST_HEAD(&msm_obj->mm_list);
-
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
@@ -1212,9 +1165,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
- mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- mutex_unlock(&priv->mm_lock);
+ drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
@@ -1270,9 +1221,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
msm_gem_unlock(obj);
- mutex_lock(&priv->mm_lock);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
- mutex_unlock(&priv->mm_lock);
+ drm_gem_lru_move_tail(&priv->lru.pinned, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
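
With active_count gone, "active" is now derived from the pin count and the object's reservation fences. A minimal sketch of the idleness test; note the deliberate inversion inside dma_resv_usage_rw(): a prospective *write* must wait on existing readers too, so rw(true) expands to DMA_RESV_USAGE_READ, which covers both read and write fences:

#include <linux/dma-resv.h>

static bool example_gem_busy(struct drm_gem_object *obj)
{
	/* true while any read or write fence is still unsignaled */
	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}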
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 432032ad4aed..c4844cf3a585 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -94,16 +94,6 @@ struct msm_gem_object {
uint8_t madv;
/**
- * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
- */
- bool dontneed : 1;
-
- /**
- * Is object evictable (ie. counted in priv->evictable_count)?
- */
- bool evictable : 1;
-
- /**
* count of active vmap'ing
*/
uint8_t vmap_count;
@@ -114,17 +104,6 @@ struct msm_gem_object {
*/
struct list_head node;
- /**
- * An object is either:
- * inactive - on priv->inactive_dontneed or priv->inactive_willneed
- * (depending on purgeability status)
- * active - on one one of the gpu's active_list.. well, at
- * least for now we don't have (I don't think) hw sync between
- * 2d and 3d one devices which have both, meaning we need to
- * block on submit if a bo is already on other ring
- */
- struct list_head mm_list;
-
struct page **pages;
struct sg_table *sgt;
void *vaddr;
@@ -138,7 +117,6 @@ struct msm_gem_object {
char name[32]; /* Identifier to print for the debugfs files */
- int active_count;
int pin_count;
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -159,8 +137,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
-struct page **msm_gem_get_pages(struct drm_gem_object *obj);
-void msm_gem_put_pages(struct drm_gem_object *obj);
+struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
+void msm_gem_unpin_pages(struct drm_gem_object *obj);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -171,8 +149,7 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
-void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
-void msm_gem_active_put(struct drm_gem_object *obj);
+bool msm_gem_active(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -208,12 +185,6 @@ msm_gem_lock(struct drm_gem_object *obj)
dma_resv_lock(obj->resv, NULL);
}
-static inline bool __must_check
-msm_gem_trylock(struct drm_gem_object *obj)
-{
- return dma_resv_trylock(obj->resv);
-}
-
static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
@@ -226,8 +197,8 @@ msm_gem_unlock(struct drm_gem_object *obj)
dma_resv_unlock(obj->resv);
}
-static inline bool
-msm_gem_is_locked(struct drm_gem_object *obj)
+static inline void
+msm_gem_assert_locked(struct drm_gem_object *obj)
{
/*
* Destroying the object is a special case.. msm_gem_free_object()
@@ -241,13 +212,10 @@ msm_gem_is_locked(struct drm_gem_object *obj)
* Unfortunately lockdep is not aware of this detail. So when the
* refcount drops to zero, we pretend it is already locked.
*/
- return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0);
-}
-
-static inline bool is_active(struct msm_gem_object *msm_obj)
-{
- GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
- return msm_obj->active_count;
+ lockdep_assert_once(
+ (kref_read(&obj->refcount) == 0) ||
+ (lockdep_is_held(&obj->resv->lock.base) != LOCK_STATE_NOT_HELD)
+ );
}
/* imported/exported objects are not purgeable: */
@@ -264,81 +232,15 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj)
static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
{
- GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+ msm_gem_assert_locked(&msm_obj->base);
return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
}
-static inline void mark_purgeable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unpurgeable(msm_obj))
- return;
-
- if (GEM_WARN_ON(msm_obj->dontneed))
- return;
-
- priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
- msm_obj->dontneed = true;
-}
-
-static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unpurgeable(msm_obj))
- return;
-
- if (GEM_WARN_ON(!msm_obj->dontneed))
- return;
-
- priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
- GEM_WARN_ON(priv->shrinkable_count < 0);
- msm_obj->dontneed = false;
-}
-
static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}
-static inline void mark_evictable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unevictable(msm_obj))
- return;
-
- if (WARN_ON(msm_obj->evictable))
- return;
-
- priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
- msm_obj->evictable = true;
-}
-
-static inline void mark_unevictable(struct msm_gem_object *msm_obj)
-{
- struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
-
- WARN_ON(!mutex_is_locked(&priv->mm_lock));
-
- if (is_unevictable(msm_obj))
- return;
-
- if (WARN_ON(!msm_obj->evictable))
- return;
-
- priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
- WARN_ON(priv->evictable_count < 0);
- msm_obj->evictable = false;
-}
-
void msm_gem_purge(struct drm_gem_object *obj);
void msm_gem_evict(struct drm_gem_object *obj);
void msm_gem_vunmap(struct drm_gem_object *obj);
@@ -390,9 +292,8 @@ struct msm_gem_submit {
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED 0x4000 /* obj lock is held */
-#define BO_ACTIVE 0x2000 /* active refcnt is held */
-#define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */
-#define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */
+#define BO_OBJ_PINNED 0x2000 /* obj (pages) is pinned */
+#define BO_VMA_PINNED 0x1000 /* vma (virtual address) is pinned */
uint32_t flags;
union {
struct msm_gem_object *obj;
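
The msm_gem_is_locked() boolean is folded into an assert-style helper, delegating the check to lockdep so it compiles away entirely when lockdep is disabled. A sketch of the same construction (the logic is copied from the hunk above):

#include <linux/lockdep.h>

static inline void example_assert_locked(struct drm_gem_object *obj)
{
	/* tolerate the final-kref teardown case, as the comment explains */
	lockdep_assert_once(
		(kref_read(&obj->refcount) == 0) ||
		(lockdep_is_held(&obj->resv->lock.base) != LOCK_STATE_NOT_HELD));
}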
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dcc8a573bc76..c1d91863df05 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -63,12 +63,12 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
- msm_gem_get_pages(obj);
+ msm_gem_pin_pages(obj);
return 0;
}
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
- msm_gem_put_pages(obj);
+ msm_gem_unpin_pages(obj);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 0317055e3253..1de14e67f96b 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -24,103 +24,77 @@ static bool can_swap(void)
return enable_eviction && get_nr_swap_pages() > 0;
}
+static bool can_block(struct shrink_control *sc)
+{
+ if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
+ return false;
+ return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
+}
+
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- unsigned count = priv->shrinkable_count;
+ unsigned count = priv->lru.dontneed.count;
if (can_swap())
- count += priv->evictable_count;
+ count += priv->lru.willneed.count;
return count;
}
static bool
-purge(struct msm_gem_object *msm_obj)
+purge(struct drm_gem_object *obj)
{
- if (!is_purgeable(msm_obj))
+ if (!is_purgeable(to_msm_bo(obj)))
return false;
- /*
- * This will move the obj out of still_in_list to
- * the purged list
- */
- msm_gem_purge(&msm_obj->base);
+ if (msm_gem_active(obj))
+ return false;
+
+ msm_gem_purge(obj);
return true;
}
static bool
-evict(struct msm_gem_object *msm_obj)
+evict(struct drm_gem_object *obj)
{
- if (is_unevictable(msm_obj))
+ if (is_unevictable(to_msm_bo(obj)))
+ return false;
+
+ if (msm_gem_active(obj))
return false;
- msm_gem_evict(&msm_obj->base);
+ msm_gem_evict(obj);
return true;
}
-static unsigned long
-scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
- bool (*shrink)(struct msm_gem_object *msm_obj))
+static bool
+wait_for_idle(struct drm_gem_object *obj)
{
- unsigned freed = 0;
- struct list_head still_in_list;
-
- INIT_LIST_HEAD(&still_in_list);
-
- mutex_lock(&priv->mm_lock);
-
- while (freed < nr_to_scan) {
- struct msm_gem_object *msm_obj = list_first_entry_or_null(
- list, typeof(*msm_obj), mm_list);
-
- if (!msm_obj)
- break;
-
- list_move_tail(&msm_obj->mm_list, &still_in_list);
-
- /*
- * If it is in the process of being freed, msm_gem_free_object
- * can be blocked on mm_lock waiting to remove it. So just
- * skip it.
- */
- if (!kref_get_unless_zero(&msm_obj->base.refcount))
- continue;
-
- /*
- * Now that we own a reference, we can drop mm_lock for the
- * rest of the loop body, to reduce contention with the
- * retire_submit path (which could make more objects purgeable)
- */
-
- mutex_unlock(&priv->mm_lock);
-
- /*
- * Note that this still needs to be trylock, since we can
- * hit shrinker in response to trying to get backing pages
- * for this obj (ie. while it's lock is already held)
- */
- if (!msm_gem_trylock(&msm_obj->base))
- goto tail;
-
- if (shrink(msm_obj))
- freed += msm_obj->base.size >> PAGE_SHIFT;
+ enum dma_resv_usage usage = dma_resv_usage_rw(true);
+ return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
+}
- msm_gem_unlock(&msm_obj->base);
+static bool
+active_purge(struct drm_gem_object *obj)
+{
+ if (!wait_for_idle(obj))
+ return false;
-tail:
- drm_gem_object_put(&msm_obj->base);
- mutex_lock(&priv->mm_lock);
- }
+ return purge(obj);
+}
- list_splice_tail(&still_in_list, list);
- mutex_unlock(&priv->mm_lock);
+static bool
+active_evict(struct drm_gem_object *obj)
+{
+ if (!wait_for_idle(obj))
+ return false;
- return freed;
+ return evict(obj);
}
static unsigned long
@@ -128,21 +102,34 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- unsigned long freed;
-
- freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);
-
- if (freed > 0)
- trace_msm_gem_purge(freed << PAGE_SHIFT);
-
- if (can_swap() && freed < sc->nr_to_scan) {
- int evicted = scan(priv, sc->nr_to_scan - freed,
- &priv->inactive_willneed, evict);
+ struct {
+ struct drm_gem_lru *lru;
+ bool (*shrink)(struct drm_gem_object *obj);
+ bool cond;
+ unsigned long freed;
+ } stages[] = {
+ /* Stages of progressively more aggressive/expensive reclaim: */
+ { &priv->lru.dontneed, purge, true },
+ { &priv->lru.willneed, evict, can_swap() },
+ { &priv->lru.dontneed, active_purge, can_block(sc) },
+ { &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
+ };
+ long nr = sc->nr_to_scan;
+ unsigned long freed = 0;
- if (evicted > 0)
- trace_msm_gem_evict(evicted << PAGE_SHIFT);
+ for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
+ if (!stages[i].cond)
+ continue;
+ stages[i].freed =
+ drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+ nr -= stages[i].freed;
+ freed += stages[i].freed;
+ }
- freed += evicted;
+ if (freed) {
+ trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
+ stages[1].freed, stages[2].freed,
+ stages[3].freed);
}
return (freed > 0) ? freed : SHRINK_STOP;
@@ -173,12 +160,12 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
static const int vmap_shrink_limit = 15;
static bool
-vmap_shrink(struct msm_gem_object *msm_obj)
+vmap_shrink(struct drm_gem_object *obj)
{
- if (!is_vunmapable(msm_obj))
+ if (!is_vunmapable(to_msm_bo(obj)))
return false;
- msm_gem_vunmap(&msm_obj->base);
+ msm_gem_vunmap(obj);
return true;
}
@@ -188,17 +175,18 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct msm_drm_private *priv =
container_of(nb, struct msm_drm_private, vmap_notifier);
- struct list_head *mm_lists[] = {
- &priv->inactive_dontneed,
- &priv->inactive_willneed,
- priv->gpu ? &priv->gpu->active_list : NULL,
+ struct drm_gem_lru *lrus[] = {
+ &priv->lru.dontneed,
+ &priv->lru.willneed,
+ &priv->lru.pinned,
NULL,
};
unsigned idx, unmapped = 0;
- for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
- unmapped += scan(priv, vmap_shrink_limit - unmapped,
- mm_lists[idx], vmap_shrink);
+ for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
+ unmapped += drm_gem_lru_scan(lrus[idx],
+ vmap_shrink_limit - unmapped,
+ vmap_shrink);
}
*(unsigned long *)ptr += unmapped;
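
The hand-rolled list walk is replaced by drm_gem_lru_scan() driven from a table of stages ordered from cheap to expensive; the stage order itself encodes reclaim cost. A generic sketch of that loop (the stage struct is local to the sketch):

#include <drm/drm_gem.h>

struct example_stage {
	struct drm_gem_lru *lru;
	bool (*shrink)(struct drm_gem_object *obj);
	bool cond;		/* skip stage when false */
	unsigned long freed;
};

static unsigned long example_scan(struct example_stage *stages,
				  unsigned int n, long nr)
{
	unsigned long freed = 0;

	for (unsigned int i = 0; (nr > 0) && (i < n); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed = drm_gem_lru_scan(stages[i].lru, nr,
						   stages[i].shrink);
		nr -= stages[i].freed;
		freed += stages[i].freed;
	}

	return freed;
}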
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c9e4aeb14f4a..5599d93ec0d2 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -26,6 +26,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
uint32_t nr_cmds)
{
+ static atomic_t ident = ATOMIC_INIT(0);
struct msm_gem_submit *submit;
uint64_t sz;
int ret;
@@ -36,7 +37,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
if (sz > SIZE_MAX)
return ERR_PTR(-ENOMEM);
- submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ submit = kzalloc(sz, GFP_KERNEL);
if (!submit)
return ERR_PTR(-ENOMEM);
@@ -52,9 +53,13 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->gpu = gpu;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
+ submit->pid = get_pid(task_pid(current));
submit->ring = gpu->rb[queue->ring_nr];
submit->fault_dumped = false;
+ /* Get a unique identifier for the submission for logging purposes */
+ submit->ident = atomic_inc_return(&ident) - 1;
+
INIT_LIST_HEAD(&submit->node);
return submit;
@@ -67,9 +72,9 @@ void __msm_gem_submit_destroy(struct kref *kref)
unsigned i;
if (submit->fence_id) {
- mutex_lock(&submit->queue->lock);
+ mutex_lock(&submit->queue->idr_lock);
idr_remove(&submit->queue->fence_idr, submit->fence_id);
- mutex_unlock(&submit->queue->lock);
+ mutex_unlock(&submit->queue->idr_lock);
}
dma_fence_put(submit->user_fence);
@@ -238,17 +243,13 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
if (flags & BO_OBJ_PINNED)
msm_gem_unpin_locked(obj);
- if (flags & BO_ACTIVE)
- msm_gem_active_put(obj);
-
if (flags & BO_LOCKED)
dma_resv_unlock(obj->resv);
}
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
- unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
- BO_ACTIVE | BO_LOCKED;
+ unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED;
submit_cleanup_bo(submit, i, cleanup_flags);
if (!(submit->bos[i].flags & BO_VALID))
@@ -353,18 +354,6 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
submit->valid = true;
- /*
- * Increment active_count first, so if under memory pressure, we
- * don't inadvertently evict a bo needed by the submit in order
- * to pin an earlier bo in the same submit.
- */
- for (i = 0; i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = &submit->bos[i].obj->base;
-
- msm_gem_active_get(obj, submit->gpu);
- submit->bos[i].flags |= BO_ACTIVE;
- }
-
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
struct msm_gem_vma *vma;
@@ -512,11 +501,11 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
- unsigned cleanup_flags = BO_LOCKED;
+ unsigned cleanup_flags = BO_LOCKED | BO_OBJ_PINNED;
unsigned i;
if (error)
- cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;
+ cleanup_flags |= BO_VMA_PINNED;
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
@@ -533,10 +522,6 @@ void msm_submit_retire(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
- msm_gem_lock(obj);
- /* Note, VMA already fence-unpinned before submit: */
- submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
- msm_gem_unlock(obj);
drm_gem_object_put(obj);
}
}
@@ -718,7 +703,6 @@ static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
- static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
@@ -729,10 +713,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_submit_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
int out_fence_fd = -1;
- struct pid *pid = get_pid(task_pid(current));
bool has_ww_ticket = false;
unsigned i;
- int ret, submitid;
+ int ret;
if (!gpu)
return -ENXIO;
@@ -764,35 +747,26 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
- /* Get a unique identifier for the submission for logging purposes */
- submitid = atomic_inc_return(&ident) - 1;
-
ring = gpu->rb[queue->ring_nr];
- trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
- args->nr_bos, args->nr_cmds);
-
- ret = mutex_lock_interruptible(&queue->lock);
- if (ret)
- goto out_post_unlock;
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
ret = out_fence_fd;
- goto out_unlock;
+ return ret;
}
}
- submit = submit_create(dev, gpu, queue, args->nr_bos,
- args->nr_cmds);
- if (IS_ERR(submit)) {
- ret = PTR_ERR(submit);
- submit = NULL;
- goto out_unlock;
- }
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+ if (IS_ERR(submit))
+ return PTR_ERR(submit);
+
+ trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
+ args->nr_bos, args->nr_cmds);
- submit->pid = pid;
- submit->ident = submitid;
+ ret = mutex_lock_interruptible(&queue->lock);
+ if (ret)
+ goto out_post_unlock;
if (args->flags & MSM_SUBMIT_SUDO)
submit->in_rb = true;
@@ -887,6 +861,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
+ mutex_lock(&queue->idr_lock);
+
/*
* If using userspace provided seqno fence, validate that the id
* is available before arming sched job. Since access to fence_idr
@@ -895,6 +871,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
*/
if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
idr_find(&queue->fence_idr, args->fence)) {
+ mutex_unlock(&queue->idr_lock);
ret = -EINVAL;
goto out;
}
@@ -927,6 +904,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->user_fence, 1,
INT_MAX, GFP_KERNEL);
}
+
+ mutex_unlock(&queue->idr_lock);
+
if (submit->fence_id < 0) {
ret = submit->fence_id;
submit->fence_id = 0;
@@ -965,9 +945,9 @@ out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
mutex_unlock(&queue->lock);
+out_post_unlock:
if (submit)
msm_gem_submit_put(submit);
-out_post_unlock:
if (!IS_ERR_OR_NULL(post_deps)) {
for (i = 0; i < args->nr_out_syncobjs; ++i) {
kfree(post_deps[i].chain);
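
The new queue->idr_lock narrows what a fence-id lookup has to take: previously it shared queue->lock with the whole submit path, so a slow submit could stall every waiter. A sketch of the lookup under the dedicated lock (shape taken from wait_fence() above):

#include <linux/idr.h>
#include <linux/dma-fence.h>

static struct dma_fence *example_find_fence(struct msm_gpu_submitqueue *queue,
					    uint32_t fence_id)
{
	struct dma_fence *fence;

	mutex_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);	/* take a reference */
	mutex_unlock(&queue->idr_lock);

	return fence;	/* NULL means the submit already retired */
}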
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c2bfcf3f1f40..0098ee8438aa 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -16,6 +16,7 @@
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
+#include <linux/reset.h>
#include <linux/sched/task.h>
/*
@@ -394,7 +395,6 @@ static void recover_worker(struct kthread_work *work)
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
- pm_runtime_put_sync(&gpu->pdev->dev);
kfree(cmd);
kfree(comm);
@@ -423,9 +423,7 @@ static void recover_worker(struct kthread_work *work)
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);
- pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
- pm_runtime_put_sync(&gpu->pdev->dev);
/*
* Replay all remaining submits starting with highest priority
@@ -442,6 +440,8 @@ static void recover_worker(struct kthread_work *work)
}
}
+ pm_runtime_put(&gpu->pdev->dev);
+
mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
@@ -664,11 +664,12 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
mutex_lock(&gpu->active_lock);
gpu->active_submits--;
WARN_ON(gpu->active_submits < 0);
- if (!gpu->active_submits)
+ if (!gpu->active_submits) {
msm_devfreq_idle(gpu);
- mutex_unlock(&gpu->active_lock);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+ }
- pm_runtime_put_autosuspend(&gpu->pdev->dev);
+ mutex_unlock(&gpu->active_lock);
msm_gem_submit_put(submit);
}
@@ -757,14 +758,17 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* Update devfreq on transition from idle->active: */
mutex_lock(&gpu->active_lock);
- if (!gpu->active_submits)
+ if (!gpu->active_submits) {
+ pm_runtime_get(&gpu->pdev->dev);
msm_devfreq_active(gpu);
+ }
gpu->active_submits++;
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
+ pm_runtime_put(&gpu->pdev->dev);
hangcheck_timer_reset(gpu);
}
@@ -846,7 +850,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
sched_set_fifo_low(gpu->worker->task);
- INIT_LIST_HEAD(&gpu->active_list);
mutex_init(&gpu->active_lock);
mutex_init(&gpu->lock);
init_waitqueue_head(&gpu->retire_event);
@@ -901,6 +904,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
+ gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "cx_collapse");
+
gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
@@ -974,8 +980,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- WARN_ON(!list_empty(&gpu->active_list));
-
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;
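
The msm_gpu.c hunks tie runtime PM to the active-submit count: one pm_runtime reference spans the whole period during which active_submits is non-zero, and both transitions happen under active_lock so the count and the reference cannot diverge. A condensed sketch:

#include <linux/pm_runtime.h>

static void example_submit_start(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->active_lock);
	if (!gpu->active_submits)
		pm_runtime_get(&gpu->pdev->dev);	/* idle -> active */
	gpu->active_submits++;
	mutex_unlock(&gpu->active_lock);
}

static void example_submit_retire(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->active_lock);
	if (!--gpu->active_submits)
		pm_runtime_put_autosuspend(&gpu->pdev->dev); /* active -> idle */
	mutex_unlock(&gpu->active_lock);
}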
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 4d935fedd2ac..ff911e7305ce 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -13,6 +13,7 @@
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include "msm_drv.h"
#include "msm_fence.h"
@@ -187,12 +188,6 @@ struct msm_gpu {
*/
int cur_ctx_seqno;
- /*
- * List of GEM active objects on this gpu. Protected by
- * msm_drm_private::mm_lock
- */
- struct list_head active_list;
-
/**
* lock:
*
@@ -277,6 +272,9 @@ struct msm_gpu {
bool hw_apriv;
struct thermal_cooling_device *cooling;
+
+ /* To poll for cx gdsc collapse during gpu recovery */
+ struct reset_control *cx_collapse;
};
static inline struct msm_gpu *dev_to_gpu(struct device *dev)
@@ -466,7 +464,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* @node: node in the context's list of submitqueues
* @fence_idr: maps fence-id to dma_fence for userspace visible fence
* seqno, protected by submitqueue lock
- * @lock: submitqueue lock
+ * @idr_lock: for serializing access to fence_idr
+ * @lock: submitqueue lock for serializing submits on a queue
* @ref: reference count
* @entity: the submit job-queue
*/
@@ -479,6 +478,7 @@ struct msm_gpu_submitqueue {
struct msm_file_private *ctx;
struct list_head node;
struct idr fence_idr;
+ struct mutex idr_lock;
struct mutex lock;
struct kref ref;
struct drm_sched_entity *entity;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index d1f70426f554..85c443a37e4e 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -213,6 +213,8 @@ void msm_devfreq_init(struct msm_gpu *gpu)
if (IS_ERR(df->devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ dev_pm_qos_remove_request(&df->idle_freq);
+ dev_pm_qos_remove_request(&df->boost_freq);
df->devfreq = NULL;
return;
}
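
The devfreq hunk fixes an error-path leak: the two dev_pm_qos requests added earlier in msm_devfreq_init() must be removed again when devfreq creation fails, or they leak and a later re-init would double-add them. A sketch of the pairing, assuming the request fields from the driver and the default-value macros from pm_qos.h:

#include <linux/pm_qos.h>

static int example_qos_init(struct device *dev, struct msm_gpu_devfreq *df)
{
	int ret;

	ret = dev_pm_qos_add_request(dev, &df->idle_freq,
				     DEV_PM_QOS_MAX_FREQUENCY,
				     PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	if (ret < 0)
		return ret;

	ret = dev_pm_qos_add_request(dev, &df->boost_freq,
				     DEV_PM_QOS_MIN_FREQUENCY,
				     PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
	if (ret < 0)
		goto fail_idle;

	/* ... create devfreq; on failure, remove both requests ... */
	return 0;

fail_idle:
	dev_pm_qos_remove_request(&df->idle_freq);
	return ret;
}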
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index ca0b08d7875b..ac40d857bc45 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -115,29 +115,27 @@ TRACE_EVENT(msm_gmu_freq_change,
);
-TRACE_EVENT(msm_gem_purge,
- TP_PROTO(u32 bytes),
- TP_ARGS(bytes),
+TRACE_EVENT(msm_gem_shrink,
+ TP_PROTO(u32 nr_to_scan, u32 purged, u32 evicted,
+ u32 active_purged, u32 active_evicted),
+ TP_ARGS(nr_to_scan, purged, evicted, active_purged, active_evicted),
TP_STRUCT__entry(
- __field(u32, bytes)
+ __field(u32, nr_to_scan)
+ __field(u32, purged)
+ __field(u32, evicted)
+ __field(u32, active_purged)
+ __field(u32, active_evicted)
),
TP_fast_assign(
- __entry->bytes = bytes;
+ __entry->nr_to_scan = nr_to_scan;
+ __entry->purged = purged;
+ __entry->evicted = evicted;
+ __entry->active_purged = active_purged;
+ __entry->active_evicted = active_evicted;
),
- TP_printk("Purging %u bytes", __entry->bytes)
-);
-
-
-TRACE_EVENT(msm_gem_evict,
- TP_PROTO(u32 bytes),
- TP_ARGS(bytes),
- TP_STRUCT__entry(
- __field(u32, bytes)
- ),
- TP_fast_assign(
- __entry->bytes = bytes;
- ),
- TP_printk("Evicting %u bytes", __entry->bytes)
+ TP_printk("nr_to_scan=%u pg, purged=%u pg, evicted=%u pg, active_purged=%u pg, active_evicted=%u pg",
+ __entry->nr_to_scan, __entry->purged, __entry->evicted,
+ __entry->active_purged, __entry->active_evicted)
);
diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c
index 7b504617833a..d02cd29ce829 100644
--- a/drivers/gpu/drm/msm/msm_io_utils.c
+++ b/drivers/gpu/drm/msm/msm_io_utils.c
@@ -5,6 +5,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/interconnect.h>
+
#include "msm_drv.h"
/*
@@ -124,3 +126,23 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
work->worker = worker;
kthread_init_work(&work->work, fn);
}
+
+struct icc_path *msm_icc_get(struct device *dev, const char *name)
+{
+ struct device *mdss_dev = dev->parent;
+ struct icc_path *path;
+
+ path = of_icc_get(dev, name);
+ if (path)
+ return path;
+
+	/*
+	 * If there are no interconnects attached to the corresponding device
+	 * node, of_icc_get() will return NULL.
+	 *
+	 * If the MDP5/DPU device node doesn't have interconnects, look up the
+	 * path in the parent (MDSS) device.
+	 */
+	return of_icc_get(mdss_dev, name);
+}
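The early return above relies on the of_icc_get() contract: NULL means the node simply has no interconnects, while errors come back as non-NULL ERR_PTR values and are propagated as-is, so only the NULL case reaches the parent lookup. A minimal userspace sketch of that contract (all mock_* names are invented for illustration):

#include <stdio.h>

static int a_path;	/* dummy object standing in for a real icc_path */

/* outcome: 1 = path found, 0 = no interconnects (NULL), -1 = ERR_PTR */
static void *mock_of_icc_get(int outcome)
{
	if (outcome > 0)
		return &a_path;
	if (outcome < 0)
		return (void *)-22L;	/* like ERR_PTR(-EINVAL) */
	return NULL;
}

static void *mock_msm_icc_get(int child, int parent)
{
	void *path = mock_of_icc_get(child);

	if (path)			/* a real path *or* an ERR_PTR */
		return path;

	return mock_of_icc_get(parent);	/* fall back to the parent node */
}

int main(void)
{
	printf("child hit: %p\n", mock_msm_icc_get(1, 0));
	printf("fallback:  %p\n", mock_msm_icc_get(0, 1));
	printf("error:     %p\n", mock_msm_icc_get(-1, 1));
	return 0;
}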
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a54ed354578b..5577cea7c009 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,7 @@ struct msm_iommu_pagetable {
struct msm_mmu base;
struct msm_mmu *parent;
struct io_pgtable_ops *pgtbl_ops;
+ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
};
@@ -29,23 +30,84 @@ static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
return container_of(mmu, struct msm_iommu_pagetable, base);
}
+/* based on iommu_pgsize() in iommu.c: */
+static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, size_t *count)
+{
+ unsigned int pgsize_idx, pgsize_idx_next;
+ unsigned long pgsizes;
+ size_t offset, pgsize, pgsize_next;
+ unsigned long addr_merge = paddr | iova;
+
+ /* Page sizes supported by the hardware and small enough for @size */
+ pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);
+
+ /* Constrain the page sizes further based on the maximum alignment */
+ if (likely(addr_merge))
+ pgsizes &= GENMASK(__ffs(addr_merge), 0);
+
+ /* Make sure we have at least one suitable page size */
+ BUG_ON(!pgsizes);
+
+ /* Pick the biggest page size remaining */
+ pgsize_idx = __fls(pgsizes);
+ pgsize = BIT(pgsize_idx);
+ if (!count)
+ return pgsize;
+
+	/* Find the next biggest supported page size, if it exists */
+ pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
+ if (!pgsizes)
+ goto out_set_count;
+
+ pgsize_idx_next = __ffs(pgsizes);
+ pgsize_next = BIT(pgsize_idx_next);
+
+ /*
+ * There's no point trying a bigger page size unless the virtual
+ * and physical addresses are similarly offset within the larger page.
+ */
+ if ((iova ^ paddr) & (pgsize_next - 1))
+ goto out_set_count;
+
+ /* Calculate the offset to the next page size alignment boundary */
+ offset = pgsize_next - (addr_merge & (pgsize_next - 1));
+
+ /*
+ * If size is big enough to accommodate the larger page, reduce
+ * the number of smaller pages.
+ */
+ if (offset + pgsize_next <= size)
+ size = offset;
+
+out_set_count:
+ *count = size >> pgsize_idx;
+ return pgsize;
+}
+
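As a standalone illustration of the selection logic above, here is a userspace re-creation with builtin-based stand-ins for GENMASK()/__fls()/__ffs() (assuming 64-bit unsigned long); the bitmap, addresses and sizes are invented for the demo:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

static unsigned int fls_idx(unsigned long x) { return 63 - __builtin_clzl(x); }
static unsigned int ffs_idx(unsigned long x) { return __builtin_ctzl(x); }

static size_t calc_pgsize(unsigned long pgsize_bitmap, unsigned long iova,
			  unsigned long paddr, size_t size, size_t *count)
{
	unsigned long addr_merge = paddr | iova;
	unsigned long pgsizes;
	unsigned int pgsize_idx, pgsize_idx_next;
	size_t pgsize, pgsize_next, offset;

	/* Page sizes in the bitmap that are no bigger than @size... */
	pgsizes = pgsize_bitmap & GENMASK(fls_idx(size), 0);

	/* ...and that match the alignment of both addresses */
	if (addr_merge)
		pgsizes &= GENMASK(ffs_idx(addr_merge), 0);
	assert(pgsizes);

	pgsize_idx = fls_idx(pgsizes);
	pgsize = BIT(pgsize_idx);

	/* Is there a bigger page size we could step up to mid-range? */
	pgsizes = pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out;
	pgsize_idx_next = ffs_idx(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out;

	/* Stop the current run at the next alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));
	if (offset + pgsize_next <= size)
		size = offset;
out:
	*count = size >> pgsize_idx;
	return pgsize;
}

int main(void)
{
	unsigned long bm = BIT(12) | BIT(21) | BIT(30); /* 4K | 2M | 1G */
	size_t count, pg;

	pg = calc_pgsize(bm, 0x200000, 0x200000, 0x400000, &count);
	printf("aligned:   pgsize=%#zx count=%zu\n", pg, count); /* 2M x 2 */

	pg = calc_pgsize(bm, 0x201000, 0x201000, 0x400000, &count);
	printf("unaligned: pgsize=%#zx count=%zu\n", pg, count); /* 4K x 511 */
	return 0;
}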
static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
size_t size)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
- size_t unmapped = 0;
- /* Unmap the block one page at a time */
while (size) {
- unmapped += ops->unmap(ops, iova, 4096, NULL);
- iova += 4096;
- size -= 4096;
+ size_t unmapped, pgsize, count;
+
+ pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
+
+ unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
+ if (!unmapped)
+ break;
+
+ iova += unmapped;
+ size -= unmapped;
}
iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
- return (unmapped == size) ? 0 : -EINVAL;
+ return (size == 0) ? 0 : -EINVAL;
}
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
@@ -54,7 +116,6 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
struct scatterlist *sg;
- size_t mapped = 0;
u64 addr = iova;
unsigned int i;
@@ -62,17 +123,26 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
- /* Map the block one page at a time */
while (size) {
- if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
- msm_iommu_pagetable_unmap(mmu, iova, mapped);
+ size_t pgsize, count, mapped = 0;
+ int ret;
+
+ pgsize = calc_pgsize(pagetable, addr, phys, size, &count);
+
+ ret = ops->map_pages(ops, addr, phys, pgsize, count,
+ prot, GFP_KERNEL, &mapped);
+
+			/*
+			 * map_pages could fail after mapping some of the pages,
+			 * so update the counters before error handling.
+			 */
+ phys += mapped;
+ addr += mapped;
+ size -= mapped;
+
+ if (ret) {
+ msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
return -EINVAL;
}
-
- phys += 4096;
- addr += 4096;
- size -= 4096;
- mapped += 4096;
}
}
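The counter updates before the error check implement a partial-success pattern: map_pages() reports how much it completed via @mapped even on failure, so the cleanup unmap of [iova, addr) covers exactly what was mapped. A toy userspace sketch of the same pattern (mock_map() is invented for the demo):

#include <stdio.h>
#include <string.h>

/* Pretend "map": copies at most 3 bytes per call, fails on 'X'. */
static int mock_map(char *dst, const char *src, size_t len, size_t *done)
{
	*done = 0;
	for (size_t i = 0; i < len && i < 3; i++) {
		if (src[i] == 'X')
			return -1;	/* failed after *done bytes */
		dst[i] = src[i];
		(*done)++;
	}
	return 0;
}

int main(void)
{
	const char *src = "abcdXef";
	char dst[8] = {0};
	size_t off = 0, size = strlen(src);

	while (size) {
		size_t done;
		int ret = mock_map(dst + off, src + off, size, &done);

		/* advance before error handling, like the map_pages loop */
		off += done;
		size -= done;

		if (ret) {
			printf("failed; cleaning up %zu mapped bytes\n", off);
			break;
		}
	}
	printf("dst=\"%s\"\n", dst);
	return 0;
}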
@@ -207,6 +277,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* Needed later for TLB flush */
pagetable->parent = parent;
+ pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
/*
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index a92ffde53f0b..db2f847c8535 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file)
file->private_data = rd;
rd->open = true;
+ /* Reset fifo to clear any previously unread data: */
+ rd->fifo.head = rd->fifo.tail = 0;
+
/* the parsing tools need to know gpu-id to know which
* register database to load.
*
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 56eecb4a72dc..cad4c3525f0b 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -29,8 +29,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
msm_gem_unlock(obj);
}
- pm_runtime_get_sync(&gpu->pdev->dev);
-
/* TODO move submit path over to using a per-ring lock.. */
mutex_lock(&gpu->lock);
@@ -38,8 +36,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
mutex_unlock(&gpu->lock);
- pm_runtime_put(&gpu->pdev->dev);
-
return dma_fence_get(submit->hw_fence);
}
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index f486a3cd4e55..c6929e205b51 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -200,6 +200,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
*id = queue->id;
idr_init(&queue->fence_idr);
+ mutex_init(&queue->idr_lock);
mutex_init(&queue->lock);
list_add_tail(&queue->node, &ctx->submitqueues);
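The new idr_lock means fence-id table operations no longer need the heavyweight queue->lock that serializes whole submits. A hypothetical pthread sketch of the split-lock discipline (the toy fence_table stands in for the real fence_idr IDR):

#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;		/* serializes whole submits (slow path) */
	pthread_mutex_t idr_lock;	/* protects only the fence-id table */
	void *fence_table[64];		/* toy stand-in for fence_idr */
	int next_id;
};

static int fence_id_alloc(struct queue *q, void *fence)
{
	pthread_mutex_lock(&q->idr_lock);
	int id = q->next_id++ % 64;
	q->fence_table[id] = fence;
	pthread_mutex_unlock(&q->idr_lock);
	return id;
}

static void *fence_id_lookup(struct queue *q, int id)
{
	pthread_mutex_lock(&q->idr_lock);	/* queue->lock never taken here */
	void *fence = q->fence_table[id % 64];
	pthread_mutex_unlock(&q->idr_lock);
	return fence;
}

int main(void)
{
	struct queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.idr_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	int id = fence_id_alloc(&q, (void *)0xf00UL);

	printf("fence %d -> %p\n", id, fence_id_lookup(&q, id));
	return 0;
}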
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 35bb0bb3fe61..126b3c6e12f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -822,6 +822,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
+ /* TODO: figure out a better solution here
+ *
+ * wait on the fence here explicitly as going through
+ * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+ *
+			 * Without this the operation can time out and we'll fall back to a
+ * software copy, which might take several minutes to finish.
+ */
+ nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict, false,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 16356611b5b9..5fe209107246 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -139,44 +139,24 @@ static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
}
}
-static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
- struct vm_fault *vmf, struct migrate_vma *args,
- dma_addr_t *dma_addr)
+static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
+ struct page *dpage, dma_addr_t *dma_addr)
{
struct device *dev = drm->dev->dev;
- struct page *dpage, *spage;
- struct nouveau_svmm *svmm;
-
- spage = migrate_pfn_to_page(args->src[0]);
- if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
- return 0;
- dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
- if (!dpage)
- return VM_FAULT_SIGBUS;
lock_page(dpage);
*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
- goto error_free_page;
+ return -EIO;
- svmm = spage->zone_device_data;
- mutex_lock(&svmm->mutex);
- nouveau_svmm_invalidate(svmm, args->start, args->end);
if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
- NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
- goto error_dma_unmap;
- mutex_unlock(&svmm->mutex);
+ NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
+ dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ return -EIO;
+ }
- args->dst[0] = migrate_pfn(page_to_pfn(dpage));
return 0;
-
-error_dma_unmap:
- mutex_unlock(&svmm->mutex);
- dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
-error_free_page:
- __free_page(dpage);
- return VM_FAULT_SIGBUS;
}
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
@@ -184,9 +164,11 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
struct nouveau_drm *drm = page_to_drm(vmf->page);
struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
+ struct nouveau_svmm *svmm;
+ struct page *spage, *dpage;
unsigned long src = 0, dst = 0;
dma_addr_t dma_addr = 0;
- vm_fault_t ret;
+ vm_fault_t ret = 0;
struct migrate_vma args = {
.vma = vmf->vma,
.start = vmf->address,
@@ -207,9 +189,25 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
if (!args.cpages)
return 0;
- ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
- if (ret || dst == 0)
+ spage = migrate_pfn_to_page(src);
+ if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+ goto done;
+
+ dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+ if (!dpage)
+ goto done;
+
+ dst = migrate_pfn(page_to_pfn(dpage));
+
+ svmm = spage->zone_device_data;
+ mutex_lock(&svmm->mutex);
+ nouveau_svmm_invalidate(svmm, args.start, args.end);
+ ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
+ mutex_unlock(&svmm->mutex);
+ if (ret) {
+ ret = VM_FAULT_SIGBUS;
goto done;
+ }
nouveau_fence_new(dmem->migrate.chan, false, &fence);
migrate_vma_pages(&args);
@@ -326,7 +324,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
return NULL;
}
- lock_page(page);
+ zone_device_page_init(page);
return page;
}
@@ -369,6 +367,52 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
mutex_unlock(&drm->dmem->mutex);
}
+/*
+ * Evict all pages mapping a chunk.
+ */
+static void
+nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
+{
+ unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
+ unsigned long *src_pfns, *dst_pfns;
+ dma_addr_t *dma_addrs;
+ struct nouveau_fence *fence;
+
+ src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
+ dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+ dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+
+ migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
+ npages);
+
+ for (i = 0; i < npages; i++) {
+ if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
+ struct page *dpage;
+
+ /*
+			 * __GFP_NOFAIL because the GPU is going away and there
+ * is nothing sensible we can do if we can't copy the
+ * data back.
+ */
+ dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
+ dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
+ nouveau_dmem_copy_one(chunk->drm,
+ migrate_pfn_to_page(src_pfns[i]), dpage,
+ &dma_addrs[i]);
+ }
+ }
+
+ nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence);
+ migrate_device_pages(src_pfns, dst_pfns, npages);
+ nouveau_dmem_fence_done(&fence);
+ migrate_device_finalize(src_pfns, dst_pfns, npages);
+ kfree(src_pfns);
+ kfree(dst_pfns);
+ for (i = 0; i < npages; i++)
+ dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ kfree(dma_addrs);
+}
+
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
@@ -380,8 +424,10 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
mutex_lock(&drm->dmem->mutex);
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
+ nouveau_dmem_evict_chunk(chunk);
nouveau_bo_unpin(chunk->bo);
nouveau_bo_ref(NULL, &chunk->bo);
+ WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
release_mem_region(chunk->pagemap.range.start,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 561309d447e0..fd99ec0f4257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
@@ -70,6 +71,18 @@
#include "nouveau_svm.h"
#include "nouveau_dmem.h"
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
module_param_named(config, nouveau_config, charp, 0400);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 4b546b02d3ae..4b39d1dd9140 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1285,7 +1285,8 @@ static const struct panel_desc innolux_n116bca_ea1 = {
},
.delay = {
.hpd_absent = 200,
- .prepare_to_enable = 80,
+ .enable = 80,
+ .disable = 50,
.unprepare = 500,
},
};
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index cb5cb27462df..36a46cb7fe1c 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -288,7 +288,7 @@ static int lcd_olinuxino_probe(struct i2c_client *client,
return 0;
}
-static int lcd_olinuxino_remove(struct i2c_client *client)
+static void lcd_olinuxino_remove(struct i2c_client *client)
{
struct lcd_olinuxino *panel = i2c_get_clientdata(client);
@@ -296,8 +296,6 @@ static int lcd_olinuxino_remove(struct i2c_client *client)
drm_panel_disable(&panel->panel);
drm_panel_unprepare(&panel->panel);
-
- return 0;
}
static const struct of_device_id lcd_olinuxino_of_ids[] = {
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index a6dc5ab182fa..79f852465a84 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -446,7 +446,7 @@ error:
return -ENODEV;
}
-static int rpi_touchscreen_remove(struct i2c_client *i2c)
+static void rpi_touchscreen_remove(struct i2c_client *i2c)
{
struct rpi_touchscreen *ts = i2c_get_clientdata(i2c);
@@ -455,8 +455,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c)
drm_panel_remove(&ts->base);
mipi_dsi_device_unregister(ts->dsi);
-
- return 0;
}
static int rpi_touchscreen_dsi_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 252fd66011cc..2944228a8e2c 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2252,7 +2252,7 @@ static const struct panel_desc innolux_g121i1_l01 = {
.enable = 200,
.disable = 20,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 5110cd9b2425..fe5f12f16a63 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -131,6 +131,17 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return PTR_ERR(opp);
panfrost_devfreq_profile.initial_freq = cur_freq;
+
+ /*
+	 * Set the recommended OPP. This will enable and configure the
+	 * regulator, if any, and will avoid a switch-off by
+	 * regulator_late_cleanup().
+	 */
+	ret = dev_pm_opp_set_opp(dev, opp);
+	if (ret) {
+		DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+		dev_pm_opp_put(opp);
+		return ret;
+ }
+
dev_pm_opp_put(opp);
/*
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index ac006bed4743..8ef25ab305ae 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -2056,7 +2056,7 @@ static void ci_clear_vc(struct radeon_device *rdev)
static int ci_upload_firmware(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
- int i, ret;
+ int i;
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
@@ -2067,9 +2067,7 @@ static int ci_upload_firmware(struct radeon_device *rdev)
ci_stop_smc_clock(rdev);
ci_reset_smc(rdev);
- ret = ci_load_smc_ucode(rdev, pi->sram_end);
-
- return ret;
+ return ci_load_smc_ucode(rdev, pi->sram_end);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index a28d5ceab628..6cbe1ab81aba 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -512,14 +512,11 @@ long radeon_drm_ioctl(struct file *filp,
static long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
- int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- ret = radeon_drm_ioctl(filp, cmd, arg);
-
- return ret;
+ return radeon_drm_ioctl(filp, cmd, arg);
}
#endif
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index e7275b5e7ec8..6f132325c8b7 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -14,10 +14,3 @@ obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o
obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o
-
-# 'remote-endpoint' is fixed up at run-time
-DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7791 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7793 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7795 += -Wno-graph_endpoint
-DTC_FLAGS_rcar_du_of_lvds_r8a7796 += -Wno-graph_endpoint
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index fd3b94649a01..3619e1ddeb62 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -29,6 +29,7 @@
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
#include "rcar_lvds.h"
+#include "rcar_mipi_dsi.h"
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
@@ -744,7 +745,19 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
const struct drm_display_mode *mode =
&crtc->state->adjusted_mode;
- rcar_lvds_clk_enable(bridge, mode->clock * 1000);
+ rcar_lvds_pclk_enable(bridge, mode->clock * 1000);
+ }
+
+ /*
+ * Similarly to LVDS, on V3U the dot clock is provided by the DSI
+ * encoder, and we need to enable the DSI clocks before enabling the CRTC.
+ */
+ if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
+ (rstate->outputs &
+ (BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) {
+ struct drm_bridge *bridge = rcdu->dsi[rcrtc->index];
+
+ rcar_mipi_dsi_pclk_enable(bridge, state);
}
rcar_du_crtc_start(rcrtc);
@@ -777,7 +790,20 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
* Disable the LVDS clock output, see
* rcar_du_crtc_atomic_enable().
*/
- rcar_lvds_clk_disable(bridge);
+ rcar_lvds_pclk_disable(bridge);
+ }
+
+ if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
+ (rstate->outputs &
+ (BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) {
+ struct drm_bridge *bridge = rcdu->dsi[rcrtc->index];
+
+ /*
+ * Disable the DSI clock output, see
+ * rcar_du_crtc_atomic_enable().
+ */
+ rcar_mipi_dsi_pclk_disable(bridge);
}
spin_lock_irq(&crtc->dev->event_lock);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 00ac233a115e..a2776f1d6f2c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -27,7 +27,6 @@
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
-#include "rcar_du_regs.h"
/* -----------------------------------------------------------------------------
* Device Information
@@ -507,7 +506,8 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
static const struct rcar_du_device_info rcar_du_r8a779a0_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
- | RCAR_DU_FEATURE_VSP1_SOURCE,
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_NO_BLENDING,
.channels_mask = BIT(1) | BIT(0),
.routes = {
/* R8A779A0 has two MIPI DSI outputs. */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index bfad7775d9a1..5cfa2bb7ad93 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -31,6 +31,7 @@ struct rcar_du_device;
#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */
#define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */
#define RCAR_DU_FEATURE_TVM_SYNC BIT(4) /* Has TV switch/sync modes */
+#define RCAR_DU_FEATURE_NO_BLENDING BIT(5) /* PnMR.SPIM does not have ALP nor EOR bits */
#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */
@@ -91,6 +92,7 @@ struct rcar_du_device_info {
#define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2)
#define RCAR_DU_MAX_VSPS 4
#define RCAR_DU_MAX_LVDS 2
+#define RCAR_DU_MAX_DSI 2
struct rcar_du_device {
struct device *dev;
@@ -107,6 +109,7 @@ struct rcar_du_device {
struct platform_device *cmms[RCAR_DU_MAX_CRTCS];
struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS];
struct drm_bridge *lvds[RCAR_DU_MAX_LVDS];
+ struct drm_bridge *dsi[RCAR_DU_MAX_DSI];
struct {
struct drm_property *colorkey;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 60d6be78323b..b1787be31e92 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -9,18 +9,13 @@
#include <linux/export.h>
#include <linux/of.h>
-#include <linux/slab.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_managed.h>
-#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
-#include "rcar_du_kms.h"
#include "rcar_lvds.h"
/* -----------------------------------------------------------------------------
@@ -84,6 +79,10 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
if (output == RCAR_DU_OUTPUT_LVDS0 ||
output == RCAR_DU_OUTPUT_LVDS1)
rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge;
+
+ if (output == RCAR_DU_OUTPUT_DSI0 ||
+ output == RCAR_DU_OUTPUT_DSI1)
+ rcdu->dsi[output - RCAR_DU_OUTPUT_DSI0] = bridge;
}
/*
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 21881fb5e84a..8c2719efda2a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -405,8 +405,8 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
format = rcar_du_format_info(mode_cmd->pixel_format);
if (format == NULL) {
- dev_dbg(dev->dev, "unsupported pixel format %08x\n",
- mode_cmd->pixel_format);
+ dev_dbg(dev->dev, "unsupported pixel format %p4cc\n",
+ &mode_cmd->pixel_format);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 9e1f0cbbf642..d759e0192181 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -506,8 +506,15 @@ static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp,
unsigned int index,
const struct rcar_du_plane_state *state)
{
- rcar_du_plane_write(rgrp, index, PnMR,
- PnMR_SPIM_TP_OFF | state->format->pnmr);
+ struct rcar_du_device *rcdu = rgrp->dev;
+ u32 pnmr = state->format->pnmr | PnMR_SPIM_TP_OFF;
+
+ if (rcdu->info->features & RCAR_DU_FEATURE_NO_BLENDING) {
+ /* No blending. ALP and EOR are not supported. */
+ pnmr &= ~(PnMR_SPIM_ALP | PnMR_SPIM_EOR);
+ }
+
+ rcar_du_plane_write(rgrp, index, PnMR, pnmr);
rcar_du_plane_write(rgrp, index, PnDDCR4,
state->format->edf | PnDDCR4_CODE);
@@ -521,7 +528,6 @@ static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp,
* register to 0 to avoid this.
*/
- /* TODO: Check if alpha-blending should be disabled in PnMR. */
rcar_du_plane_write(rgrp, index, PnALPHAR, 0);
}
@@ -619,8 +625,8 @@ int __rcar_du_plane_atomic_check(struct drm_plane *plane,
*format = rcar_du_format_info(state->fb->format->format);
if (*format == NULL) {
- dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
- state->fb->format->format);
+ dev_dbg(dev->dev, "%s: unsupported format %p4cc\n", __func__,
+ &state->fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 10b7f1d0877a..e465aef41585 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -152,6 +152,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
.alpha = state->state.alpha >> 8,
.zpos = state->state.zpos,
};
+ u32 fourcc = state->format->fourcc;
unsigned int i;
cfg.src.left = state->state.src.x1 >> 16;
@@ -168,9 +169,27 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
- format = rcar_du_format_info(state->format->fourcc);
+ if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ fourcc = DRM_FORMAT_XRGB1555;
+ break;
+
+ case DRM_FORMAT_ARGB4444:
+ fourcc = DRM_FORMAT_XRGB4444;
+ break;
+
+ case DRM_FORMAT_ARGB8888:
+ fourcc = DRM_FORMAT_XRGB8888;
+ break;
+ }
+ }
+
+ format = rcar_du_format_info(fourcc);
cfg.pixelformat = format->v4l2;
+ cfg.premult = state->state.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI;
+
vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
plane->index, &cfg);
}
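With pixel-level blending disabled, the alpha channel is ignored by remapping the ARGB fourcc to its XRGB sibling before the format lookup. A standalone sketch of that remap using the drm_fourcc.h byte packing (the FMT_* names are local stand-ins for the DRM_FORMAT_* constants; the printf assumes a little-endian host):

#include <stdio.h>
#include <stdint.h>

#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
				 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

#define FMT_ARGB1555 fourcc_code('A', 'R', '1', '5')
#define FMT_XRGB1555 fourcc_code('X', 'R', '1', '5')
#define FMT_ARGB4444 fourcc_code('A', 'R', '1', '2')
#define FMT_XRGB4444 fourcc_code('X', 'R', '1', '2')
#define FMT_ARGB8888 fourcc_code('A', 'R', '2', '4')
#define FMT_XRGB8888 fourcc_code('X', 'R', '2', '4')

/* Drop the alpha channel when pixel blending is disabled. */
static uint32_t drop_alpha(uint32_t fourcc)
{
	switch (fourcc) {
	case FMT_ARGB1555: return FMT_XRGB1555;
	case FMT_ARGB4444: return FMT_XRGB4444;
	case FMT_ARGB8888: return FMT_XRGB8888;
	default:           return fourcc;
	}
}

int main(void)
{
	uint32_t in = FMT_ARGB8888, out = drop_alpha(in);

	printf("%.4s -> %.4s\n", (char *)&in, (char *)&out); /* AR24 -> XR24 */
	return 0;
}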
@@ -436,6 +455,11 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
drm_plane_create_zpos_property(&plane->plane, i, 0,
num_planes - 1);
+ drm_plane_create_blend_mode_property(&plane->plane,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
vsp->num_planes++;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index 25f50a297c11..8cd37d7b8ae2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -166,8 +166,8 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
wb_state->format = rcar_du_format_info(fb->format->format);
if (wb_state->format == NULL) {
- dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
- fb->format->format);
+ dev_dbg(dev->dev, "%s: unsupported format %p4cc\n", __func__,
+ &fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index d85aa4bc7f84..81a060c2fe3f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -306,7 +306,7 @@ static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq)
* Clock - D3/E3 only
*/
-int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq)
+int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
int ret;
@@ -324,9 +324,9 @@ int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq)
return 0;
}
-EXPORT_SYMBOL_GPL(rcar_lvds_clk_enable);
+EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable);
-void rcar_lvds_clk_disable(struct drm_bridge *bridge)
+void rcar_lvds_pclk_disable(struct drm_bridge *bridge)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -339,7 +339,7 @@ void rcar_lvds_clk_disable(struct drm_bridge *bridge)
clk_disable_unprepare(lvds->clocks.mod);
}
-EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable);
+EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
/* -----------------------------------------------------------------------------
* Bridge
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.h b/drivers/gpu/drm/rcar-du/rcar_lvds.h
index 3097bf749bec..bee7033b60d6 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.h
@@ -13,17 +13,17 @@
struct drm_bridge;
#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
-int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq);
-void rcar_lvds_clk_disable(struct drm_bridge *bridge);
+int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq);
+void rcar_lvds_pclk_disable(struct drm_bridge *bridge);
bool rcar_lvds_dual_link(struct drm_bridge *bridge);
bool rcar_lvds_is_connected(struct drm_bridge *bridge);
#else
-static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge,
- unsigned long freq)
+static inline int rcar_lvds_pclk_enable(struct drm_bridge *bridge,
+ unsigned long freq)
{
return -ENOSYS;
}
-static inline void rcar_lvds_clk_disable(struct drm_bridge *bridge) { }
+static inline void rcar_lvds_pclk_disable(struct drm_bridge *bridge) { }
static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge)
{
return false;
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
index 62f7eb84ab01..a7f2b7f66a17 100644
--- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
@@ -25,6 +25,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include "rcar_mipi_dsi.h"
#include "rcar_mipi_dsi_regs.h"
struct rcar_mipi_dsi {
@@ -414,7 +415,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
/* Enable DOT clock */
vclkset = VCLKSET_CKEN;
- rcar_mipi_dsi_set(dsi, VCLKSET, vclkset);
+ rcar_mipi_dsi_write(dsi, VCLKSET, vclkset);
if (dsi_format == 24)
vclkset |= VCLKSET_BPP_24;
@@ -429,7 +430,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
vclkset |= VCLKSET_COLOR_RGB | VCLKSET_DIV(setup_info.div)
| VCLKSET_LANE(dsi->lanes - 1);
- rcar_mipi_dsi_set(dsi, VCLKSET, vclkset);
+ rcar_mipi_dsi_write(dsi, VCLKSET, vclkset);
/* After setting VCLKSET register, enable VCLKEN */
rcar_mipi_dsi_set(dsi, VCLKEN, VCLKEN_CKEN);
@@ -441,9 +442,21 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
static void rcar_mipi_dsi_shutdown(struct rcar_mipi_dsi *dsi)
{
+	/* Disable VCLKEN */
+	rcar_mipi_dsi_write(dsi, VCLKEN, 0);
+
+	/* Disable DOT clock */
+	rcar_mipi_dsi_write(dsi, VCLKSET, 0);
+
rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ);
rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
+ /* CFGCLK disable */
+ rcar_mipi_dsi_clr(dsi, CFGCLKSET, CFGCLKSET_CKEN);
+
+ /* LPCLK disable */
+ rcar_mipi_dsi_clr(dsi, LPCLKSET, LPCLKSET_CKEN);
+
dev_dbg(dsi->dev, "DSI device is shutdown\n");
}
@@ -542,6 +555,34 @@ static int rcar_mipi_dsi_start_video(struct rcar_mipi_dsi *dsi)
return 0;
}
+static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi)
+{
+ u32 status;
+ int ret;
+
+ /* Disable transmission in video mode. */
+ rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_EN_VIDEO);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & TXVMSR_ACT),
+ 2000, 100000, false, dsi, TXVMSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Failed to disable video transmission\n");
+ return;
+ }
+
+ /* Assert video FIFO clear. */
+ rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_VFCLR);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & TXVMSR_VFRDY),
+ 2000, 100000, false, dsi, TXVMSR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Failed to assert video FIFO clear\n");
+ return;
+ }
+}
+
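read_poll_timeout() re-reads a register until a condition holds or a timeout expires, sleeping between reads. A userspace approximation of the pattern used above (the mock register and bit position are invented; the real TXVMSR_ACT layout is hardware-defined):

#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <errno.h>
#include <unistd.h>

#define MOCK_TXVMSR_ACT (1u << 1)	/* hypothetical bit layout */

static uint32_t mock_read_txvmsr(void)
{
	static int reads;
	return (++reads < 4) ? MOCK_TXVMSR_ACT : 0; /* busy for 3 reads */
}

static int poll_timeout_us(uint32_t (*op)(void), uint32_t *val,
			   int (*cond)(uint32_t),
			   long sleep_us, long timeout_us)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		*val = op();
		if (cond(*val))
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed = (now.tv_sec - start.tv_sec) * 1000000L +
			       (now.tv_nsec - start.tv_nsec) / 1000;
		if (elapsed > timeout_us)
			return -ETIMEDOUT;
		usleep(sleep_us);
	}
}

static int act_cleared(uint32_t v) { return !(v & MOCK_TXVMSR_ACT); }

int main(void)
{
	uint32_t status;
	int ret = poll_timeout_us(mock_read_txvmsr, &status, act_cleared,
				  2000, 100000);

	printf("ret=%d status=%#x\n", ret, status);
	return 0;
}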
/* -----------------------------------------------------------------------------
* Bridge
*/
@@ -558,7 +599,22 @@ static int rcar_mipi_dsi_attach(struct drm_bridge *bridge,
static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
- struct drm_atomic_state *state = old_bridge_state->base.state;
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+
+ rcar_mipi_dsi_start_video(dsi);
+}
+
+static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
+
+ rcar_mipi_dsi_stop_video(dsi);
+}
+
+void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
const struct drm_display_mode *mode;
struct drm_connector *connector;
@@ -586,8 +642,6 @@ static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
if (ret < 0)
goto err_dsi_start_hs;
- rcar_mipi_dsi_start_video(dsi);
-
return;
err_dsi_start_hs:
@@ -595,15 +649,16 @@ err_dsi_start_hs:
err_dsi_startup:
rcar_mipi_dsi_clk_disable(dsi);
}
+EXPORT_SYMBOL_GPL(rcar_mipi_dsi_pclk_enable);
-static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state)
+void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge)
{
struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge);
rcar_mipi_dsi_shutdown(dsi);
rcar_mipi_dsi_clk_disable(dsi);
}
+EXPORT_SYMBOL_GPL(rcar_mipi_dsi_pclk_disable);
static enum drm_mode_status
rcar_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h
new file mode 100644
index 000000000000..528a196e6edd
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R-Car DSI Encoder
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ *
+ * Contact: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#ifndef __RCAR_MIPI_DSI_H__
+#define __RCAR_MIPI_DSI_H__
+
+struct drm_atomic_state;
+struct drm_bridge;
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_MIPI_DSI)
+void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge);
+#else
+static inline void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+}
+
+static inline void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge)
+{
+}
+#endif /* CONFIG_DRM_RCAR_MIPI_DSI */
+
+#endif /* __RCAR_MIPI_DSI_H__ */
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index c204e9b95c1f..518ee13b1d6f 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+cdn_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 552426d5d3a2..aac20be5ac08 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -1438,11 +1438,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_EDP0:
die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_EDP |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_MIPI0:
die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 4cc59bae38dd..2fab218d7082 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -919,8 +919,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
- if (job && (!job->s_fence->parent ||
- dma_fence_is_signaled(job->s_fence->parent))) {
+ if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
@@ -930,9 +929,9 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
next = list_first_entry_or_null(&sched->pending_list,
typeof(*next), list);
- if (next && job->s_fence->parent) {
+ if (next) {
next->s_fence->scheduled.timestamp =
- job->s_fence->parent->timestamp;
+ job->s_fence->finished.timestamp;
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}
diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c
index 1e0fcec7be47..ddfa0bb5d9c9 100644
--- a/drivers/gpu/drm/solomon/ssd130x-i2c.c
+++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c
@@ -39,13 +39,11 @@ static int ssd130x_i2c_probe(struct i2c_client *client)
return 0;
}
-static int ssd130x_i2c_remove(struct i2c_client *client)
+static void ssd130x_i2c_remove(struct i2c_client *client)
{
struct ssd130x_device *ssd130x = i2c_get_clientdata(client);
ssd130x_remove(ssd130x);
-
- return 0;
}
static void ssd130x_i2c_shutdown(struct i2c_client *client)
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 7a2b2d6bc3fe..62f69589a72d 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -729,7 +729,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
static int drm_buddy_init_test(struct kunit *test)
{
while (!random_seed)
- random_seed = get_random_int();
+ random_seed = get_random_u32();
return 0;
}
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 659d1af4dca7..c4b66eeae203 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -2212,7 +2212,7 @@ err_nodes:
static int drm_mm_init_test(struct kunit *test)
{
while (!random_seed)
- random_seed = get_random_int();
+ random_seed = get_random_u32();
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 497ee1fdbad7..fa04e62202c1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -239,16 +239,19 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
- if (fbo->base.resource) {
- ttm_resource_set_bo(fbo->base.resource, &fbo->base);
- bo->resource = NULL;
- }
-
dma_resv_init(&fbo->base.base._resv);
fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ if (fbo->base.resource) {
+ ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ bo->resource = NULL;
+ ttm_bo_set_bulk_move(&fbo->base, NULL);
+ } else {
+ fbo->base.bulk_move = NULL;
+ }
+
ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
if (ret) {
kfree(fbo);