-rw-r--r--.gitignore1
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml68
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/focaltech,gpt3.yaml56
-rw-r--r--Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml76
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml8
-rw-r--r--Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml53
-rw-r--r--Documentation/fb/modedb.rst5
-rw-r--r--Documentation/gpu/drm-kms-helpers.rst7
-rw-r--r--Documentation/gpu/drm-kms.rst6
-rw-r--r--Documentation/gpu/drm-uapi.rst12
-rw-r--r--Documentation/gpu/todo.rst13
-rw-r--r--Documentation/gpu/vc4.rst19
-rw-r--r--Documentation/maintainer/maintainer-entry-profile.rst1
-rw-r--r--Documentation/nvme/feature-and-quirk-policy.rst77
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst1
-rw-r--r--Documentation/userspace-api/media/v4l/subdev-formats.rst111
-rw-r--r--Documentation/virt/kvm/api.rst46
-rw-r--r--Documentation/virt/kvm/locking.rst19
-rw-r--r--MAINTAINERS14
-rw-r--r--Makefile4
-rw-r--r--arch/x86/events/amd/core.c2
-rw-r--r--arch/x86/kernel/callthunks.c4
-rw-r--r--arch/x86/kernel/kprobes/core.c10
-rw-r--r--arch/x86/kernel/kprobes/opt.c28
-rw-r--r--arch/x86/kvm/hyperv.c63
-rw-r--r--arch/x86/kvm/irq_comm.c5
-rw-r--r--arch/x86/kvm/lapic.h4
-rw-r--r--arch/x86/kvm/mmu/spte.h2
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c25
-rw-r--r--arch/x86/kvm/pmu.c3
-rw-r--r--arch/x86/kvm/pmu.h3
-rw-r--r--arch/x86/kvm/vmx/nested.c20
-rw-r--r--arch/x86/kvm/vmx/vmx.c7
-rw-r--r--arch/x86/kvm/x86.c3
-rw-r--r--arch/x86/kvm/xen.c144
-rw-r--r--block/bfq-iosched.c2
-rw-r--r--drivers/acpi/acpi_video.c17
-rw-r--r--drivers/acpi/resource.c32
-rw-r--r--drivers/acpi/video_detect.c23
-rw-r--r--drivers/acpi/x86/s2idle.c87
-rw-r--r--drivers/ata/ahci.c32
-rw-r--r--drivers/dma-buf/dma-buf.c14
-rw-r--r--drivers/dma-buf/udmabuf.c28
-rw-r--r--drivers/firmware/sysfb_simplefb.c43
-rw-r--r--drivers/gpu/drm/Kconfig8
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c325
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c187
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c259
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_3.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_3.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c155
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c478
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c161
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h26
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c110
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h14
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c26
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c106
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c137
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c86
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c37
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h83
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c21
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c31
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h157
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h2
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c11
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c13
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c76
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c31
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c22
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c20
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c20
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c51
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c75
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c1
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c24
-rw-r--r--drivers/gpu/drm/ast/Kconfig2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c9
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c5
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c5
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c5
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c6
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c5
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c5
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c132
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c320
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c26
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c5
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c6
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c5
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c16
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c5
-rw-r--r--drivers/gpu/drm/bridge/panel.c13
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c6
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c22
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c31
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c5
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c5
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c4
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c5
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c8
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c5
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c5
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c12
-rw-r--r--drivers/gpu/drm/drm_atomic.c23
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c124
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c4
-rw-r--r--drivers/gpu/drm/drm_blend.c13
-rw-r--r--drivers/gpu/drm/drm_bridge.c294
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c27
-rw-r--r--drivers/gpu/drm/drm_client.c11
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c173
-rw-r--r--drivers/gpu/drm/drm_debugfs.c110
-rw-r--r--drivers/gpu/drm/drm_drv.c7
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c256
-rw-r--r--drivers/gpu/drm/drm_fbdev_generic.c15
-rw-r--r--drivers/gpu/drm/drm_file.c18
-rw-r--r--drivers/gpu/drm/drm_format_helper.c462
-rw-r--r--drivers/gpu/drm/drm_fourcc.c4
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c11
-rw-r--r--drivers/gpu/drm/drm_gem.c19
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c31
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c4
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c6
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c12
-rw-r--r--drivers/gpu/drm/drm_internal.h5
-rw-r--r--drivers/gpu/drm/drm_ioc32.c13
-rw-r--r--drivers/gpu/drm/drm_ioctl.c25
-rw-r--r--drivers/gpu/drm/drm_lease.c64
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c158
-rw-r--r--drivers/gpu/drm/drm_mode_config.c10
-rw-r--r--drivers/gpu/drm/drm_modes.c546
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c33
-rw-r--r--drivers/gpu/drm/drm_plane.c5
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c122
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c12
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gud/gud_connector.c10
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c18
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h1
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c223
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c4
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c12
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c4
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c5
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c59
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c11
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c42
-rw-r--r--drivers/gpu/drm/i915/i915_deps.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c37
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.h4
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c2
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.c3
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c4
-rw-r--r--drivers/gpu/drm/imx/Kconfig41
-rw-r--r--drivers/gpu/drm/imx/Makefile10
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.c23
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.h7
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c15
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3/Kconfig41
-rw-r--r--drivers/gpu/drm/imx/ipuv3/Makefile11
-rw-r--r--drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c (renamed from drivers/gpu/drm/imx/dw_hdmi-imx.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm-core.c (renamed from drivers/gpu/drm/imx/imx-drm-core.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm.h (renamed from drivers/gpu/drm/imx/imx-drm.h)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-ldb.c (renamed from drivers/gpu/drm/imx/imx-ldb.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-tve.c (renamed from drivers/gpu/drm/imx/imx-tve.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c (renamed from drivers/gpu/drm/imx/ipuv3-crtc.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c (renamed from drivers/gpu/drm/imx/ipuv3-plane.c)14
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h (renamed from drivers/gpu/drm/imx/ipuv3-plane.h)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c (renamed from drivers/gpu/drm/imx/parallel-display.c)0
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.c13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c4
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c23
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c2
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c10
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c26
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c41
-rw-r--r--drivers/gpu/drm/panel/Kconfig29
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c96
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c42
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c11
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c46
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c451
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c6
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c58
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c106
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c24
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c14
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c5
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-ota5601a.c364
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c44
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c24
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c19
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c6
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c341
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c398
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c358
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c112
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c16
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h3
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/Kconfig2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c9
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c4
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c9
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c4
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c141
-rw-r--r--drivers/gpu/drm/tests/Makefile8
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c110
-rw-r--r--drivers/gpu/drm/tests/drm_cmdline_parser_test.c68
-rw-r--r--drivers/gpu/drm/tests/drm_connector_test.c76
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c384
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c105
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.h11
-rw-r--r--drivers/gpu/drm/tests/drm_managed_test.c71
-rw-r--r--drivers/gpu/drm/tests/drm_modes_test.c158
-rw-r--r--drivers/gpu/drm/tests/drm_probe_helper_test.c218
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c6
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c9
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c2
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c15
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c5
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c6
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c36
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c5
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c20
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c5
-rw-r--r--drivers/gpu/drm/tiny/ofdrm.c40
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c10
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c30
-rw-r--r--drivers/gpu/drm/tiny/st7586.c39
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c211
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c111
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c19
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c26
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c3
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c22
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c62
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c6
-rw-r--r--drivers/gpu/drm/vc4/Kconfig16
-rw-r--r--drivers/gpu/drm/vc4/Makefile7
-rw-r--r--drivers/gpu/drm/vc4/tests/.kunitconfig13
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.c200
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.h63
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c41
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_output.c138
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_plane.c47
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c1039
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c10
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c215
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c36
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c34
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c5
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h148
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c187
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c28
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h4
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c272
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c139
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c139
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h20
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c62
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c365
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_trace.h26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c13
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c27
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h4
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c1
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c1
-rw-r--r--drivers/nvme/host/auth.c2
-rw-r--r--drivers/nvme/host/core.c34
-rw-r--r--drivers/nvme/host/ioctl.c28
-rw-r--r--drivers/nvme/host/nvme.h2
-rw-r--r--drivers/nvme/host/pci.c46
-rw-r--r--drivers/nvme/target/admin-cmd.c37
-rw-r--r--drivers/nvme/target/passthru.c11
-rw-r--r--include/acpi/video.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h5
-rw-r--r--include/drm/display/drm_dp.h6
-rw-r--r--include/drm/drm_atomic.h32
-rw-r--r--include/drm/drm_atomic_state_helper.h4
-rw-r--r--include/drm/drm_audio_component.h3
-rw-r--r--include/drm/drm_bridge.h36
-rw-r--r--include/drm/drm_bridge_connector.h2
-rw-r--r--include/drm/drm_connector.h82
-rw-r--r--include/drm/drm_debugfs.h59
-rw-r--r--include/drm/drm_device.h32
-rw-r--r--include/drm/drm_format_helper.h16
-rw-r--r--include/drm/drm_gem.h1
-rw-r--r--include/drm/drm_gem_atomic_helper.h2
-rw-r--r--include/drm/drm_gem_ttm_helper.h3
-rw-r--r--include/drm/drm_gem_vram_helper.h4
-rw-r--r--include/drm/drm_kunit_helpers.h91
-rw-r--r--include/drm/drm_mipi_dbi.h43
-rw-r--r--include/drm/drm_mipi_dsi.h39
-rw-r--r--include/drm/drm_mode_config.h12
-rw-r--r--include/drm/drm_modes.h17
-rw-r--r--include/drm/drm_modeset_helper_vtables.h22
-rw-r--r--include/drm/drm_panel.h10
-rw-r--r--include/drm/drm_plane.h4
-rw-r--r--include/drm/drm_probe_helper.h1
-rw-r--r--include/drm/drm_simple_kms_helper.h4
-rw-r--r--include/drm/ttm/ttm_bo.h (renamed from include/drm/ttm/ttm_bo_api.h)371
-rw-r--r--include/drm/ttm/ttm_bo_driver.h303
-rw-r--r--include/drm/ttm/ttm_device.h7
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h4
-rw-r--r--include/linux/dma-buf.h4
-rw-r--r--include/linux/nvme.h4
-rw-r--r--include/uapi/drm/amdgpu_drm.h6
-rw-r--r--include/uapi/drm/drm_fourcc.h12
-rw-r--r--include/uapi/linux/io_uring.h8
-rw-r--r--include/uapi/linux/kvm.h3
-rw-r--r--include/uapi/linux/media-bus-format.h5
-rw-r--r--io_uring/cancel.c9
-rw-r--r--io_uring/io_uring.c30
-rw-r--r--kernel/events/core.c54
-rw-r--r--kernel/futex/syscalls.c11
-rw-r--r--kernel/locking/rtmutex.c55
-rw-r--r--kernel/locking/rtmutex_api.c6
-rw-r--r--lib/kunit/string-stream.c4
-rw-r--r--scripts/Makefile.modpost22
-rw-r--r--scripts/Makefile.package1
-rw-r--r--scripts/basic/fixdep.c1
-rw-r--r--scripts/kconfig/mconf.c6
-rwxr-xr-xscripts/package/mkspec3
-rw-r--r--sound/pci/hda/patch_hdmi.c27
-rw-r--r--sound/pci/hda/patch_realtek.c13
-rw-r--r--sound/usb/line6/driver.c3
-rw-r--r--sound/usb/line6/midi.c6
-rw-r--r--sound/usb/line6/midibuf.c25
-rw-r--r--sound/usb/line6/midibuf.h5
-rw-r--r--sound/usb/line6/pod.c3
-rw-r--r--tools/testing/selftests/kvm/.gitignore91
-rw-r--r--tools/testing/selftests/kvm/Makefile64
-rw-r--r--tools/testing/selftests/kvm/aarch64/page_fault_test.c2
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/ucall.c6
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c13
-rw-r--r--tools/testing/selftests/kvm/lib/ucall_common.c16
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c2
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_ipi.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c13
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c5
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c6
-rw-r--r--virt/kvm/kvm_mm.h4
572 files changed, 13970 insertions, 7070 deletions
diff --git a/.gitignore b/.gitignore
index 3ec73ead6757..20dce5c3b9e0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,6 +39,7 @@
*.o.*
*.patch
*.rmeta
+*.rpm
*.rsi
*.s
*.so
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
index b697c42399ea..c9a882ee6d98 100644
--- a/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it6505.yaml
@@ -52,9 +52,49 @@ properties:
maxItems: 1
description: extcon specifier for the Power Delivery
- port:
- $ref: /schemas/graph.yaml#/properties/port
- description: A port node pointing to DPI host port node
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: A port node pointing to DPI host port node
+
+ properties:
+ endpoint:
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
+
+ properties:
+ link-frequencies:
+ minItems: 1
+ maxItems: 1
+ description: Allowed max link frequencies in Hz
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: Video port for DP output
+
+ properties:
+ endpoint:
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ uniqueItems: true
+ items:
+ - enum: [ 0, 1 ]
+ - const: 1
+ - const: 2
+ - const: 3
+
+ required:
+ - port@0
+ - port@1
required:
- compatible
@@ -63,6 +103,7 @@ required:
- interrupts
- reset-gpios
- extcon
+ - ports
additionalProperties: false
@@ -85,9 +126,24 @@ examples:
reset-gpios = <&pio 179 1>;
extcon = <&usbc_extcon>;
- port {
- it6505_in: endpoint {
- remote-endpoint = <&dpi_out>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ it6505_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ link-frequencies = /bits/ 64 <150000000>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ it6505_out: endpoint {
+ remote-endpoint = <&dp_in>;
+ data-lanes = <0 1>;
+ };
};
};
};
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
index d3454da1247a..a7eb2603691f 100644
--- a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
@@ -17,7 +17,9 @@ description: |
properties:
compatible:
- const: ite,it66121
+ enum:
+ - ite,it66121
+ - ite,it6610
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/panel/focaltech,gpt3.yaml b/Documentation/devicetree/bindings/display/panel/focaltech,gpt3.yaml
new file mode 100644
index 000000000000..d54e96b2a9e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/focaltech,gpt3.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/focaltech,gpt3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Focaltech GPT3 3.0" (640x480 pixels) IPS LCD panel
+
+maintainers:
+ - Christophe Branchereau <cbranchereau@gmail.com>
+
+allOf:
+ - $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ const: focaltech,gpt3
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - power-supply
+ - reset-gpios
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "focaltech,gpt3";
+ reg = <0>;
+
+ spi-max-frequency = <3125000>;
+
+ reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>;
+
+ backlight = <&backlight>;
+ power-supply = <&vcc>;
+
+ port {
+ panel_input: endpoint {
+ remote-endpoint = <&panel_output>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
new file mode 100644
index 000000000000..1b2a1baa26f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/himax,hx8394.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Himax HX8394 MIPI-DSI LCD panel controller
+
+maintainers:
+ - Ondrej Jirman <megi@xff.cz>
+ - Javier Martinez Canillas <javierm@redhat.com>
+
+description:
+ Device tree bindings for panels based on the Himax HX8394 controller,
+ such as the HannStar HSD060BHW4 720x1440 TFT LCD panel connected with
+ a MIPI-DSI video interface.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - hannstar,hsd060bhw4
+ - const: himax,hx8394
+
+ reg: true
+
+ reset-gpios: true
+
+ backlight: true
+
+ port: true
+
+ vcc-supply:
+ description: Panel power supply
+
+ iovcc-supply:
+ description: I/O voltage supply
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - backlight
+ - port
+ - vcc-supply
+ - iovcc-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "hannstar,hsd060bhw4", "himax,hx8394";
+ reg = <0>;
+ vcc-supply = <&reg_2v8_p>;
+ iovcc-supply = <&reg_1v8_p>;
+ reset-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+ backlight = <&backlight>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml b/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
index c2df8d28aaf5..9b701df5e9d2 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
@@ -22,8 +22,9 @@ description: |
The standard defines the following interface signals for type C:
- Power:
- Vdd: Power supply for display module
+ Called power-supply in this binding.
- Vddi: Logic level supply for interface signals
- Combined into one in this binding called: power-supply
+ Called io-supply in this binding.
- Interface:
- CSx: Chip select
- SCL: Serial clock
@@ -80,6 +81,11 @@ properties:
Controller data/command selection (D/CX) in 4-line SPI mode.
If not set, the controller is in 3-line SPI mode.
+ io-supply:
+ description: |
+ Logic level supply for interface signals (Vddi).
+ No need to set if this is the same as power-supply.
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml b/Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml
new file mode 100644
index 000000000000..49e2fd4b4e99
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/visionox,vtdr6130.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Visionox VTDR6130 AMOLED DSI Panel
+
+maintainers:
+ - Neil Armstrong <neil.armstrong@linaro.org>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: visionox,vtdr6130
+
+ vddio-supply: true
+ vci-supply: true
+ vdd-supply: true
+ port: true
+ reset-gpios: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - vddio-supply
+ - vci-supply
+ - vdd-supply
+ - reset-gpios
+ - port
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ panel {
+ compatible = "visionox,vtdr6130";
+
+ vddio-supply = <&vreg_l12b_1p8>;
+ vci-supply = <&vreg_l13b_3p0>;
+ vdd-supply = <&vreg_l11b_1p2>;
+
+ reset-gpios = <&tlmm 133 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel0_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+...
diff --git a/Documentation/fb/modedb.rst b/Documentation/fb/modedb.rst
index e53375033146..bb2889c6ea27 100644
--- a/Documentation/fb/modedb.rst
+++ b/Documentation/fb/modedb.rst
@@ -29,7 +29,10 @@ Things between square brackets are optional.
Valid names are::
- NTSC: 480i output, with the CCIR System-M TV mode and NTSC color encoding
+ - NTSC-J: 480i output, with the CCIR System-M TV mode, the NTSC color
+ encoding, and a black level equal to the blanking level.
- PAL: 576i output, with the CCIR System-B TV mode and PAL color encoding
+ - PAL-M: 480i output, with the CCIR System-M TV mode and PAL color encoding
If 'M' is specified in the mode_option argument (after <yres> and before
<bpp> and <refresh>, if specified) the timings will be calculated using
@@ -70,6 +73,8 @@ Valid options are::
- reflect_y (boolean): Perform an axial symmetry on the Y axis
- rotate (integer): Rotate the initial framebuffer by x
degrees. Valid values are 0, 90, 180 and 270.
+ - tv_mode: Analog TV mode. One of "NTSC", "NTSC-443", "NTSC-J", "PAL",
+ "PAL-M", "PAL-N", or "SECAM".
- panel_orientation, one of "normal", "upside_down", "left_side_up", or
"right_side_up". For KMS drivers only, this sets the "panel orientation"
property on the kms connector as hint for kms users.
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index a4860ffd6e86..b8ab05e42dbb 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -188,6 +188,13 @@ Bridge Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
:export:
+MIPI-DSI bridge operation
+-------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+ :doc: dsi bridge operations
+
+
Bridge Connector Helper Reference
---------------------------------
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index b4377a545425..c92d425cb2dd 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -520,6 +520,12 @@ HDMI Specific Connector Properties
.. kernel-doc:: drivers/gpu/drm/drm_connector.c
:doc: HDMI connector properties
+Analog TV Specific Connector Properties
+---------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+ :doc: Analog TV Connector Properties
+
Standard CRTC Properties
------------------------
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index ce47b4292481..65fb3036a580 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -402,19 +402,19 @@ It's possible to run the IGT-tests in a VM in two ways:
1. Use IGT inside a VM
2. Use IGT from the host machine and write the results in a shared directory.
-As follow, there is an example of using a VM with a shared directory with
-the host machine to run igt-tests. As an example it's used virtme::
+Following is an example of using a VM with a shared directory with
+the host machine to run igt-tests. This example uses virtme::
$ virtme-run --rwdir /path/for/shared_dir --kdir=path/for/kernel/directory --mods=auto
-Run the igt-tests in the guest machine, as example it's ran the 'kms_flip'
+Run the igt-tests in the guest machine. This example runs the 'kms_flip'
tests::
$ /path/for/igt-gpu-tools/scripts/run-tests.sh -p -s -t "kms_flip.*" -v
-In this example, instead of build the igt_runner, Piglit is used
-(-p option); it's created html summary of the tests results and it's saved
-in the folder "igt-gpu-tools/results"; it's executed only the igt-tests
+In this example, instead of building the igt_runner, Piglit is used
+(-p option). It creates an HTML summary of the test results and saves
+them in the folder "igt-gpu-tools/results". It executes only the igt-tests
matching the -t option.
Display CRC Support
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index b2c6aaf1edf2..1f8a5ebe188e 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -508,17 +508,18 @@ Clean up the debugfs support
There's a bunch of issues with it:
-- The drm_info_list ->show() function doesn't even bother to cast to the drm
- structure for you. This is lazy.
+- Convert drivers to support the drm_debugfs_add_files() function instead of
+ the drm_debugfs_create_files() function.
+
+- Improve late-register debugfs by rolling out the same debugfs pre-register
+ infrastructure for connector and crtc too. That way, the drivers won't need to
+ split their setup code into init and register anymore.
- We probably want to have some support for debugfs files on crtc/connectors and
maybe other kms objects directly in core. There's even drm_print support in
the funcs for these objects to dump kms state, so it's all there. And then the
->show() functions should obviously give you a pointer to the right object.
-- The drm_info_list stuff is centered on drm_minor instead of drm_device. For
- anything we want to print drm_device (or maybe drm_file) is the right thing.
-
- The drm_driver->debugfs_init hooks we have is just an artifact of the old
midlayered load sequence. DRM debugfs should work more like sysfs, where you
can create properties/files for an object anytime you want, and the core
@@ -527,8 +528,6 @@ There's a bunch of issues with it:
this (together with the drm_minor->drm_device move) would allow us to remove
debugfs_init.
-Previous RFC that hasn't landed yet: https://lore.kernel.org/dri-devel/20200513114130.28641-2-wambui.karugax@gmail.com/
-
Contact: Daniel Vetter
Level: Intermediate
diff --git a/Documentation/gpu/vc4.rst b/Documentation/gpu/vc4.rst
index 5df1d98b9544..5e5e92e40919 100644
--- a/Documentation/gpu/vc4.rst
+++ b/Documentation/gpu/vc4.rst
@@ -54,6 +54,25 @@ VEC (Composite TV out) encoder
.. kernel-doc:: drivers/gpu/drm/vc4/vc4_vec.c
:doc: VC4 SDTV module
+KUnit Tests
+===========
+
+The VC4 Driver uses KUnit to perform driver-specific unit and
+integration tests.
+
+These tests use a mock driver and can be run using the
+command below, on either arm or arm64 architectures:
+
+.. code-block:: bash
+
+ $ ./tools/testing/kunit/kunit.py run \
+ --kunitconfig=drivers/gpu/drm/vc4/tests/.kunitconfig \
+ --cross_compile aarch64-linux-gnu- --arch arm64
+
+Parts of the driver that are currently covered by tests are:
+ * The HVS to PixelValve dynamic FIFO assignment, for the BCM2835-7
+ and BCM2711.
+
Memory Management and 3D Command Submission
===========================================
diff --git a/Documentation/maintainer/maintainer-entry-profile.rst b/Documentation/maintainer/maintainer-entry-profile.rst
index 93b2ae6c34a9..cfd37f31077f 100644
--- a/Documentation/maintainer/maintainer-entry-profile.rst
+++ b/Documentation/maintainer/maintainer-entry-profile.rst
@@ -104,3 +104,4 @@ to do something different in the near future.
../riscv/patch-acceptance
../driver-api/media/maintainer-entry-profile
../driver-api/vfio-pci-device-specific-driver-acceptance
+ ../nvme/feature-and-quirk-policy
diff --git a/Documentation/nvme/feature-and-quirk-policy.rst b/Documentation/nvme/feature-and-quirk-policy.rst
new file mode 100644
index 000000000000..c01d836d8e41
--- /dev/null
+++ b/Documentation/nvme/feature-and-quirk-policy.rst
@@ -0,0 +1,77 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+Linux NVMe feature and quirk policy
+=======================================
+
+This file explains the policy used to decide what is supported by the
+Linux NVMe driver and what is not.
+
+
+Introduction
+============
+
+NVM Express is an open collection of standards and information.
+
+The Linux NVMe host driver in drivers/nvme/host/ supports devices
+implementing the NVM Express (NVMe) family of specifications, which
+currently consists of a number of documents:
+
+ - the NVMe Base specification
+ - various Command Set specifications (e.g. NVM Command Set)
+ - various Transport specifications (e.g. PCIe, Fibre Channel, RDMA, TCP)
+ - the NVMe Management Interface specification
+
+See https://nvmexpress.org/developers/ for the NVMe specifications.
+
+
+Supported features
+==================
+
+NVMe is a large suite of specifications, and contains features that are only
+useful or suitable for specific use-cases. It is important to note that Linux
+does not aim to implement every feature in the specification. Every additional
+feature implemented introduces more code, more maintenance and potentially more
+bugs. Hence there is an inherent tradeoff between functionality and
+maintainability of the NVMe host driver.
+
+Any feature implemented in the Linux NVMe host driver must support the
+following requirements:
+
+ 1. The feature is specified in a release version of an official NVMe
+ specification, or in a ratified Technical Proposal (TP) that is
+ available on NVMe website. Or if it is not directly related to the
+ on-wire protocol, does not contradict any of the NVMe specifications.
+ 2. Does not conflict with the Linux architecture, nor the design of the
+ NVMe host driver.
+ 3. Has a clear, indisputable value-proposition and a wide consensus across
+ the community.
+
+Vendor specific extensions are generally not supported in the NVMe host
+driver.
+
+It is strongly recommended to work with the Linux NVMe and block layer
+maintainers and get feedback on specification changes that are intended
+to be used by the Linux NVMe host driver in order to avoid conflict at a
+later stage.
+
+
+Quirks
+======
+
+Sometimes implementations of open standards fail to correctly implement parts
+of the standards. Linux uses identifier-based quirks to work around such
+implementation bugs. The intent of quirks is to deal with widely available
+hardware, usually consumer, which Linux users can't use without these quirks.
+Typically these implementations are not or only superficially tested with Linux
+by the hardware manufacturer.
+
+The Linux NVMe maintainers decide ad hoc whether to quirk implementations
+based on the impact of the problem to Linux users and how it impacts
+maintainability of the driver. In general quirks are a last resort, if no
+firmware updates or other workarounds are available from the vendor.
+
+Quirks will not be added to the Linux kernel for hardware that isn't available
+on the mass market. Hardware that fails qualification for enterprise Linux
+distributions, ChromeOS, Android or other consumers of the Linux kernel
+should be fixed before it is shipped instead of relying on Linux quirks.
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index eb045fc495a4..e48101c970cc 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -222,6 +222,7 @@ Code Seq# Include File Comments
'a' 00-0F drivers/crypto/qat/qat_common/adf_cfg_common.h conflict! qat driver
'b' 00-FF conflict! bit3 vme host bridge
<mailto:natalia@nikhefk.nikhef.nl>
+'b' 00-0F linux/dma-buf.h conflict!
'c' all linux/cm4000_cs.h conflict!
'c' 00-7F linux/comstats.h conflict!
'c' 00-7F linux/coda.h conflict!
diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst
index 16ef3b41932e..a3a35eeed708 100644
--- a/Documentation/userspace-api/media/v4l/subdev-formats.rst
+++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst
@@ -949,6 +949,43 @@ The following tables list existing packed RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-BGR666-1X18:
+
+ - MEDIA_BUS_FMT_BGR666_1X18
+ - 0x1023
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
* .. _MEDIA-BUS-FMT-RBG888-1X24:
- MEDIA_BUS_FMT_RBG888_1X24
@@ -1023,6 +1060,80 @@ The following tables list existing packed RGB formats.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-BGR666-1X24_CPADHI:
+
+ - MEDIA_BUS_FMT_BGR666_1X24_CPADHI
+ - 0x1024
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - 0
+ - 0
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - 0
+ - 0
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - 0
+ - 0
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-RGB565-1X24_CPADHI:
+
+ - MEDIA_BUS_FMT_RGB565_1X24_CPADHI
+ - 0x1022
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - 0
+ - 0
+ - 0
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - 0
+ - 0
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - 0
+ - 0
+ - 0
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
* .. _MEDIA-BUS-FMT-BGR888-1X24:
- MEDIA_BUS_FMT_BGR888_1X24
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 0dd5d8733dd5..deb494f759ed 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -5343,9 +5343,9 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
32 vCPUs in the shared_info page, KVM does not automatically do so
and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO be used
explicitly even when the vcpu_info for a given vCPU resides at the
- "default" location in the shared_info page. This is because KVM is
- not aware of the Xen CPU id which is used as the index into the
- vcpu_info[] array, so cannot know the correct default location.
+ "default" location in the shared_info page. This is because KVM may
+ not be aware of the Xen CPU id which is used as the index into the
+ vcpu_info[] array, so may not know the correct default location.
Note that the shared info page may be constantly written to by KVM;
it contains the event channel bitmap used to deliver interrupts to
@@ -5356,23 +5356,29 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
any vCPU has been running or any event channel interrupts can be
routed to the guest.
+ Setting the gfn to KVM_XEN_INVALID_GFN will disable the shared info
+ page.
+
KVM_XEN_ATTR_TYPE_UPCALL_VECTOR
Sets the exception vector used to deliver Xen event channel upcalls.
This is the HVM-wide vector injected directly by the hypervisor
(not through the local APIC), typically configured by a guest via
- HVM_PARAM_CALLBACK_IRQ.
+ HVM_PARAM_CALLBACK_IRQ. This can be disabled again (e.g. for guest
+ SHUTDOWN_soft_reset) by setting it to zero.
KVM_XEN_ATTR_TYPE_EVTCHN
This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It configures
an outbound port number for interception of EVTCHNOP_send requests
- from the guest. A given sending port number may be directed back
- to a specified vCPU (by APIC ID) / port / priority on the guest,
- or to trigger events on an eventfd. The vCPU and priority can be
- changed by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call,
- but other fields cannot change for a given sending port. A port
- mapping is removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags
- field.
+ from the guest. A given sending port number may be directed back to
+ a specified vCPU (by APIC ID) / port / priority on the guest, or to
+ trigger events on an eventfd. The vCPU and priority can be changed
+ by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call, but other
+ fields cannot change for a given sending port. A port mapping is
+ removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags field. Passing
+ KVM_XEN_EVTCHN_RESET in the flags field removes all interception of
+ outbound event channels. The values of the flags field are mutually
+ exclusive and cannot be combined as a bitmask.
KVM_XEN_ATTR_TYPE_XEN_VERSION
This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
@@ -5388,7 +5394,7 @@ KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG
support for KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG. It enables the
XEN_RUNSTATE_UPDATE flag which allows guest vCPUs to safely read
other vCPUs' vcpu_runstate_info. Xen guests enable this feature via
- the VM_ASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist
+ the VMASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist
hypercall.
4.127 KVM_XEN_HVM_GET_ATTR
@@ -5446,15 +5452,18 @@ KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
As with the shared_info page for the VM, the corresponding page may be
dirtied at any time if event channel interrupt delivery is enabled, so
userspace should always assume that the page is dirty without relying
- on dirty logging.
+ on dirty logging. Setting the gpa to KVM_XEN_INVALID_GPA will disable
+ the vcpu_info.
KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
Sets the guest physical address of an additional pvclock structure
for a given vCPU. This is typically used for guest vsyscall support.
+ Setting the gpa to KVM_XEN_INVALID_GPA will disable the structure.
KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR
Sets the guest physical address of the vcpu_runstate_info for a given
vCPU. This is how a Xen guest tracks CPU state such as steal time.
+ Setting the gpa to KVM_XEN_INVALID_GPA will disable the runstate area.
KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT
Sets the runstate (RUNSTATE_running/_runnable/_blocked/_offline) of
@@ -5487,7 +5496,8 @@ KVM_XEN_VCPU_ATTR_TYPE_TIMER
This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the
event channel port/priority for the VIRQ_TIMER of the vCPU, as well
- as allowing a pending timer to be saved/restored.
+ as allowing a pending timer to be saved/restored. Setting the timer
+ port to zero disables kernel handling of the singleshot timer.
KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR
This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
@@ -5495,7 +5505,8 @@ KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR
per-vCPU local APIC upcall vector, configured by a Xen guest with
the HVMOP_set_evtchn_upcall_vector hypercall. This is typically
used by Windows guests, and is distinct from the HVM-wide upcall
- vector configured with HVM_PARAM_CALLBACK_IRQ.
+ vector configured with HVM_PARAM_CALLBACK_IRQ. It is disabled by
+ setting the vector to zero.
4.129 KVM_XEN_VCPU_GET_ATTR
@@ -6577,11 +6588,6 @@ Please note that the kernel is allowed to use the kvm_run structure as the
primary storage for certain register types. Therefore, the kernel may use the
values in kvm_run even if the corresponding bit in kvm_dirty_regs is not set.
-::
-
- };
-
-
6. Capabilities that can be enabled on vCPUs
============================================
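[Editorial note, not part of the patch] The api.rst hunks above document that passing KVM_XEN_INVALID_GFN / KVM_XEN_INVALID_GPA now disables the shared_info page and the per-vCPU areas. A minimal, hedged userspace sketch of the shared_info case, using only names from the KVM uAPI headers (error handling omitted):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative sketch only: disable the Xen shared_info page for a VM
 * by setting its gfn to KVM_XEN_INVALID_GFN, as described above. */
static int xen_disable_shared_info(int vm_fd)
{
	struct kvm_xen_hvm_attr attr = {
		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
		.u.shared_info.gfn = KVM_XEN_INVALID_GFN,
	};

	return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
}
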
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index 845a561629f1..a3ca76f9be75 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -16,17 +16,26 @@ The acquisition orders for mutexes are as follows:
- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
them together is quite rare.
-- Unlike kvm->slots_lock, kvm->slots_arch_lock is released before
- synchronize_srcu(&kvm->srcu). Therefore kvm->slots_arch_lock
- can be taken inside a kvm->srcu read-side critical section,
- while kvm->slots_lock cannot.
-
- kvm->mn_active_invalidate_count ensures that pairs of
invalidate_range_start() and invalidate_range_end() callbacks
use the same memslots array. kvm->slots_lock and kvm->slots_arch_lock
are taken on the waiting side in install_new_memslots, so MMU notifiers
must not take either kvm->slots_lock or kvm->slots_arch_lock.
+For SRCU:
+
+- ``synchronize_srcu(&kvm->srcu)`` is called _inside_
+ the kvm->slots_lock critical section, therefore kvm->slots_lock
+ cannot be taken inside a kvm->srcu read-side critical section.
+ Instead, kvm->slots_arch_lock is released before the call
+ to ``synchronize_srcu()`` and _can_ be taken inside a
+ kvm->srcu read-side critical section.
+
+- kvm->lock is taken inside kvm->srcu, therefore
+ ``synchronize_srcu(&kvm->srcu)`` cannot be called inside
+ a kvm->lock critical section. If you cannot delay the
+ call until after kvm->lock is released, use ``call_srcu``.
+
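+ A minimal sketch of the second rule (``struct foo`` and its list are
+ made up for the example; only the lock/SRCU ordering is the point)::
+
+   struct foo {
+           struct list_head list;
+           struct rcu_head rcu;
+   };
+
+   static void foo_free_rcu(struct rcu_head *rcu)
+   {
+           kfree(container_of(rcu, struct foo, rcu));
+   }
+
+   static void foo_remove(struct kvm *kvm, struct foo *foo)
+   {
+           mutex_lock(&kvm->lock);
+           list_del_rcu(&foo->list);       /* hide from new srcu readers */
+           /*
+            * synchronize_srcu(&kvm->srcu) must not be called here, since
+            * readers may themselves take kvm->lock; defer the free instead.
+            */
+           call_srcu(&kvm->srcu, &foo->rcu, foo_free_rcu);
+           mutex_unlock(&kvm->lock);
+   }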
On x86:
- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
diff --git a/MAINTAINERS b/MAINTAINERS
index f61eb221415b..7fc9e31ab03f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6550,6 +6550,14 @@ S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/tiny/gm12u320.c
+DRM DRIVER FOR HIMAX HX8394 MIPI-DSI LCD panels
+M: Ondrej Jirman <megi@xff.cz>
+M: Javier Martinez Canillas <javierm@redhat.com>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
+F: drivers/gpu/drm/panel/panel-himax-hx8394.c
+
DRM DRIVER FOR HX8357D PANELS
M: Emma Anholt <emma@anholt.net>
S: Maintained
@@ -6984,7 +6992,7 @@ M: Philipp Zabel <p.zabel@pengutronix.de>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: Documentation/devicetree/bindings/display/imx/
-F: drivers/gpu/drm/imx/
+F: drivers/gpu/drm/imx/ipuv3/
F: drivers/gpu/ipu-v3/
DRM DRIVERS FOR FREESCALE IMX BRIDGE
@@ -7009,7 +7017,6 @@ M: Xinliang Liu <xinliang.liu@linaro.org>
M: Tian Tao <tiantao6@hisilicon.com>
R: John Stultz <jstultz@google.com>
R: Xinwei Kong <kong.kongxinwei@hisilicon.com>
-R: Chen Feng <puck.chen@hisilicon.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -11468,7 +11475,7 @@ F: arch/x86/kvm/hyperv.*
F: arch/x86/kvm/kvm_onhyperv.*
F: arch/x86/kvm/svm/hyperv.*
F: arch/x86/kvm/svm/svm_onhyperv.*
-F: arch/x86/kvm/vmx/evmcs.*
+F: arch/x86/kvm/vmx/hyperv.*
KVM X86 Xen (KVM/Xen)
M: David Woodhouse <dwmw2@infradead.org>
@@ -14916,6 +14923,7 @@ L: linux-nvme@lists.infradead.org
S: Supported
W: http://git.infradead.org/nvme.git
T: git://git.infradead.org/nvme.git
+F: Documentation/nvme/
F: drivers/nvme/host/
F: drivers/nvme/common/
F: include/linux/nvme*
diff --git a/Makefile b/Makefile
index d4b6af8c09e9..c05b4fb7121e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@@ -297,7 +297,7 @@ no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
headers_install modules_install kernelrelease image_name
no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
image_name
-single-targets := %.a %.i %.rsi %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
+single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.rsi %.s %.symtypes %/
config-build :=
mixed-build :=
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index d6f3703e4119..4386b10682ce 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1387,7 +1387,7 @@ static int __init amd_core_pmu_init(void)
* numbered counter following it.
*/
for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
- even_ctr_mask |= 1 << i;
+ even_ctr_mask |= BIT_ULL(i);
pair_constraint = (struct event_constraint)
__EVENT_CONSTRAINT(0, even_ctr_mask, 0,
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index 7d2c75ec9a8c..ffea98f9064b 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -119,7 +119,7 @@ static bool is_coretext(const struct core_text *ct, void *addr)
return within_module_coretext(addr);
}
-static __init_or_module bool skip_addr(void *dest)
+static bool skip_addr(void *dest)
{
if (dest == error_entry)
return true;
@@ -181,7 +181,7 @@ static const u8 nops[] = {
0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};
-static __init_or_module void *patch_dest(void *dest, bool direct)
+static void *patch_dest(void *dest, bool direct)
{
unsigned int tsize = SKL_TMPL_SIZE;
u8 *pad = dest - tsize;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 66299682b6b7..b36f3c367cb2 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -37,6 +37,7 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
+#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
@@ -281,12 +282,15 @@ static int can_probe(unsigned long paddr)
if (ret < 0)
return 0;
+#ifdef CONFIG_KGDB
/*
- * Another debugging subsystem might insert this breakpoint.
- * In that case, we can't recover it.
+ * If there is a dynamically installed kgdb sw breakpoint,
+ * this function should not be probed.
*/
- if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
+ if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
+ kgdb_has_hit_break(addr))
return 0;
+#endif
addr += insn.length;
}
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index e6b8c5362b94..e57e07b0edb6 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -15,6 +15,7 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
+#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
@@ -279,19 +280,6 @@ static int insn_is_indirect_jump(struct insn *insn)
return ret;
}
-static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
-{
- unsigned char ops;
-
- for (; addr < eaddr; addr++) {
- if (get_kernel_nofault(ops, (void *)addr) < 0 ||
- ops != INT3_INSN_OPCODE)
- return false;
- }
-
- return true;
-}
-
/* Decode whole function to ensure any instructions don't jump into target */
static int can_optimize(unsigned long paddr)
{
@@ -334,15 +322,15 @@ static int can_optimize(unsigned long paddr)
ret = insn_decode_kernel(&insn, (void *)recovered_insn);
if (ret < 0)
return 0;
-
+#ifdef CONFIG_KGDB
/*
- * In the case of detecting unknown breakpoint, this could be
- * a padding INT3 between functions. Let's check that all the
- * rest of the bytes are also INT3.
+ * If there is a dynamically installed kgdb sw breakpoint,
+ * this function should not be probed.
*/
- if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
- return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;
-
+ if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
+ kgdb_has_hit_break(addr))
+ return 0;
+#endif
/* Recover address */
insn.kaddr = (void *)addr;
insn.next_byte = (void *)(addr + insn.length);
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 2c7f2a26421e..e8296942a868 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1769,6 +1769,7 @@ static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_ba
}
struct kvm_hv_hcall {
+ /* Hypercall input data */
u64 param;
u64 ingpa;
u64 outgpa;
@@ -1779,12 +1780,21 @@ struct kvm_hv_hcall {
bool fast;
bool rep;
sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
+
+ /*
+ * Current read offset when KVM reads hypercall input data gradually,
+ * either offset in bytes from 'ingpa' for regular hypercalls or the
+ * number of already consumed 'XMM halves' for 'fast' hypercalls.
+ */
+ union {
+ gpa_t data_offset;
+ int consumed_xmm_halves;
+ };
};
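
A hypothetical helper showing how a handler is expected to prime this
cursor (illustrative only; the actual call sites below set the fields
inline):

/*
 * Sketch: after consuming the fixed part of the hypercall input, point
 * the cursor at the remaining data for either calling convention, so
 * kvm_hv_get_hc_data() continues reading from the right place.
 */
static void hv_hcall_prime_cursor(struct kvm_hv_hcall *hc, size_t fixed_size,
				  int fixed_xmm_halves)
{
	if (hc->fast)
		hc->consumed_xmm_halves = fixed_xmm_halves;
	else
		hc->data_offset = fixed_size;
}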
static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
- u16 orig_cnt, u16 cnt_cap, u64 *data,
- int consumed_xmm_halves, gpa_t offset)
+ u16 orig_cnt, u16 cnt_cap, u64 *data)
{
/*
* Preserve the original count when ignoring entries via a "cap", KVM
@@ -1799,11 +1809,11 @@ static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
* Each XMM holds two sparse banks, but do not count halves that
* have already been consumed for hypercall parameters.
*/
- if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - consumed_xmm_halves)
+ if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves)
return HV_STATUS_INVALID_HYPERCALL_INPUT;
for (i = 0; i < cnt; i++) {
- j = i + consumed_xmm_halves;
+ j = i + hc->consumed_xmm_halves;
if (j % 2)
data[i] = sse128_hi(hc->xmm[j / 2]);
else
@@ -1812,27 +1822,24 @@ static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
return 0;
}
- return kvm_read_guest(kvm, hc->ingpa + offset, data,
+ return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
cnt * sizeof(*data));
}
static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
- u64 *sparse_banks, int consumed_xmm_halves,
- gpa_t offset)
+ u64 *sparse_banks)
{
if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS)
return -EINVAL;
/* Cap var_cnt to ignore banks that cannot contain a legal VP index. */
return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS,
- sparse_banks, consumed_xmm_halves, offset);
+ sparse_banks);
}
-static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[],
- int consumed_xmm_halves, gpa_t offset)
+static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
{
- return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt,
- entries, consumed_xmm_halves, offset);
+ return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
}
static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
@@ -1926,8 +1933,6 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
struct kvm_vcpu *v;
unsigned long i;
bool all_cpus;
- int consumed_xmm_halves = 0;
- gpa_t data_offset;
/*
* The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS
@@ -1955,12 +1960,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
flush.address_space = hc->ingpa;
flush.flags = hc->outgpa;
flush.processor_mask = sse128_lo(hc->xmm[0]);
- consumed_xmm_halves = 1;
+ hc->consumed_xmm_halves = 1;
} else {
if (unlikely(kvm_read_guest(kvm, hc->ingpa,
&flush, sizeof(flush))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
- data_offset = sizeof(flush);
+ hc->data_offset = sizeof(flush);
}
trace_kvm_hv_flush_tlb(flush.processor_mask,
@@ -1985,12 +1990,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
flush_ex.flags = hc->outgpa;
memcpy(&flush_ex.hv_vp_set,
&hc->xmm[0], sizeof(hc->xmm[0]));
- consumed_xmm_halves = 2;
+ hc->consumed_xmm_halves = 2;
} else {
if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
sizeof(flush_ex))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
- data_offset = sizeof(flush_ex);
+ hc->data_offset = sizeof(flush_ex);
}
trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
@@ -2009,8 +2014,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
if (!hc->var_cnt)
goto ret_success;
- if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks,
- consumed_xmm_halves, data_offset))
+ if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
}
@@ -2021,8 +2025,10 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
* consumed_xmm_halves to make sure TLB flush entries are read
* from the correct offset.
*/
- data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
- consumed_xmm_halves += hc->var_cnt;
+ if (hc->fast)
+ hc->consumed_xmm_halves += hc->var_cnt;
+ else
+ hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
}
if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
@@ -2030,8 +2036,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
tlb_flush_entries = NULL;
} else {
- if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries,
- consumed_xmm_halves, data_offset))
+ if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
tlb_flush_entries = __tlb_flush_entries;
}
@@ -2180,9 +2185,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
if (!hc->var_cnt)
goto ret_success;
- if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks, 1,
- offsetof(struct hv_send_ipi_ex,
- vp_set.bank_contents)))
+ if (!hc->fast)
+ hc->data_offset = offsetof(struct hv_send_ipi_ex,
+ vp_set.bank_contents);
+ else
+ hc->consumed_xmm_halves = 1;
+
+ if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
}
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 0687162c4f22..3742d9adacfc 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -426,8 +426,9 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
kvm_set_msi_irq(vcpu->kvm, entry, &irq);
if (irq.trig_mode &&
- kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
- irq.dest_id, irq.dest_mode))
+ (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
+ irq.dest_id, irq.dest_mode) ||
+ kvm_apic_pending_eoi(vcpu, irq.vector)))
__set_bit(irq.vector, ioapic_handled_vectors);
}
}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 28e3769066e2..58c3242fcc7a 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -188,11 +188,11 @@ static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
extern struct static_key_false_deferred apic_hw_disabled;
-static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic)
+static inline bool kvm_apic_hw_enabled(struct kvm_lapic *apic)
{
if (static_branch_unlikely(&apic_hw_disabled.key))
return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
- return MSR_IA32_APICBASE_ENABLE;
+ return true;
}
extern struct static_key_false_deferred apic_sw_disabled;
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 1f03701b943a..6f54dc9409c9 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -363,7 +363,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
* A shadow-present leaf SPTE may be non-writable for 4 possible reasons:
*
* 1. To intercept writes for dirty logging. KVM write-protects huge pages
- * so that they can be split be split down into the dirty logging
+ * so that they can be split down into the dirty logging
* granularity (4KiB) whenever the guest writes to them. KVM also
* write-protects 4KiB pages so that writes can be recorded in the dirty log
* (e.g. if not using PML). SPTEs are write-protected for dirty logging
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 771210ce5181..d6df38d371a0 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1074,7 +1074,9 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
int ret = RET_PF_FIXED;
bool wrprot = false;
- WARN_ON(sp->role.level != fault->goal_level);
+ if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
+ return RET_PF_RETRY;
+
if (unlikely(!fault->slot))
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
else
@@ -1173,9 +1175,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
if (fault->nx_huge_page_workaround_enabled)
disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
- if (iter.level == fault->goal_level)
- break;
-
/*
* If SPTE has been frozen by another thread, just give up and
* retry, avoiding unnecessary page table allocation and free.
@@ -1183,6 +1182,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
if (is_removed_spte(iter.old_spte))
goto retry;
+ if (iter.level == fault->goal_level)
+ goto map_target_level;
+
/* Step down into the lower level page table if it exists. */
if (is_shadow_present_pte(iter.old_spte) &&
!is_large_pte(iter.old_spte))
@@ -1203,8 +1205,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
r = tdp_mmu_link_sp(kvm, &iter, sp, true);
/*
- * Also force the guest to retry the access if the upper level SPTEs
- * aren't in place.
+ * Force the guest to retry if installing an upper level SPTE
+ * failed, e.g. because a different task modified the SPTE.
*/
if (r) {
tdp_mmu_free_sp(sp);
@@ -1214,11 +1216,20 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
if (fault->huge_page_disallowed &&
fault->req_level >= iter.level) {
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
- track_possible_nx_huge_page(kvm, sp);
+ if (sp->nx_huge_page_disallowed)
+ track_possible_nx_huge_page(kvm, sp);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}
}
+ /*
+ * The walk aborted before reaching the target level, e.g. because the
+ * iterator detected an upper level SPTE was frozen during traversal.
+ */
+ WARN_ON_ONCE(iter.level == fault->goal_level);
+ goto retry;
+
+map_target_level:
ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
retry:
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 684393c22105..eb594620dd75 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -238,7 +238,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
return false;
/* recalibrate sample period and check if it's accepted by perf core */
- if (perf_event_period(pmc->perf_event,
+ if (is_sampling_event(pmc->perf_event) &&
+ perf_event_period(pmc->perf_event,
get_sample_period(pmc, pmc->counter)))
return false;
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 85ff3c0588ba..cdb91009701d 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -140,7 +140,8 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
- if (!pmc->perf_event || pmc->is_paused)
+ if (!pmc->perf_event || pmc->is_paused ||
+ !is_sampling_event(pmc->perf_event))
return;
perf_event_period(pmc->perf_event,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b6f4411b613e..d93c715cda6a 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5296,10 +5296,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
if (vmptr == vmx->nested.current_vmptr)
nested_release_vmcs12(vcpu);
- kvm_vcpu_write_guest(vcpu,
- vmptr + offsetof(struct vmcs12,
- launch_state),
- &zero, sizeof(zero));
+ /*
+ * Silently ignore memory errors on VMCLEAR; Intel's pseudocode
+ * for VMCLEAR includes an "ensure that data for VMCS referenced
+ * by the operand is in memory" clause that guards writes to
+ * memory, i.e. doing nothing for I/O is architecturally valid.
+ *
+ * FIXME: Suppress failures if and only if no memslot is found,
+ * i.e. exit to userspace if __copy_to_user() fails.
+ */
+ (void)kvm_vcpu_write_guest(vcpu,
+ vmptr + offsetof(struct vmcs12,
+ launch_state),
+ &zero, sizeof(zero));
} else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
nested_release_evmcs(vcpu);
}
@@ -6873,7 +6882,8 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps)
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_RDSEED_EXITING |
SECONDARY_EXEC_XSAVES |
- SECONDARY_EXEC_TSC_SCALING;
+ SECONDARY_EXEC_TSC_SCALING |
+ SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
/*
* We can emulate "VMCS shadowing," even if the hardware
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index fe5615fd8295..fc9008dbed33 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4459,6 +4459,13 @@ vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
* controls for features that are/aren't exposed to the guest.
*/
if (nested) {
+ /*
+ * All features that can be added or removed to VMX MSRs must
+ * be supported in the first place for nested virtualization.
+ */
+ if (WARN_ON_ONCE(!(vmcs_config.nested.secondary_ctls_high & control)))
+ enabled = false;
+
if (enabled)
vmx->nested.msrs.secondary_ctls_high |= control;
else
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 312aea1854ae..da4bbd043a7b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13132,6 +13132,9 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e)
{
if (r == X86EMUL_PROPAGATE_FAULT) {
+ if (KVM_BUG_ON(!e, vcpu->kvm))
+ return -EIO;
+
kvm_inject_emulated_page_fault(vcpu, e);
return 1;
}
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index d7af40240248..2e29bdc2949c 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -41,7 +41,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
int ret = 0;
int idx = srcu_read_lock(&kvm->srcu);
- if (gfn == GPA_INVALID) {
+ if (gfn == KVM_XEN_INVALID_GFN) {
kvm_gpc_deactivate(gpc);
goto out;
}
@@ -659,7 +659,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
if (kvm->arch.xen.shinfo_cache.active)
data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
else
- data->u.shared_info.gfn = GPA_INVALID;
+ data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
r = 0;
break;
@@ -705,7 +705,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
offsetof(struct compat_vcpu_info, time));
- if (data->u.gpa == GPA_INVALID) {
+ if (data->u.gpa == KVM_XEN_INVALID_GPA) {
kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
r = 0;
break;
@@ -719,7 +719,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
- if (data->u.gpa == GPA_INVALID) {
+ if (data->u.gpa == KVM_XEN_INVALID_GPA) {
kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
r = 0;
break;
@@ -739,7 +739,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
r = -EOPNOTSUPP;
break;
}
- if (data->u.gpa == GPA_INVALID) {
+ if (data->u.gpa == KVM_XEN_INVALID_GPA) {
r = 0;
deactivate_out:
kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
@@ -937,7 +937,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
if (vcpu->arch.xen.vcpu_info_cache.active)
data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
else
- data->u.gpa = GPA_INVALID;
+ data->u.gpa = KVM_XEN_INVALID_GPA;
r = 0;
break;
@@ -945,7 +945,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
if (vcpu->arch.xen.vcpu_time_info_cache.active)
data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
else
- data->u.gpa = GPA_INVALID;
+ data->u.gpa = KVM_XEN_INVALID_GPA;
r = 0;
break;
@@ -1069,6 +1069,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u8 *page;
+ int ret;
if (page_num >= blob_size)
return 1;
@@ -1079,10 +1080,10 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
if (IS_ERR(page))
return PTR_ERR(page);
- if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
- kfree(page);
+ ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
+ kfree(page);
+ if (ret)
return 1;
- }
}
return 0;
}
@@ -1183,30 +1184,22 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
u64 param, u64 *r)
{
- int idx, i;
struct sched_poll sched_poll;
evtchn_port_t port, *ports;
- gpa_t gpa;
+ struct x86_exception e;
+ int i;
if (!lapic_in_kernel(vcpu) ||
!(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
return false;
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- if (!gpa) {
- *r = -EFAULT;
- return true;
- }
-
if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
struct compat_sched_poll sp32;
/* Sanity check that the compat struct definition is correct */
BUILD_BUG_ON(sizeof(sp32) != 16);
- if (kvm_vcpu_read_guest(vcpu, gpa, &sp32, sizeof(sp32))) {
+ if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) {
*r = -EFAULT;
return true;
}
@@ -1220,8 +1213,8 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
sched_poll.nr_ports = sp32.nr_ports;
sched_poll.timeout = sp32.timeout;
} else {
- if (kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
- sizeof(sched_poll))) {
+ if (kvm_read_guest_virt(vcpu, param, &sched_poll,
+ sizeof(sched_poll), &e)) {
*r = -EFAULT;
return true;
}
@@ -1243,18 +1236,13 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
} else
ports = &port;
+ if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
+ sched_poll.nr_ports * sizeof(*ports), &e)) {
+ *r = -EFAULT;
+ return true;
+ }
+
for (i = 0; i < sched_poll.nr_ports; i++) {
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- gpa = kvm_mmu_gva_to_gpa_system(vcpu,
- (gva_t)(sched_poll.ports + i),
- NULL);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
- if (!gpa || kvm_vcpu_read_guest(vcpu, gpa,
- &ports[i], sizeof(port))) {
- *r = -EFAULT;
- goto out;
- }
if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
*r = -EINVAL;
goto out;
@@ -1330,9 +1318,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
int vcpu_id, u64 param, u64 *r)
{
struct vcpu_set_singleshot_timer oneshot;
+ struct x86_exception e;
s64 delta;
- gpa_t gpa;
- int idx;
if (!kvm_xen_timer_enabled(vcpu))
return false;
@@ -1343,9 +1330,6 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
*r = -EINVAL;
return true;
}
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
/*
* The only difference for 32-bit compat is the 4 bytes of
@@ -1363,9 +1347,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
sizeof_field(struct vcpu_set_singleshot_timer, flags));
- if (!gpa ||
- kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ? sizeof(oneshot) :
- sizeof(struct compat_vcpu_set_singleshot_timer))) {
+ if (kvm_read_guest_virt(vcpu, param, &oneshot, longmode ? sizeof(oneshot) :
+ sizeof(struct compat_vcpu_set_singleshot_timer), &e)) {
*r = -EFAULT;
return true;
}
@@ -1825,20 +1808,20 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
{
u32 port = data->u.evtchn.send_port;
struct evtchnfd *evtchnfd;
+ int ret;
- if (!port || port >= max_evtchn_port(kvm))
- return -EINVAL;
-
+ /* Protect writes to evtchnfd as well as the idr lookup. */
mutex_lock(&kvm->lock);
evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
- mutex_unlock(&kvm->lock);
+ ret = -ENOENT;
if (!evtchnfd)
- return -ENOENT;
+ goto out_unlock;
/* For an UPDATE, nothing may change except the priority/vcpu */
+ ret = -EINVAL;
if (evtchnfd->type != data->u.evtchn.type)
- return -EINVAL;
+ goto out_unlock;
/*
* Port cannot change, and if it's zero that was an eventfd
@@ -1846,20 +1829,21 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
*/
if (!evtchnfd->deliver.port.port ||
evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
- return -EINVAL;
+ goto out_unlock;
/* We only support 2 level event channels for now */
if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
- return -EINVAL;
+ goto out_unlock;
- mutex_lock(&kvm->lock);
evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
evtchnfd->deliver.port.vcpu_idx = -1;
}
+ ret = 0;
+out_unlock:
mutex_unlock(&kvm->lock);
- return 0;
+ return ret;
}
/*
@@ -1871,12 +1855,9 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
{
u32 port = data->u.evtchn.send_port;
struct eventfd_ctx *eventfd = NULL;
- struct evtchnfd *evtchnfd = NULL;
+ struct evtchnfd *evtchnfd;
int ret = -EINVAL;
- if (!port || port >= max_evtchn_port(kvm))
- return -EINVAL;
-
evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
if (!evtchnfd)
return -ENOMEM;
@@ -1952,8 +1933,7 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
if (!evtchnfd)
return -ENOENT;
- if (kvm)
- synchronize_srcu(&kvm->srcu);
+ synchronize_srcu(&kvm->srcu);
if (!evtchnfd->deliver.port.port)
eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
kfree(evtchnfd);
@@ -1962,18 +1942,42 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
static int kvm_xen_eventfd_reset(struct kvm *kvm)
{
- struct evtchnfd *evtchnfd;
+ struct evtchnfd *evtchnfd, **all_evtchnfds;
int i;
+ int n = 0;
mutex_lock(&kvm->lock);
+
+ /*
+ * Because synchronize_srcu() cannot be called inside the
+ * critical section, first collect all the evtchnfd objects
+ * in an array as they are removed from evtchn_ports.
+ */
+ idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
+ n++;
+
+ all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
+ if (!all_evtchnfds) {
+ mutex_unlock(&kvm->lock);
+ return -ENOMEM;
+ }
+
+ n = 0;
idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
+ all_evtchnfds[n++] = evtchnfd;
idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
- synchronize_srcu(&kvm->srcu);
+ }
+ mutex_unlock(&kvm->lock);
+
+ synchronize_srcu(&kvm->srcu);
+
+ while (n--) {
+ evtchnfd = all_evtchnfds[n];
if (!evtchnfd->deliver.port.port)
eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
kfree(evtchnfd);
}
- mutex_unlock(&kvm->lock);
+ kfree(all_evtchnfds);
return 0;
}
@@ -2002,20 +2006,22 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{
struct evtchnfd *evtchnfd;
struct evtchn_send send;
- gpa_t gpa;
- int idx;
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ struct x86_exception e;
- if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
+ /* Sanity check: this structure is the same for 32-bit and 64-bit */
+ BUILD_BUG_ON(sizeof(send) != 4);
+ if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) {
*r = -EFAULT;
return true;
}
- /* The evtchn_ports idr is protected by vcpu->kvm->srcu */
+ /*
+ * evtchnfd is protected by kvm->srcu; the idr lookup instead
+ * is protected by RCU.
+ */
+ rcu_read_lock();
evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
+ rcu_read_unlock();
if (!evtchnfd)
return false;
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 16f43bbc575a..ccf2204477a5 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5317,8 +5317,8 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
unsigned long flags;
spin_lock_irqsave(&bfqd->lock, flags);
- bfq_exit_bfqq(bfqd, bfqq);
bic_set_bfqq(bic, NULL, is_sync);
+ bfq_exit_bfqq(bfqd, bfqq);
spin_unlock_irqrestore(&bfqd->lock, flags);
}
}
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 30d8fd03fec7..97b711e57bff 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -70,11 +70,7 @@ module_param(device_id_scheme, bool, 0444);
static int only_lcd = -1;
module_param(only_lcd, int, 0444);
-/*
- * Display probing is known to take up to 5 seconds, so delay the fallback
- * backlight registration by 5 seconds + 3 seconds for some extra margin.
- */
-static int register_backlight_delay = 8;
+static int register_backlight_delay;
module_param(register_backlight_delay, int, 0444);
MODULE_PARM_DESC(register_backlight_delay,
"Delay in seconds before doing fallback (non GPU driver triggered) "
@@ -2176,6 +2172,17 @@ static bool should_check_lcd_flag(void)
return false;
}
+/*
+ * At least one graphics driver has reported that no LCD is connected
+ * via the native interface. Cancel the registration for fallback acpi_video0.
+ * If another driver still deems this necessary, it can explicitly register it.
+ */
+void acpi_video_report_nolcd(void)
+{
+ cancel_delayed_work(&video_bus_register_backlight_work);
+}
+EXPORT_SYMBOL(acpi_video_report_nolcd);
+
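
A hypothetical call site in a native GPU driver might look like this
(``have_native_lcd`` is a made-up flag standing in for the driver's own
panel-probe result):

	/* Sketch: report that no internal panel exists, so the delayed
	 * fallback acpi_video0 registration can be cancelled.
	 */
	if (!have_native_lcd)
		acpi_video_report_nolcd();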
int acpi_video_register(void)
{
int ret = 0;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index f27914aedbd5..16dcd31d124f 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -432,10 +432,24 @@ static const struct dmi_system_id asus_laptop[] = {
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
},
},
+ {
+ .ident = "Asus ExpertBook B2502",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
+ },
+ },
{ }
};
-static const struct dmi_system_id lenovo_82ra[] = {
+static const struct dmi_system_id lenovo_laptop[] = {
+ {
+ .ident = "LENOVO IdeaPad Flex 5 14ALC7",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
+ },
+ },
{
.ident = "LENOVO IdeaPad Flex 5 16ALC7",
.matches = {
@@ -446,6 +460,17 @@ static const struct dmi_system_id lenovo_82ra[] = {
{ }
};
+static const struct dmi_system_id schenker_gm_rg[] = {
+ {
+ .ident = "XMG CORE 15 (M22)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+ },
+ },
+ { }
+};
+
struct irq_override_cmp {
const struct dmi_system_id *system;
unsigned char irq;
@@ -458,8 +483,9 @@ struct irq_override_cmp {
static const struct irq_override_cmp override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
- { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
- { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ { schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
};
static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index a934bbc9dd37..1b78c7434492 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
+#include <linux/pnp.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <acpi/video.h>
@@ -105,6 +106,26 @@ static bool nvidia_wmi_ec_supported(void)
}
#endif
+static bool apple_gmux_backlight_present(void)
+{
+ struct acpi_device *adev;
+ struct device *dev;
+
+ adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+ if (!adev)
+ return false;
+
+ dev = acpi_get_first_physical_node(adev);
+ if (!dev)
+ return false;
+
+ /*
+ * drivers/platform/x86/apple-gmux.c only supports old style
+ * Apple GMUX with an IO-resource.
+ */
+ return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
+}
+
/* Force to use vendor driver when the ACPI device is known to be
* buggy */
static int video_detect_force_vendor(const struct dmi_system_id *d)
@@ -767,7 +788,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
if (nvidia_wmi_ec_present)
return acpi_backlight_nvidia_wmi_ec;
- if (apple_gmux_present())
+ if (apple_gmux_backlight_present())
return acpi_backlight_apple_gmux;
/* Use ACPI video if available, except when native should be preferred. */
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 5350c73564b6..c7afce465a07 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -28,10 +28,6 @@ static bool sleep_no_lps0 __read_mostly;
module_param(sleep_no_lps0, bool, 0644);
MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
-static bool prefer_microsoft_dsm_guid __read_mostly;
-module_param(prefer_microsoft_dsm_guid, bool, 0644);
-MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation");
-
static const struct acpi_device_id lps0_device_ids[] = {
{"PNP0D80", },
{"", },
@@ -369,27 +365,15 @@ out:
}
struct amd_lps0_hid_device_data {
- const unsigned int rev_id;
const bool check_off_by_one;
- const bool prefer_amd_guid;
};
static const struct amd_lps0_hid_device_data amd_picasso = {
- .rev_id = 0,
.check_off_by_one = true,
- .prefer_amd_guid = false,
};
static const struct amd_lps0_hid_device_data amd_cezanne = {
- .rev_id = 0,
- .check_off_by_one = false,
- .prefer_amd_guid = false,
-};
-
-static const struct amd_lps0_hid_device_data amd_rembrandt = {
- .rev_id = 2,
.check_off_by_one = false,
- .prefer_amd_guid = true,
};
static const struct acpi_device_id amd_hid_ids[] = {
@@ -397,69 +381,27 @@ static const struct acpi_device_id amd_hid_ids[] = {
{"AMD0005", (kernel_ulong_t)&amd_picasso, },
{"AMDI0005", (kernel_ulong_t)&amd_picasso, },
{"AMDI0006", (kernel_ulong_t)&amd_cezanne, },
- {"AMDI0007", (kernel_ulong_t)&amd_rembrandt, },
{}
};
-static int lps0_prefer_microsoft(const struct dmi_system_id *id)
+static int lps0_prefer_amd(const struct dmi_system_id *id)
{
- pr_debug("Preferring Microsoft GUID.\n");
- prefer_microsoft_dsm_guid = true;
+ pr_debug("Using AMD GUID w/ _REV 2.\n");
+ rev_id = 2;
return 0;
}
-
static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
{
/*
- * ASUS TUF Gaming A17 FA707RE
- * https://bugzilla.kernel.org/show_bug.cgi?id=216101
- */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"),
- },
- },
- {
- /* ASUS ROG Zephyrus G14 (2022) */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"),
- },
- },
- {
- /*
- * Lenovo Yoga Slim 7 Pro X 14ARH7
- * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2
- * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL
- */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "82"),
- },
- },
- {
- /*
- * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
+ * AMD Rembrandt based HP EliteBook 835/845/865 G9
+ * Contains specialized AML in AMD/_REV 2 path to avoid
+ * triggering a bug in Qualcomm WLAN firmware. This may be
+ * removed in the future if that firmware is fixed.
*/
- .callback = lps0_prefer_microsoft,
+ .callback = lps0_prefer_amd,
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"),
- },
- },
- {
- /*
- * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW
- * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
- */
- .callback = lps0_prefer_microsoft,
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ DMI_MATCH(DMI_BOARD_NAME, "8990"),
},
},
{}
@@ -484,16 +426,14 @@ static int lps0_device_attach(struct acpi_device *adev,
if (dev_id->id[0])
data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data;
else
- data = &amd_rembrandt;
- rev_id = data->rev_id;
+ data = &amd_cezanne;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) {
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
- } else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid &&
- !prefer_microsoft_dsm_guid) {
+ } else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) {
lps0_dsm_func_mask_microsoft = -EINVAL;
acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
}
@@ -501,8 +441,7 @@ static int lps0_device_attach(struct acpi_device *adev,
rev_id = 1;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
- if (!prefer_microsoft_dsm_guid)
- lps0_dsm_func_mask_microsoft = -EINVAL;
+ lps0_dsm_func_mask_microsoft = -EINVAL;
}
if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 0cfd0ec6229b..14a1c0d14916 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -83,6 +83,7 @@ enum board_ids {
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
static void ahci_shutdown_one(struct pci_dev *dev);
+static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -676,6 +677,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
ahci_save_initial_config(&pdev->dev, hpriv);
}
+static int ahci_pci_reset_controller(struct ata_host *host)
+{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ /*
+ * If platform firmware failed to enable ports, try to enable
+ * them here.
+ */
+ ahci_intel_pcs_quirk(pdev, hpriv);
+
+ return 0;
+}
+
static void ahci_pci_init_controller(struct ata_host *host)
{
struct ahci_host_priv *hpriv = host->private_data;
@@ -870,7 +890,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
ahci_pci_init_controller(host);
@@ -906,7 +926,7 @@ static int ahci_pci_device_resume(struct device *dev)
ahci_mcp89_apple_enable(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
@@ -1784,12 +1804,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
- /*
- * If platform firmware failed to enable ports, try to enable
- * them here.
- */
- ahci_intel_pcs_quirk(pdev, hpriv);
-
/* prepare host */
if (hpriv->cap & HOST_CAP_NCQ) {
pi.flags |= ATA_FLAG_NCQ;
@@ -1899,7 +1913,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- rc = ahci_reset_controller(host);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index b6c36914e7c6..24e656a3ee74 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1263,7 +1263,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
*
* @dmabuf: [in] buffer which is moving
*
- * Informs all attachmenst that they need to destroy and recreated all their
+ * Informs all attachments that they need to destroy and recreate all their
* mappings.
*/
void dma_buf_move_notify(struct dma_buf *dmabuf)
@@ -1281,11 +1281,11 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
/**
* DOC: cpu access
*
- * There are mutliple reasons for supporting CPU access to a dma buffer object:
+ * There are multiple reasons for supporting CPU access to a dma buffer object:
*
* - Fallback operations in the kernel, for example when a device is connected
* over USB and the kernel needs to shuffle the data around first before
- * sending it away. Cache coherency is handled by braketing any transactions
+ * sending it away. Cache coherency is handled by bracketing any transactions
* with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
* access.
*
@@ -1312,7 +1312,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
* replace ION buffers mmap support was needed.
*
* There is no special interfaces, userspace simply calls mmap on the dma-buf
- * fd. But like for CPU access there's a need to braket the actual access,
+ * fd. But like for CPU access there's a need to bracket the actual access,
* which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
* DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
* be restarted.
@@ -1386,10 +1386,10 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
* preparations. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to prepare cpu access for.
- * @direction: [in] length of range for cpu access.
+ * @direction: [in] direction of access.
*
* After the cpu access is complete the caller should call
- * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
+ * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
* it guaranteed to be coherent with other DMA access.
*
* This function will also wait for any DMA transactions tracked through
@@ -1429,7 +1429,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
* actions. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to complete cpu access for.
- * @direction: [in] length of range for cpu access.
+ * @direction: [in] direction of access.
*
* This terminates CPU access started with dma_buf_begin_cpu_access().
*
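
For reference, a minimal importer-side sketch of the bracketing these
kerneldoc comments describe (``dmabuf`` is an imported buffer; error
handling trimmed):

	int ret;

	/* Sketch: bracket a CPU read of an imported buffer. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... access the buffer through an existing kernel mapping ... */

	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);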
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 283816fbd72f..740d6e426ee9 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -13,6 +13,8 @@
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
+#include <linux/iosys-map.h>
static int list_limit = 1024;
module_param(list_limit, int, 0644);
@@ -60,6 +62,30 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
return 0;
}
+static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+ struct udmabuf *ubuf = buf->priv;
+ void *vaddr;
+
+ dma_resv_assert_held(buf->resv);
+
+ vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
+ if (!vaddr)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, vaddr);
+ return 0;
+}
+
+static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+ struct udmabuf *ubuf = buf->priv;
+
+ dma_resv_assert_held(buf->resv);
+
+ vm_unmap_ram(map->vaddr, ubuf->pagecount);
+}
+
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
enum dma_data_direction direction)
{
@@ -162,6 +188,8 @@ static const struct dma_buf_ops udmabuf_ops = {
.unmap_dma_buf = unmap_udmabuf,
.release = release_udmabuf,
.mmap = mmap_udmabuf,
+ .vmap = vmap_udmabuf,
+ .vunmap = vunmap_udmabuf,
.begin_cpu_access = begin_cpu_udmabuf,
.end_cpu_access = end_cpu_udmabuf,
};
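
With these hooks in place, an importer could map the whole buffer into
kernel address space along these lines (sketch only; ``buf`` is a
dma_buf obtained elsewhere):

	struct iosys_map map;
	int ret;

	dma_resv_lock(buf->resv, NULL);
	ret = dma_buf_vmap(buf, &map);		/* ends up in vmap_udmabuf() */
	dma_resv_unlock(buf->resv);
	if (ret)
		return ret;

	/* ... CPU access through map.vaddr ... */

	dma_resv_lock(buf->resv, NULL);
	dma_buf_vunmap(buf, &map);		/* ends up in vunmap_udmabuf() */
	dma_resv_unlock(buf->resv);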
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index a353e27f83f5..ce9c007ed66f 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -27,25 +27,56 @@ static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
__init bool sysfb_parse_mode(const struct screen_info *si,
struct simplefb_platform_data *mode)
{
- const struct simplefb_format *f;
__u8 type;
+ u32 bits_per_pixel;
unsigned int i;
type = si->orig_video_isVGA;
if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
return false;
+ /*
+ * The meaning of depth and bpp for direct-color formats is
+ * inconsistent:
+ *
+ * - DRM format info specifies depth as the number of color
+ * bits; including alpha, but not including filler bits.
+ * - Linux' EFI platform code computes lfb_depth from the
+ * individual color channels, including the reserved bits.
+ * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
+ * versions use 15.
+ * - On the kernel command line, 'bpp' of 32 is usually
+ * XRGB8888 including the filler bits, but 15 is XRGB1555
+ * not including the filler bit.
+ *
+ * It's not easily possible to fix this in struct screen_info,
+ * as this could break UAPI. The best solution is to compute
+ * bits_per_pixel here and ignore lfb_depth. In the loop below,
+ * ignore simplefb formats with alpha bits, as EFI and VESA
+ * don't specify alpha channels.
+ */
+ if (si->lfb_depth > 8) {
+ bits_per_pixel = max(max3(si->red_size + si->red_pos,
+ si->green_size + si->green_pos,
+ si->blue_size + si->blue_pos),
+ si->rsvd_size + si->rsvd_pos);
+ } else {
+ bits_per_pixel = si->lfb_depth;
+ }
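
As a worked example of the computation above (not from the patch
itself): a typical XRGB8888 EFI framebuffer reports red 8 bits at
offset 16, green 8 at offset 8, blue 8 at offset 0 and 8 reserved bits
at offset 24, so bits_per_pixel becomes max(max3(24, 16, 8), 32) = 32
whether the firmware reported lfb_depth as 24 or 32; an XRGB1555 mode
(5/5/5 plus one reserved bit at offset 15) likewise resolves to 16.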
+
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
- f = &formats[i];
- if (si->lfb_depth == f->bits_per_pixel &&
+ const struct simplefb_format *f = &formats[i];
+
+ if (f->transp.length)
+ continue; /* transparent formats are unsupported by VESA/EFI */
+
+ if (bits_per_pixel == f->bits_per_pixel &&
si->red_size == f->red.length &&
si->red_pos == f->red.offset &&
si->green_size == f->green.length &&
si->green_pos == f->green.offset &&
si->blue_size == f->blue.length &&
- si->blue_pos == f->blue.offset &&
- si->rsvd_size == f->transp.length &&
- si->rsvd_pos == f->transp.offset) {
+ si->blue_pos == f->blue.offset) {
mode->format = f->name;
mode->width = si->lfb_width;
mode->height = si->lfb_height;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 315cbdf61979..748b93d00184 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -12,7 +12,6 @@ menuconfig DRM
select HDMI
select FB_CMDLINE
select I2C
- select I2C_ALGOBIT
select DMA_SHARED_BUFFER
select SYNC_FILE
# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
@@ -63,6 +62,12 @@ config DRM_USE_DYNAMIC_DEBUG
bytes per callsite, the .data costs can be substantial, and
are therefore configurable.
+config DRM_KUNIT_TEST_HELPERS
+ tristate
+ depends on DRM && KUNIT
+ help
+ KUnit Helpers for KMS drivers.
+
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
depends on DRM && KUNIT
@@ -73,6 +78,7 @@ config DRM_KUNIT_TEST
select DRM_KMS_HELPER
select DRM_BUDDY
select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
default KUNIT_ALL_TESTS
help
This builds unit tests for DRM. This option is not useful for
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index cc637343d87b..496fa5a6147a 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -126,7 +126,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
# Drivers and the rest
#
-obj-$(CONFIG_DRM_KUNIT_TEST) += tests/
+obj-y += tests/
obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 5fcd510f1abb..5341b6b242c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -13,6 +13,8 @@ config DRM_AMDGPU
select DRM_TTM_HELPER
select POWER_SUPPLY
select HWMON
+ select I2C
+ select I2C_ALGOBIT
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select DRM_BUDDY
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 798d0e9a60b7..332cf8bda7a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -81,7 +81,8 @@ amdgpu-y += \
# add DF block
amdgpu-y += \
df_v1_7.o \
- df_v3_6.o
+ df_v3_6.o \
+ df_v4_3.o
# add GMC block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6b74df446694..4e4efd10cb89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -52,8 +52,7 @@
#include <linux/pci.h>
#include <linux/aer.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -150,7 +149,7 @@ struct amdgpu_watchdog_timer
* Modules parameters.
*/
extern int amdgpu_modeset;
-extern int amdgpu_vram_limit;
+extern unsigned int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
@@ -195,6 +194,7 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
@@ -608,7 +608,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
/* VRAM scratch page for HDP bug, default vram page */
-struct amdgpu_vram_scratch {
+struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
volatile uint32_t *ptr;
u64 gpu_addr;
@@ -755,6 +755,11 @@ struct amdgpu_mqd {
#define AMDGPU_PRODUCT_NAME_LEN 64
struct amdgpu_reset_domain;
+/*
+ * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
+ */
+#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
+
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -848,7 +853,7 @@ struct amdgpu_device {
/* memory management */
struct amdgpu_mman mman;
- struct amdgpu_vram_scratch vram_scratch;
+ struct amdgpu_mem_scratch mem_scratch;
struct amdgpu_wb wb;
atomic64_t num_bytes_moved;
atomic64_t num_evictions;
@@ -870,7 +875,7 @@ struct amdgpu_device {
struct amdgpu_vkms_output *amdgpu_vkms_output;
struct amdgpu_mode_info mode_info;
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
- struct work_struct hotplug_work;
+ struct delayed_work hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src vline0_irq;
struct amdgpu_irq_src vupdate_irq;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 0040deaf8a83..333780491867 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -97,7 +97,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
- uint64_t vram_used;
+ int64_t vram_used;
uint64_t vram_used_aligned;
bool init_complete;
struct work_struct reset_work;
@@ -271,9 +271,9 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_
((struct drm_file *)(drm_priv))->driver_priv)->vm)
int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct file *filp, u32 pasid);
+ struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
- struct file *filp,
+ struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b15091d8310d..d6320c836251 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu_object.h"
#include "amdgpu_gem.h"
@@ -1430,18 +1431,11 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
}
int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct file *filp, u32 pasid)
+ struct amdgpu_vm *avm, u32 pasid)
{
- struct amdgpu_fpriv *drv_priv;
- struct amdgpu_vm *avm;
int ret;
- ret = amdgpu_file_to_fpriv(filp, &drv_priv);
- if (ret)
- return ret;
- avm = &drv_priv->vm;
-
/* Free the original amdgpu allocated pasid,
* will be replaced with kfd allocated pasid.
*/
@@ -1458,19 +1452,12 @@ int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
- struct file *filp,
+ struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef)
{
- struct amdgpu_fpriv *drv_priv;
- struct amdgpu_vm *avm;
int ret;
- ret = amdgpu_file_to_fpriv(filp, &drv_priv);
- if (ret)
- return ret;
- avm = &drv_priv->vm;
-
/* Already a compute VM? */
if (avm->process_info)
return -EINVAL;
@@ -1612,6 +1599,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct amdgpu_bo *bo;
struct drm_gem_object *gobj = NULL;
u32 domain, alloc_domain;
+ uint64_t aligned_size;
u64 alloc_flags;
int ret;
@@ -1667,22 +1655,23 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
* the memory.
*/
if ((*mem)->aql_queue)
- size = size >> 1;
+ size >>= 1;
+ aligned_size = PAGE_ALIGN(size);
(*mem)->alloc_flags = flags;
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags);
if (ret) {
pr_debug("Insufficient memory\n");
goto err_reserve_limit;
}
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
- va, size, domain_string(alloc_domain));
+ va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain));
- ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
+ ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
bo_type, NULL, &gobj);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
@@ -1739,7 +1728,7 @@ err_node_allow:
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
- amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags);
+ amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
if (gobj)
@@ -2099,7 +2088,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_b
}
amdgpu_amdkfd_remove_eviction_fence(
- bo, bo->kfd_bo->process_info->eviction_fence);
+ bo, bo->vm_bo->vm->process_info->eviction_fence);
amdgpu_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index e4d78491bcc7..ededdc01ca28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -28,6 +28,8 @@
struct hmm_range;
+struct drm_file;
+
struct amdgpu_device;
struct amdgpu_bo;
struct amdgpu_bo_va;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index f1a050379190..456e385333b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -411,17 +411,10 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return -EINVAL;
}
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err) {
- DRM_ERROR("Failed to request firmware\n");
- return err;
- }
-
- err = amdgpu_ucode_validate(adev->pm.fw);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
return err;
}
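This request/validate/release cleanup repeats across the series; a minimal sketch of the before/after idiom, using the adev->pm.fw case above (illustrative only — labels and error strings vary per caller):

	/* before: request, validate, then manually drop and clear on error */
	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (!err)
		err = amdgpu_ucode_validate(adev->pm.fw);
	if (err) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		return err;
	}

	/* after: one helper requests and validates; the release helper is
	 * NULL-safe and clears the pointer for the caller
	 */
	err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
	if (err) {
		amdgpu_ucode_release(&adev->pm.fw);
		return err;
	}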
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 2ebbc6382a06..d2abd334b1b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -996,13 +996,33 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
}
+ if (amdgpu_connector->detected_hpd_without_ddc) {
+ force = true;
+ amdgpu_connector->detected_hpd_without_ddc = false;
+ }
+
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
goto exit;
}
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
+
+ /* Sometimes the pins required for the DDC probe on DVI
+ * connectors don't make contact at the same time that the ones
+ * for HPD do. If the DDC probe fails even though we had an HPD
+ * signal, try again later
+ */
+ if (!dret && !force &&
+ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
+ DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+ amdgpu_connector->detected_hpd_without_ddc = true;
+ schedule_delayed_work(&adev->hotplug_work,
+ msecs_to_jiffies(1000));
+ goto exit;
+ }
+ }
if (dret) {
amdgpu_connector->detected_by_load = false;
amdgpu_connector_free_edid(connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 8516c814bc9b..8b7a09b392ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -32,6 +32,8 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
+#include <drm/ttm/ttm_tt.h>
+
#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index 113f39510a72..fb3e3d56d427 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -23,6 +23,8 @@
#ifndef __AMDGPU_CS_H__
#define __AMDGPU_CS_H__
+#include <linux/ww_mutex.h>
+
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_ring.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 0f16d3c09309..f60753f97ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1717,7 +1717,7 @@ no_preempt:
static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
- int r, resched, length;
+ int r, length;
struct amdgpu_ring *ring;
struct dma_fence **fences = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)data;
@@ -1747,8 +1747,6 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
/* stop the scheduler */
kthread_park(ring->sched.thread);
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-
/* preempt the IB */
r = amdgpu_ring_preempt_ib(ring);
if (r) {
@@ -1785,8 +1783,6 @@ failure:
up_read(&adev->reset_domain->sem);
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-
pro_end:
kfree(fences);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index afe6af9c0138..4f38c55e767e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -36,6 +36,7 @@
#include <generated/utsrelease.h>
#include <linux/pci-p2pdma.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
@@ -90,6 +91,8 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
#define AMDGPU_MAX_RETRY_LIMIT 2
#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
+static const struct drm_driver amdgpu_kms_driver;
+
const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
@@ -924,32 +927,33 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
+ * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
*
* @adev: amdgpu_device pointer
*
* Allocates a scratch page of VRAM for use by various things in the
* driver.
*/
-static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
+static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
- return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->vram_scratch.robj,
- &adev->vram_scratch.gpu_addr,
- (void **)&adev->vram_scratch.ptr);
+ return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mem_scratch.robj,
+ &adev->mem_scratch.gpu_addr,
+ (void **)&adev->mem_scratch.ptr);
}
/**
- * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
+ * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
*
* @adev: amdgpu_device pointer
*
* Frees the VRAM scratch page.
*/
-static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
+static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
- amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}
/**
@@ -1981,17 +1985,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
- err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
- if (err) {
- dev_err(adev->dev,
- "Failed to load gpu_info firmware \"%s\"\n",
- fw_name);
- goto out;
- }
- err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
+ err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
if (err) {
dev_err(adev->dev,
- "Failed to validate gpu_info firmware \"%s\"\n",
+ "Failed to get gpu_info firmware \"%s\"\n",
fw_name);
goto out;
}
@@ -2390,9 +2387,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_exchange_data(adev);
- r = amdgpu_device_vram_scratch_init(adev);
+ r = amdgpu_device_mem_scratch_init(adev);
if (r) {
- DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
+ DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
goto init_failed;
}
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
@@ -2410,8 +2407,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* right after GMC hw init, we create CSA */
if (amdgpu_mcbp) {
r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_CSA_SIZE);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("allocate CSA failed %d\n", r);
goto init_failed;
@@ -2581,9 +2579,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
- /* skip CG for GFX on S0ix */
+ /* skip CG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -2617,9 +2616,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
- /* skip PG for GFX on S0ix */
+ /* skip PG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -2871,7 +2871,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_ucode_free_bo(adev);
amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
- amdgpu_device_vram_scratch_fini(adev);
+ amdgpu_device_mem_scratch_fini(adev);
amdgpu_ib_pool_fini(adev);
}
@@ -3027,6 +3027,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
continue;
+ /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+ if (adev->in_s0ix &&
+ (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -3227,15 +3233,6 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
return r;
}
adev->ip_blocks[i].status.hw = true;
-
- if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
- * amdgpu_device_resume() after IP resume.
- */
- amdgpu_gfx_off_ctrl(adev, false);
- DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
- }
-
}
return 0;
@@ -3687,6 +3684,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* Get rid of things like offb */
+ r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
+ if (r)
+ return r;
+
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
@@ -3989,10 +3991,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->mman.initialized) {
- flush_delayed_work(&adev->mman.bdev.wq);
- ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
- }
+ if (adev->mman.initialized)
+ drain_workqueue(adev->mman.bdev.wq);
if (adev->pm_sysfs_en)
amdgpu_pm_sysfs_fini(adev);
@@ -4024,8 +4024,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
- release_firmware(adev->firmware.gpu_info_fw);
- adev->firmware.gpu_info_fw = NULL;
+ amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
adev->accel_working = false;
dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
@@ -4223,13 +4222,6 @@ exit:
/* Make sure IB tests flushed */
flush_delayed_work(&adev->delayed_init_work);
- if (adev->in_s0ix) {
- /* re-enable gfxoff after IP resume. This re-enables gfxoff after
- * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
- */
- amdgpu_gfx_off_ctrl(adev, true);
- DRM_DEBUG("will enable gfxoff for the mission mode\n");
- }
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
@@ -4610,11 +4602,6 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
if (!amdgpu_ras_is_poison_mode_supported(adev))
return true;
- if (!amdgpu_device_ip_check_soft_reset(adev)) {
- dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
- return false;
- }
-
if (amdgpu_sriov_vf(adev))
return true;
@@ -4739,7 +4726,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!need_full_reset)
need_full_reset = amdgpu_device_ip_need_full_reset(adev);
- if (!need_full_reset && amdgpu_gpu_recovery) {
+ if (!need_full_reset && amdgpu_gpu_recovery &&
+ amdgpu_device_ip_check_soft_reset(adev)) {
amdgpu_device_ip_pre_soft_reset(adev);
r = amdgpu_device_ip_soft_reset(adev);
amdgpu_device_ip_post_soft_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1bbd56029a4f..b719852daa07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -33,6 +33,7 @@
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
+#include "df_v4_3.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
@@ -2329,6 +2330,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 2):
adev->df.funcs = &df_v1_7_funcs;
break;
+ case IP_VERSION(4, 3, 0):
+ adev->df.funcs = &df_v4_3_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b22471b3bd63..a876648e3d7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -63,7 +63,7 @@
void amdgpu_display_hotplug_work_func(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
- hotplug_work);
+ hotplug_work.work);
struct drm_device *dev = adev_to_drm(adev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 271e30e34d93..0c001bb8fc2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -37,6 +37,7 @@
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
+#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b4f2d61ea0d5..82b9f85f922b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -23,7 +23,6 @@
*/
#include <drm/amdgpu_drm.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem.h>
@@ -105,13 +104,15 @@
* - 3.46.0 - To enable hot plug amdgpu tests in libdrm
* - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
* - 3.48.0 - Add IP discovery version info to HW INFO
- * 3.49.0 - Add gang submit into CS IOCTL
+ * - 3.49.0 - Add gang submit into CS IOCTL
+ * - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock
+ * Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 49
+#define KMS_DRIVER_MINOR 50
#define KMS_DRIVER_PATCHLEVEL 0
-int amdgpu_vram_limit;
+unsigned int amdgpu_vram_limit = UINT_MAX;
int amdgpu_vis_vram_limit;
int amdgpu_gart_size = -1; /* auto */
int amdgpu_gtt_size = -1; /* auto */
@@ -181,6 +182,7 @@ int amdgpu_mes_kiq;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = -1; /* auto */
+uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
@@ -880,6 +882,32 @@ MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)
module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization to adjust front porch timing to achieve seamless
+ * mode change experience when setting a freesync supported mode for which full
+ * modeset is not needed.
+ *
+ * The Display Core will add a set of modes derived from the base FreeSync
+ * video mode into the corresponding connector's mode list based on commonly
+ * used refresh rates and VRR range of the connected display, when users enable
+ * this feature. From the userspace perspective, switching between different
+ * refresh rates at the same resolution then appears as a seamless mode
+ * change. Additionally, userspace applications such as video playback can
+ * read this mode list and change the refresh rate based on the video
+ * frame rate. Finally, userspace can also derive an appropriate mode for a
+ * particular refresh rate based on the FreeSync Mode and add it to the
+ * connector's mode list.
+ *
+ * Note: This is an experimental feature.
+ *
+ * The default value: 0 (off).
+ */
+MODULE_PARM_DESC(
+ freesync_video,
+ "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
+
+/**
* DOC: reset_method (int)
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
*/
@@ -2095,11 +2123,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
}
#endif
- /* Get rid of things like offb */
- ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
- if (ret)
- return ret;
-
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev))
return PTR_ERR(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
index 41a4c7056729..e86834bfea1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
@@ -30,7 +30,6 @@
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>
#include "amdgpu_sync.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bb7350ea1d75..ed1164a87fce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -34,6 +34,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
@@ -61,10 +62,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
goto unlock;
}
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT);
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
- drm_dev_exit(idx);
+ drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 23692e5d4d13..42a939cd2eac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -372,8 +372,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
* KIQ MQD no matter SRIOV or Bare-metal
*/
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
- &ring->mqd_gpu_addr, &ring->mqd_ptr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 02a4c93673ce..94f10ac0eef7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -35,6 +35,7 @@
#include "amdgpu_xgmi.h"
#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
/**
* amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
@@ -201,13 +202,20 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
u64 base)
{
+ uint64_t vis_limit = (uint64_t)amdgpu_vis_vram_limit << 20;
uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
mc->vram_start = base;
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (limit && limit < mc->real_vram_size)
+ if (limit < mc->real_vram_size)
mc->real_vram_size = limit;
+ if (vis_limit && vis_limit < mc->visible_vram_size)
+ mc->visible_vram_size = vis_limit;
+
+ if (mc->real_vram_size < mc->visible_vram_size)
+ mc->visible_vram_size = mc->real_vram_size;
+
if (mc->xgmi.num_physical_nodes == 0) {
mc->fb_start = mc->vram_start;
mc->fb_end = mc->vram_end;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7aa7e52ca784..2947159d7d78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -785,9 +785,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (adev->pm.dpm_enabled) {
dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+ dev_info->min_engine_clock = amdgpu_dpm_get_sclk(adev, true) * 10;
+ dev_info->min_memory_clock = amdgpu_dpm_get_mclk(adev, true) * 10;
} else {
- dev_info->max_engine_clock = adev->clock.default_sclk * 10;
- dev_info->max_memory_clock = adev->clock.default_mclk * 10;
+ dev_info->max_engine_clock =
+ dev_info->min_engine_clock =
+ adev->clock.default_sclk * 10;
+ dev_info->max_memory_clock =
+ dev_info->min_memory_clock =
+ adev->clock.default_mclk * 10;
}
dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
@@ -1014,6 +1020,24 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
ui32 /= 100;
break;
+ case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK:
+ /* get peak pstate sclk in Mhz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
+ case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK:
+ /* get peak pstate mclk in Mhz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->sensor_info.type);
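Userspace can consume the two new sensor types through libdrm's existing sensor query; a rough sketch, assuming an amdgpu_device_handle `dev` and userspace headers synced with the new AMDGPU_INFO_SENSOR_PEAK_PSTATE_* values:

	uint32_t peak_sclk = 0, peak_mclk = 0;

	/* values come back in MHz (the ioctl divides the raw reading by 100, as above) */
	amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK,
				 sizeof(peak_sclk), &peak_sclk);
	amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK,
				 sizeof(peak_mclk), &peak_mclk);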
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 0c546245793b..82e27bd4f038 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -21,6 +21,8 @@
*
*/
+#include <linux/firmware.h>
+
#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
@@ -1423,3 +1425,60 @@ error_pasid:
kfree(vm);
return 0;
}
+
+int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
+{
+ const struct mes_firmware_header_v1_0 *mes_hdr;
+ struct amdgpu_firmware_info *info;
+ char ucode_prefix[30];
+ char fw_name[40];
+ int r;
+
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
+ ucode_prefix,
+ pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
+ if (r)
+ goto out;
+
+ mes_hdr = (const struct mes_firmware_header_v1_0 *)
+ adev->mes.fw[pipe]->data;
+ adev->mes.uc_start_addr[pipe] =
+ le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
+ adev->mes.data_start_addr[pipe] =
+ le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ int ucode, ucode_data;
+
+ if (pipe == AMDGPU_MES_SCHED_PIPE) {
+ ucode = AMDGPU_UCODE_ID_CP_MES;
+ ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
+ } else {
+ ucode = AMDGPU_UCODE_ID_CP_MES1;
+ ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ }
+
+ info = &adev->firmware.ucode[ucode];
+ info->ucode_id = ucode;
+ info->fw = adev->mes.fw[pipe];
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
+ PAGE_SIZE);
+
+ info = &adev->firmware.ucode[ucode_data];
+ info->ucode_id = ucode_data;
+ info->fw = adev->mes.fw[pipe];
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
+ PAGE_SIZE);
+ }
+
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
+ return r;
+}
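amdgpu_mes_init_microcode() is exported through amdgpu_mes.h below; the IP-block callers are not part of this hunk, but a caller would presumably loop over its pipes along these lines (loop bounds and the KIQ check are assumptions):

	int pipe, r;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		/* assumed: skip the KIQ pipe when MES KIQ is not enabled */
		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
			continue;
		r = amdgpu_mes_init_microcode(adev, pipe);
		if (r)
			return r;
	}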
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 97c05d08a551..547ec35691fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -306,6 +306,7 @@ struct amdgpu_mes_funcs {
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
+int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 8a39300b1a84..93c73faa5714 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -534,6 +534,7 @@ struct amdgpu_connector {
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
+ bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
uint16_t connector_object_id;
struct amdgpu_hpd hpd;
struct amdgpu_router router;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4e684c2afc70..25a68d8888e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -470,8 +470,9 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
return true;
fail:
- DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
- man->size);
+ if (man)
+ DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+ man->size);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 7a2fc920739b..0b59465b1494 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -66,7 +66,8 @@ static int psp_ring_init(struct psp_context *psp,
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
@@ -122,6 +123,38 @@ static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp
}
}
+static int psp_init_sriov_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ char ucode_prefix[30];
+ int ret = 0;
+
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 9):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ break;
+ case IP_VERSION(13, 0, 2):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ ret &= psp_init_ta_microcode(psp, ucode_prefix);
+ break;
+ case IP_VERSION(13, 0, 0):
+ adev->virt.autoload_ucode_id = 0;
+ break;
+ case IP_VERSION(13, 0, 10):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -192,7 +225,10 @@ static int psp_early_init(void *handle)
psp_check_pmfw_centralized_cstate_management(psp);
- return 0;
+ if (amdgpu_sriov_vf(adev))
+ return psp_init_sriov_microcode(psp);
+ else
+ return psp_init_microcode(psp);
}
void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
@@ -350,42 +386,6 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
return ret;
}
-static int psp_init_sriov_microcode(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- int ret = 0;
-
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(9, 0, 0):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "vega10");
- break;
- case IP_VERSION(11, 0, 9):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "navi12");
- break;
- case IP_VERSION(11, 0, 7):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "sienna_cichlid");
- break;
- case IP_VERSION(13, 0, 2):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "aldebaran");
- ret &= psp_init_ta_microcode(psp, "aldebaran");
- break;
- case IP_VERSION(13, 0, 0):
- adev->virt.autoload_ucode_id = 0;
- break;
- case IP_VERSION(13, 0, 10):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
- break;
- default:
- BUG();
- break;
- }
- return ret;
-}
-
static int psp_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -401,15 +401,6 @@ static int psp_sw_init(void *handle)
ret = -ENOMEM;
}
- if (amdgpu_sriov_vf(adev))
- ret = psp_init_sriov_microcode(psp);
- else
- ret = psp_init_microcode(psp);
- if (ret) {
- DRM_ERROR("Failed to load psp firmware!\n");
- return ret;
- }
-
adev->psp.xgmi_context.supports_extended_data =
!adev->gmc.xgmi.connected_to_cpu &&
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2);
@@ -514,20 +505,11 @@ static int psp_sw_fini(void *handle)
psp_memory_training_fini(psp);
- release_firmware(psp->sos_fw);
- psp->sos_fw = NULL;
-
- release_firmware(psp->asd_fw);
- psp->asd_fw = NULL;
-
- release_firmware(psp->ta_fw);
- psp->ta_fw = NULL;
-
- release_firmware(psp->cap_fw);
- psp->cap_fw = NULL;
-
- release_firmware(psp->toc_fw);
- psp->toc_fw = NULL;
+ amdgpu_ucode_release(&psp->sos_fw);
+ amdgpu_ucode_release(&psp->asd_fw);
+ amdgpu_ucode_release(&psp->ta_fw);
+ amdgpu_ucode_release(&psp->cap_fw);
+ amdgpu_ucode_release(&psp->toc_fw);
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
@@ -797,9 +779,13 @@ static int psp_tmr_init(struct psp_context *psp)
if (!psp->tmr_bo) {
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
+ PSP_TMR_ALIGNMENT,
+ AMDGPU_HAS_VRAM(psp->adev) ?
+ AMDGPU_GEM_DOMAIN_VRAM :
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->tmr_bo, &psp->tmr_mc_addr,
+ pptr);
}
return ret;
@@ -1092,7 +1078,8 @@ int psp_ta_init_shared_buf(struct psp_context *psp,
* physical) for ta to host memory
*/
return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&mem_ctx->shared_bo,
&mem_ctx->shared_mc_addr,
&mem_ctx->shared_buf);
@@ -1901,7 +1888,7 @@ out_unlock:
static int psp_securedisplay_initialize(struct psp_context *psp)
{
int ret;
- struct securedisplay_cmd *securedisplay_cmd;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
/*
* TODO: bypass the initialize in sriov for now
@@ -2908,25 +2895,15 @@ int psp_ring_cmd_submit(struct psp_context *psp,
return 0;
}
-int psp_init_asd_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *asd_hdr;
int err = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for asd microcode\n");
- return -EINVAL;
- }
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
if (err)
goto out;
@@ -2938,31 +2915,19 @@ int psp_init_asd_microcode(struct psp_context *psp,
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
return 0;
out:
- dev_err(adev->dev, "fail to initialize asd microcode\n");
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.asd_fw);
return err;
}
-int psp_init_toc_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for toc microcode\n");
- return -EINVAL;
- }
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
- err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.toc_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
if (err)
goto out;
@@ -2974,9 +2939,7 @@ int psp_init_toc_microcode(struct psp_context *psp,
le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
return 0;
out:
- dev_err(adev->dev, "fail to request/validate toc microcode\n");
- release_firmware(adev->psp.toc_fw);
- adev->psp.toc_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.toc_fw);
return err;
}
@@ -3107,8 +3070,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
return 0;
}
-int psp_init_sos_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
@@ -3121,17 +3083,8 @@ int psp_init_sos_microcode(struct psp_context *psp,
uint8_t *ucode_array_start_addr;
int fw_index = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for sos microcode\n");
- return -EINVAL;
- }
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
if (err)
goto out;
@@ -3203,10 +3156,7 @@ int psp_init_sos_microcode(struct psp_context *psp,
return 0;
out:
- dev_err(adev->dev,
- "failed to init sos firmware\n");
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.sos_fw);
return err;
}
@@ -3272,41 +3222,76 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
return 0;
}
-int psp_init_ta_microcode(struct psp_context *psp,
- const char *chip_name)
+static int parse_ta_v1_microcode(struct psp_context *psp)
{
+ const struct ta_firmware_header_v1_0 *ta_hdr;
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
- const struct ta_firmware_header_v2_0 *ta_hdr;
- int err = 0;
- int ta_index = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for ta microcode\n");
+ ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
+
+ if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
return -EINVAL;
- }
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err)
- goto out;
+ adev->psp.xgmi_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->xgmi.fw_version);
+ adev->psp.xgmi_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->xgmi.size_bytes);
+ adev->psp.xgmi_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ras_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->ras.fw_version);
+ adev->psp.ras_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->ras.size_bytes);
+ adev->psp.ras_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->ras.offset_bytes);
+
+ adev->psp.hdcp_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->hdcp.fw_version);
+ adev->psp.hdcp_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->hdcp.size_bytes);
+ adev->psp.hdcp_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.dtm_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->dtm.fw_version);
+ adev->psp.dtm_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->dtm.size_bytes);
+ adev->psp.dtm_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out;
+ return 0;
+}
+
+static int parse_ta_v2_microcode(struct psp_context *psp)
+{
+ const struct ta_firmware_header_v2_0 *ta_hdr;
+ struct amdgpu_device *adev = psp->adev;
+ int err = 0;
+ int ta_index = 0;
ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
- if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) {
- dev_err(adev->dev, "unsupported TA header version\n");
- err = -EINVAL;
- goto out;
- }
+ if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
+ return -EINVAL;
if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
@@ -3314,19 +3299,44 @@ int psp_init_ta_microcode(struct psp_context *psp,
&ta_hdr->ta_fw_bin[ta_index],
ta_hdr);
if (err)
- goto out;
+ return err;
}
return 0;
-out:
- dev_err(adev->dev, "fail to initialize ta microcode\n");
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
+}
+
+int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
+{
+ const struct common_firmware_header *hdr;
+ struct amdgpu_device *adev = psp->adev;
+ char fw_name[PSP_FW_NAME_LEN];
+ int err;
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
+ if (err)
+ return err;
+
+ hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
+ switch (le16_to_cpu(hdr->header_version_major)) {
+ case 1:
+ err = parse_ta_v1_microcode(psp);
+ break;
+ case 2:
+ err = parse_ta_v2_microcode(psp);
+ break;
+ default:
+ dev_err(adev->dev, "unsupported TA header version\n");
+ err = -EINVAL;
+ }
+
+ if (err)
+ amdgpu_ucode_release(&adev->psp.ta_fw);
+
return err;
}
-int psp_init_cap_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
@@ -3334,28 +3344,20 @@ int psp_init_cap_microcode(struct psp_context *psp,
struct amdgpu_firmware_info *info = NULL;
int err = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for cap microcode\n");
- return -EINVAL;
- }
-
if (!amdgpu_sriov_vf(adev)) {
dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
return -EINVAL;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
- err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev);
- if (err) {
- dev_warn(adev->dev, "cap microcode does not exist, skip\n");
- err = 0;
- goto out;
- }
-
- err = amdgpu_ucode_validate(adev->psp.cap_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
if (err) {
+ if (err == -ENODEV) {
+ dev_warn(adev->dev, "cap microcode does not exist, skip\n");
+ err = 0;
+ goto out;
+ }
dev_err(adev->dev, "fail to initialize cap microcode\n");
- goto out;
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
@@ -3372,8 +3374,7 @@ int psp_init_cap_microcode(struct psp_context *psp,
return 0;
out:
- release_firmware(adev->psp.cap_fw);
- adev->psp.cap_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.cap_fw);
return err;
}
@@ -3444,10 +3445,10 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
/* LFB address which is aligned to 1MB boundary per PSP request */
ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
- AMDGPU_GEM_DOMAIN_VRAM,
- &fw_buf_bo,
- &fw_pri_mc_addr,
- &fw_pri_cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &fw_buf_bo, &fw_pri_mc_addr,
+ &fw_pri_cpu_addr);
if (ret)
goto rel_buf;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ad490c1e2f57..d06beb884a16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -706,13 +706,23 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ if (amdgpu_ras_is_feature_allowed(adev, head) ||
+ amdgpu_ras_is_poison_mode_supported(adev))
+ return 1;
+ else
+ return 0;
+}
+
/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
union ta_ras_cmd_input *info;
- int ret;
+ int ret = 0;
if (!con)
return -EINVAL;
@@ -736,7 +746,8 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
}
/* Do not enable if it is not allowed. */
- WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
+ if (enable && !amdgpu_ras_check_feature_allowed(adev, head))
+ goto out;
/* Only enable ras feature operation handle on host side */
if (head->block == AMDGPU_RAS_BLOCK__GFX &&
@@ -754,7 +765,6 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
- ret = 0;
out:
if (head->block == AMDGPU_RAS_BLOCK__GFX)
kfree(info);
@@ -1087,6 +1097,10 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
info->head.block,
info->head.sub_block_index);
+ /* inject on guest isn't allowed, return success directly */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if (!obj)
return -EINVAL;
@@ -1122,11 +1136,54 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
}
/**
- * amdgpu_ras_query_error_count -- Get error counts of all IPs
+ * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctible errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectible errors.
+ * @query_info: pointer to ras_query_if
+ *
+ * Return 0 for query success or do nothing, otherwise return an error
+ * on failures
+ */
+static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count,
+ struct ras_query_if *query_info)
+{
+ int ret;
+
+ if (!query_info)
+ /* do nothing if query_info is not specified */
+ return 0;
+
+ ret = amdgpu_ras_query_error_status(adev, query_info);
+ if (ret)
+ return ret;
+
+ *ce_count += query_info->ce_count;
+ *ue_count += query_info->ue_count;
+
+ /* some hardware/IP supports read to clear
+ * no need to explicitly reset the err status after the query call */
+ if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
+ dev_warn(adev->dev,
+ "Failed to reset error counter and error status\n");
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
* @adev: pointer to AMD GPU device
* @ce_count: pointer to an integer to be set to the count of correctible errors.
* @ue_count: pointer to an integer to be set to the count of uncorrectible
* errors.
+ * @query_info: pointer to ras_query_if if the query request is only for
+ * a specific ip block; if @query_info is NULL, then the query request is for
+ * all the ip blocks that support query ras error counters/status
*
* If set, @ce_count or @ue_count, count and return the corresponding
* error counts in those integer pointers. Return 0 if the device
@@ -1134,11 +1191,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
*/
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
unsigned long *ce_count,
- unsigned long *ue_count)
+ unsigned long *ue_count,
+ struct ras_query_if *query_info)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
unsigned long ce, ue;
+ int ret;
if (!adev->ras_enabled || !con)
return -EOPNOTSUPP;
@@ -1150,26 +1209,23 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
ce = 0;
ue = 0;
- list_for_each_entry(obj, &con->head, node) {
- struct ras_query_if info = {
- .head = obj->head,
- };
- int res;
-
- res = amdgpu_ras_query_error_status(adev, &info);
- if (res)
- return res;
+ if (!query_info) {
+ /* query all the ip blocks that support ras query interface */
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
- if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
- if (amdgpu_ras_reset_error_status(adev, info.head.block))
- dev_warn(adev->dev, "Failed to reset error counter and error status");
+ ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
}
-
- ce += info.ce_count;
- ue += info.ue_count;
+ } else {
+ /* query specific ip block */
+ ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
}
+ if (ret)
+ return ret;
+
if (ce_count)
*ce_count = ce;
@@ -2344,22 +2400,24 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
dev_info(adev->dev, "SRAM ECC is active.\n");
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev))
adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
-
- if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
- adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
- else
- adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
- } else {
+ else
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
1 << AMDGPU_RAS_BLOCK__SDMA |
1 << AMDGPU_RAS_BLOCK__GFX);
- }
+
+ /* VCN/JPEG RAS can be supported on both bare metal and
+ * SRIOV environment
+ */
+ if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
+ adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ else
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
} else {
dev_info(adev->dev, "SRAM ECC is not presented.\n");
}
@@ -2395,7 +2453,7 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
/* Cache new values.
*/
- if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
atomic_set(&con->ras_ce_count, ce_count);
atomic_set(&con->ras_ue_count, ue_count);
}
@@ -2405,11 +2463,42 @@ Out:
pm_runtime_put_autosuspend(dev->dev);
}
+static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ bool df_poison, umc_poison;
+
+ /* poison setting is useless on SRIOV guest */
+ if (amdgpu_sriov_vf(adev) || !con)
+ return;
+
+ /* Init poison supported flag, the default value is false */
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ /* enabled by default when GPU is connected to CPU */
+ con->poison_supported = true;
+ } else if (adev->df.funcs &&
+ adev->df.funcs->query_ras_poison_mode &&
+ adev->umc.ras &&
+ adev->umc.ras->query_ras_poison_mode) {
+ df_poison =
+ adev->df.funcs->query_ras_poison_mode(adev);
+ umc_poison =
+ adev->umc.ras->query_ras_poison_mode(adev);
+
+		/* Only if poison is set in both DF and UMC can we support it */
+ if (df_poison && umc_poison)
+ con->poison_supported = true;
+ else if (df_poison != umc_poison)
+ dev_warn(adev->dev,
+ "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+ df_poison, umc_poison);
+ }
+}
+
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int r;
- bool df_poison, umc_poison;
if (con)
return 0;
@@ -2484,26 +2573,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
- /* Init poison supported flag, the default value is false */
- if (adev->gmc.xgmi.connected_to_cpu) {
- /* enabled by default when GPU is connected to CPU */
- con->poison_supported = true;
- }
- else if (adev->df.funcs &&
- adev->df.funcs->query_ras_poison_mode &&
- adev->umc.ras &&
- adev->umc.ras->query_ras_poison_mode) {
- df_poison =
- adev->df.funcs->query_ras_poison_mode(adev);
- umc_poison =
- adev->umc.ras->query_ras_poison_mode(adev);
- /* Only poison is set in both DF and UMC, we can support it */
- if (df_poison && umc_poison)
- con->poison_supported = true;
- else if (df_poison != umc_poison)
- dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
- df_poison, umc_poison);
- }
+ amdgpu_ras_query_poison_mode(adev);
if (amdgpu_ras_fs_init(adev)) {
r = -EINVAL;
@@ -2564,6 +2634,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
{
struct amdgpu_ras_block_object *ras_obj = NULL;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_query_if *query_info;
unsigned long ue_count, ce_count;
int r;
@@ -2605,11 +2676,17 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
/* Those are the cached values at init.
*/
- if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL);
+ if (!query_info)
+ return -ENOMEM;
+ memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
+
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
atomic_set(&con->ras_ce_count, ce_count);
atomic_set(&con->ras_ue_count, ue_count);
}
+ kfree(query_info);
return 0;
interrupt:
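With the extra parameter, both query styles used above are available to callers; a minimal sketch (ras_block_head here stands in for whatever struct ras_common_if describes the block of interest):

	unsigned long ce_count, ue_count;
	struct ras_query_if query_info = { .head = ras_block_head };

	/* aggregate counts across every registered RAS block */
	if (!amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL))
		dev_dbg(adev->dev, "all: ce %lu ue %lu\n", ce_count, ue_count);

	/* or restrict the query to a single IP block */
	if (!amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, &query_info))
		dev_dbg(adev->dev, "block: ce %lu ue %lu\n", ce_count, ue_count);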
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index bf5a95104ec1..f2ad999993f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -540,7 +540,8 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev);
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
unsigned long *ce_count,
- unsigned long *ue_count);
+ unsigned long *ue_count,
+ struct ras_query_if *query_info);
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 012b72d00e04..85fb730d9fc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -93,7 +93,8 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
/* allocate save restore block */
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.save_restore_obj,
&adev->gfx.rlc.save_restore_gpu_addr,
(void **)&adev->gfx.rlc.sr_ptr);
@@ -130,7 +131,8 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
/* allocate clear state block */
adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
@@ -156,7 +158,8 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
int r;
r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index ea5278f094c0..e9b78739b9ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -154,16 +154,11 @@ int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
{
- int err = 0;
uint16_t version_major;
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
const struct sdma_firmware_header_v2_0 *hdr_v2;
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
header = (const struct common_firmware_header *)
sdma_inst->fw->data;
version_major = le16_to_cpu(header->header_version_major);
@@ -195,7 +190,7 @@ void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
if (duplicate)
break;
}
@@ -205,16 +200,22 @@ void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
}
int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
- char *fw_name, u32 instance,
- bool duplicate)
+ u32 instance, bool duplicate)
{
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
- int err = 0, i;
+ int err, i;
const struct sdma_firmware_header_v2_0 *sdma_hdr;
uint16_t version_major;
-
- err = request_firmware(&adev->sdma.instance[instance].fw, fw_name, adev->dev);
+ char ucode_prefix[30];
+ char fw_name[40];
+
+ amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ if (instance == 0)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s%d.bin", ucode_prefix, instance);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw, fw_name);
if (err)
goto out;
@@ -279,10 +280,8 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
}
out:
- if (err) {
- DRM_ERROR("SDMA: Failed to init firmware \"%s\"\n", fw_name);
+ if (err)
amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
- }
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 7d99205c2e01..2d16e6d36728 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -124,8 +124,8 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
- char *fw_name, u32 instance, bool duplicate);
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance,
+ bool duplicate);
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
bool duplicate);
void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 2c1d82fc4c34..8ed0e073656f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -77,11 +77,11 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp,
}
}
-void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd,
enum ta_securedisplay_command command_id)
{
- *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
- memset(*cmd, 0, sizeof(struct securedisplay_cmd));
+ *cmd = (struct ta_securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
+ memset(*cmd, 0, sizeof(struct ta_securedisplay_cmd));
(*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
(*cmd)->cmd_id = command_id;
}
@@ -93,7 +93,7 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
struct psp_context *psp = &adev->psp;
- struct securedisplay_cmd *securedisplay_cmd;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
struct drm_device *dev = adev_to_drm(adev);
uint32_t phy_id;
uint32_t op;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
index fe98574748f4..456ad68ed4b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
@@ -30,7 +30,7 @@
void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev);
void psp_securedisplay_parse_resp_status(struct psp_context *psp,
enum ta_securedisplay_status status);
-void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd,
enum ta_securedisplay_command command_id);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 55e0284b2bdd..c5ef7f7bdc15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -44,10 +44,10 @@
#include <linux/module.h>
#include <drm/drm_drv.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
@@ -1679,10 +1679,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
/* reserve vram for mem train according to TMR location */
amdgpu_ttm_training_data_block_init(adev);
ret = amdgpu_bo_create_kernel_at(adev,
- ctx->c2p_train_data_offset,
- ctx->train_data_size,
- &ctx->c2p_bo,
- NULL);
+ ctx->c2p_train_data_offset,
+ ctx->train_data_size,
+ &ctx->c2p_bo,
+ NULL);
if (ret) {
DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
amdgpu_ttm_training_reserve_vram_fini(adev);
@@ -1692,10 +1692,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
}
ret = amdgpu_bo_create_kernel_at(adev,
- adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
- adev->mman.discovery_tmr_size,
- &adev->mman.discovery_memory,
- NULL);
+ adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
+ adev->mman.discovery_tmr_size,
+ &adev->mman.discovery_memory,
+ NULL);
if (ret) {
DRM_ERROR("alloc tmr failed(%d)!\n", ret);
amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
@@ -1718,7 +1718,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
{
uint64_t gtt_size;
int r;
- u64 vis_vram_limit;
mutex_init(&adev->mman.gtt_window_lock);
@@ -1741,12 +1740,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
- /* Reduce size of CPU-visible VRAM if requested */
- vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
- if (amdgpu_vis_vram_limit > 0 &&
- vis_vram_limit <= adev->gmc.visible_vram_size)
- adev->gmc.visible_vram_size = vis_vram_limit;
-
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5cb62e6249c2..47549d659d9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -504,7 +504,7 @@ void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
}
}
-int amdgpu_ucode_validate(const struct firmware *fw)
+static int amdgpu_ucode_validate(const struct firmware *fw)
{
const struct common_firmware_header *hdr =
(const struct common_firmware_header *)fw->data;
@@ -1059,12 +1059,233 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
return 0;
}
+static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type)
+{
+ if (block_type == MP0_HWIP) {
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return "vega10";
+ case CHIP_VEGA12:
+ return "vega12";
+ default:
+ return NULL;
+ }
+ break;
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso";
+ return "raven";
+ }
+ break;
+ case IP_VERSION(11, 0, 0):
+ return "navi10";
+ case IP_VERSION(11, 0, 2):
+ return "vega20";
+ case IP_VERSION(11, 0, 4):
+ return "arcturus";
+ case IP_VERSION(11, 0, 5):
+ return "navi14";
+ case IP_VERSION(11, 0, 7):
+ return "sienna_cichlid";
+ case IP_VERSION(11, 0, 9):
+ return "navi12";
+ case IP_VERSION(11, 0, 11):
+ return "navy_flounder";
+ case IP_VERSION(11, 0, 12):
+ return "dimgrey_cavefish";
+ case IP_VERSION(11, 0, 13):
+ return "beige_goby";
+ case IP_VERSION(11, 5, 0):
+ return "vangogh";
+ case IP_VERSION(12, 0, 1):
+ if (adev->asic_type == CHIP_RENOIR) {
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir";
+ return "green_sardine";
+ }
+ break;
+ case IP_VERSION(13, 0, 2):
+ return "aldebaran";
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
+ return "yellow_carp";
+ }
+ } else if (block_type == MP1_HWIP) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_ARCTURUS)
+ return "arcturus_smc";
+ return NULL;
+ case IP_VERSION(11, 0, 0):
+ return "navi10_smc";
+ case IP_VERSION(11, 0, 5):
+ return "navi14_smc";
+ case IP_VERSION(11, 0, 9):
+ return "navi12_smc";
+ case IP_VERSION(11, 0, 7):
+ return "sienna_cichlid_smc";
+ case IP_VERSION(11, 0, 11):
+ return "navy_flounder_smc";
+ case IP_VERSION(11, 0, 12):
+ return "dimgrey_cavefish_smc";
+ case IP_VERSION(11, 0, 13):
+ return "beige_goby_smc";
+ case IP_VERSION(13, 0, 2):
+ return "aldebaran_smc";
+ }
+ } else if (block_type == SDMA0_HWIP) {
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ return "vega10_sdma";
+ case IP_VERSION(4, 0, 1):
+ return "vega12_sdma";
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2_sdma";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso_sdma";
+ return "raven_sdma";
+ case IP_VERSION(4, 1, 2):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir_sdma";
+ return "green_sardine_sdma";
+ case IP_VERSION(4, 2, 0):
+ return "vega20_sdma";
+ case IP_VERSION(4, 2, 2):
+ return "arcturus_sdma";
+ case IP_VERSION(4, 4, 0):
+ return "aldebaran_sdma";
+ case IP_VERSION(5, 0, 0):
+ return "navi10_sdma";
+ case IP_VERSION(5, 0, 1):
+ return "cyan_skillfish2_sdma";
+ case IP_VERSION(5, 0, 2):
+ return "navi14_sdma";
+ case IP_VERSION(5, 0, 5):
+ return "navi12_sdma";
+ case IP_VERSION(5, 2, 0):
+ return "sienna_cichlid_sdma";
+ case IP_VERSION(5, 2, 2):
+ return "navy_flounder_sdma";
+ case IP_VERSION(5, 2, 4):
+ return "dimgrey_cavefish_sdma";
+ case IP_VERSION(5, 2, 5):
+ return "beige_goby_sdma";
+ case IP_VERSION(5, 2, 3):
+ return "yellow_carp_sdma";
+ case IP_VERSION(5, 2, 1):
+ return "vangogh_sdma";
+ }
+ } else if (block_type == UVD_HWIP) {
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2_vcn";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso_vcn";
+ return "raven_vcn";
+ case IP_VERSION(2, 5, 0):
+ return "arcturus_vcn";
+ case IP_VERSION(2, 2, 0):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir_vcn";
+ return "green_sardine_vcn";
+ case IP_VERSION(2, 6, 0):
+ return "aldebaran_vcn";
+ case IP_VERSION(2, 0, 0):
+ return "navi10_vcn";
+ case IP_VERSION(2, 0, 2):
+ if (adev->asic_type == CHIP_NAVI12)
+ return "navi12_vcn";
+ return "navi14_vcn";
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 64):
+ case IP_VERSION(3, 0, 192):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+ return "sienna_cichlid_vcn";
+ return "navy_flounder_vcn";
+ case IP_VERSION(3, 0, 2):
+ return "vangogh_vcn";
+ case IP_VERSION(3, 0, 16):
+ return "dimgrey_cavefish_vcn";
+ case IP_VERSION(3, 0, 33):
+ return "beige_goby_vcn";
+ case IP_VERSION(3, 1, 1):
+ return "yellow_carp_vcn";
+ }
+ } else if (block_type == GC_HWIP) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ return "vega10";
+ case IP_VERSION(9, 2, 1):
+ return "vega12";
+ case IP_VERSION(9, 4, 0):
+ return "vega20";
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso";
+ return "raven";
+ case IP_VERSION(9, 4, 1):
+ return "arcturus";
+ case IP_VERSION(9, 3, 0):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir";
+ return "green_sardine";
+ case IP_VERSION(9, 4, 2):
+ return "aldebaran";
+ case IP_VERSION(10, 1, 10):
+ return "navi10";
+ case IP_VERSION(10, 1, 1):
+ return "navi14";
+ case IP_VERSION(10, 1, 2):
+ return "navi12";
+ case IP_VERSION(10, 3, 0):
+ return "sienna_cichlid";
+ case IP_VERSION(10, 3, 2):
+ return "navy_flounder";
+ case IP_VERSION(10, 3, 1):
+ return "vangogh";
+ case IP_VERSION(10, 3, 4):
+ return "dimgrey_cavefish";
+ case IP_VERSION(10, 3, 5):
+ return "beige_goby";
+ case IP_VERSION(10, 3, 3):
+ return "yellow_carp";
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ return "cyan_skillfish2";
+ }
+ }
+ return NULL;
+}
+
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
char *ip_name;
+ const char *legacy;
uint32_t version = adev->ip_versions[block_type][0];
+ legacy = amdgpu_ucode_legacy_naming(adev, block_type);
+ if (legacy) {
+ snprintf(ucode_prefix, len, "%s", legacy);
+ return;
+ }
+
switch (block_type) {
case GC_HWIP:
ip_name = "gc";
@@ -1091,3 +1312,39 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev);
}
+
+/*
+ * amdgpu_ucode_request - Fetch and validate amdgpu microcode
+ *
+ * @adev: amdgpu device
+ * @fw: pointer to load firmware to
+ * @fw_name: firmware to load
+ *
+ * This is a helper that will use request_firmware and amdgpu_ucode_validate
+ * to load and run basic validation on firmware. If the load fails, remap
+ * the error code to -ENODEV, so that early_init functions will fail to load.
+ */
+int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
+ const char *fw_name)
+{
+ int err = request_firmware(fw, fw_name, adev->dev);
+
+ if (err)
+ return -ENODEV;
+ err = amdgpu_ucode_validate(*fw);
+ if (err)
+ dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
+
+ return err;
+}
+
+/*
+ * amdgpu_ucode_release - Release firmware microcode
+ *
+ * @fw: pointer to firmware to release
+ */
+void amdgpu_ucode_release(const struct firmware **fw)
+{
+ release_firmware(*fw);
+ *fw = NULL;
+}
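
Taken together, the two helpers above define the conversion pattern the remaining hunks in this series follow. A minimal sketch for a hypothetical IP block; the function and file names are illustrative:

    /* old: request_firmware() + amdgpu_ucode_validate() + release_firmware()
     * new: one request call, one release call that also clears the pointer */
    static int foo_init_microcode(struct amdgpu_device *adev,
                                  const struct firmware **fw)
    {
            int r;

            r = amdgpu_ucode_request(adev, fw, "amdgpu/foo.bin");
            if (r)                          /* -ENODEV if missing, else a validation error */
                    amdgpu_ucode_release(fw);   /* no-op when nothing was loaded */
            return r;
    }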
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 552e06929229..bee93ab4298f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -543,7 +543,9 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
-int amdgpu_ucode_validate(const struct firmware *fw);
+int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
+ const char *fw_name);
+void amdgpu_ucode_release(const struct firmware **fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index f76c19fc0392..1c7fcb4f2380 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -169,25 +169,33 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
{
int ret = AMDGPU_RAS_SUCCESS;
- if (!adev->gmc.xgmi.connected_to_cpu) {
- struct ras_err_data err_data = {0, 0, 0, NULL};
- struct ras_common_if head = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- };
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
-
- ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
-
- if (ret == AMDGPU_RAS_SUCCESS && obj) {
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
+ if (!amdgpu_sriov_vf(adev)) {
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+ ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
+
+ if (ret == AMDGPU_RAS_SUCCESS && obj) {
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ }
+ } else if (reset) {
+ /* MCA poison handler is only responsible for GPU reset,
+ * let MCA notifier do page retirement.
+ */
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev);
}
- } else if (reset) {
- /* MCA poison handler is only responsible for GPU reset,
- * let MCA notifier do page retirement.
- */
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- amdgpu_ras_reset_gpu(adev);
+ } else {
+ if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
+ adev->virt.ops->ras_poison_handler(adev);
+ else
+ dev_warn(adev->dev,
+ "No ras_poison_handler interface in SRIOV!\n");
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e00bb654e24b..632a6ded5735 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -260,19 +260,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->uvd.fw);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
- release_firmware(adev->uvd.fw);
- adev->uvd.fw = NULL;
+ amdgpu_ucode_release(&adev->uvd.fw);
return r;
}
@@ -331,8 +323,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if (adev->uvd.harvest_config & (1 << j))
continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
- &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->uvd.inst[j].vcpu_bo,
+ &adev->uvd.inst[j].gpu_addr,
+ &adev->uvd.inst[j].cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
@@ -394,7 +389,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
- release_firmware(adev->uvd.fw);
+ amdgpu_ucode_release(&adev->uvd.fw);
return 0;
}
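
The UVD hunk above (and the matching VCE, VCN and virt hunks below) widens the placement domain of firmware-backed BOs: with both domains listed, amdgpu_bo_create_kernel() can fall back to GTT when the VRAM placement cannot be satisfied. A minimal sketch, with an illustrative size:

    struct amdgpu_bo *vcpu_bo = NULL;
    u64 gpu_addr;
    void *cpu_addr;
    int r;

    r = amdgpu_bo_create_kernel(adev, 256 * 1024, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_VRAM |
                                AMDGPU_GEM_DOMAIN_GTT,
                                &vcpu_bo, &gpu_addr, &cpu_addr);
    if (r)
            dev_err(adev->dev, "(%d) failed to allocate VCPU bo\n", r);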
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b239e874f2d5..2fb61410b1c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -158,19 +158,11 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return -EINVAL;
}
- r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->vce.fw);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
- release_firmware(adev->vce.fw);
- adev->vce.fw = NULL;
+ amdgpu_ucode_release(&adev->vce.fw);
return r;
}
@@ -186,7 +178,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
(binary_id << 8));
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vce.vcpu_bo,
&adev->vce.gpu_addr, &adev->vce.cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -226,7 +220,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vce.num_rings; i++)
amdgpu_ring_fini(&adev->vce.ring[i]);
- release_firmware(adev->vce.fw);
+ amdgpu_ucode_release(&adev->vce.fw);
mutex_destroy(&adev->vce.idle_mutex);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index b1622ac9949f..a7eae84c7bf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -80,10 +80,24 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+int amdgpu_vcn_early_init(struct amdgpu_device *adev)
+{
+ char ucode_prefix[30];
+ char fw_name[40];
+ int r;
+
+ amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.fw);
+
+ return r;
+}
+
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
unsigned long bo_size;
- const char *fw_name;
const struct common_firmware_header *hdr;
unsigned char fw_check;
unsigned int fw_shared_size, log_offset;
@@ -99,46 +113,27 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- fw_name = FIRMWARE_RAVEN2;
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- fw_name = FIRMWARE_PICASSO;
- else
- fw_name = FIRMWARE_RAVEN;
- break;
case IP_VERSION(2, 5, 0):
- fw_name = FIRMWARE_ARCTURUS;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(2, 2, 0):
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- fw_name = FIRMWARE_RENOIR;
- else
- fw_name = FIRMWARE_GREEN_SARDINE;
-
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(2, 6, 0):
- fw_name = FIRMWARE_ALDEBARAN;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(2, 0, 0):
- fw_name = FIRMWARE_NAVI10;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(2, 0, 2):
- if (adev->asic_type == CHIP_NAVI12)
- fw_name = FIRMWARE_NAVI12;
- else
- fw_name = FIRMWARE_NAVI14;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
@@ -146,58 +141,46 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 0, 192):
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
- fw_name = FIRMWARE_SIENNA_CICHLID;
- else
- fw_name = FIRMWARE_NAVY_FLOUNDER;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 0, 2):
- fw_name = FIRMWARE_VANGOGH;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 0, 16):
- fw_name = FIRMWARE_DIMGREY_CAVEFISH;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 0, 33):
- fw_name = FIRMWARE_BEIGE_GOBY;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 1, 1):
- fw_name = FIRMWARE_YELLOW_CARP;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(3, 1, 2):
- fw_name = FIRMWARE_VCN_3_1_2;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 0):
- fw_name = FIRMWARE_VCN4_0_0;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 2):
- fw_name = FIRMWARE_VCN4_0_2;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
case IP_VERSION(4, 0, 4):
- fw_name = FIRMWARE_VCN4_0_4;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
@@ -206,22 +189,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->vcn.fw);
- if (r) {
- dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->vcn.fw);
- adev->vcn.fw = NULL;
- return r;
- }
-
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
@@ -274,8 +241,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
- &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ &adev->vcn.inst[i].cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
return r;
@@ -296,8 +266,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (adev->vcn.indirect_sram) {
r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
- &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ &adev->vcn.inst[i].dpg_sram_cpu_addr);
if (r) {
dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
return r;
@@ -333,7 +306,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
}
- release_firmware(adev->vcn.fw);
+ amdgpu_ucode_release(&adev->vcn.fw);
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
@@ -1250,8 +1223,16 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
if (!ras_if)
return 0;
- ih_data.head = *ras_if;
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ if (!amdgpu_sriov_vf(adev)) {
+ ih_data.head = *ras_if;
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ } else {
+ if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
+ adev->virt.ops->ras_poison_handler(adev);
+ else
+ dev_warn(adev->dev,
+ "No ras_poison_handler interface in SRIOV for VCN!\n");
+ }
return 0;
}
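
With the per-ASIC fw_name switch gone, the name requested by amdgpu_vcn_early_init() comes entirely from amdgpu_ucode_ip_version_decode(): legacy parts keep their historical names through the table added to amdgpu_ucode.c (for example "amdgpu/navi10_vcn.bin"), while newer parts get a name synthesized from the IP version, so VCN 4.0.4 resolves to "amdgpu/vcn_4_0_4.bin". A short sketch of that resolution:

    char ucode_prefix[30];
    char fw_name[40];

    amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix,
                                   sizeof(ucode_prefix));
    /* "navi10_vcn" on legacy parts, "vcn_4_0_4"-style otherwise */
    snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);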
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index dbb8d68a30c6..d3e2af902907 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -369,6 +369,7 @@ enum vcn_ring_type {
VCN_UNIFIED_RING,
};
+int amdgpu_vcn_early_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 2994b9db196f..f39391e03d46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -232,7 +232,8 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
return 0;
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->virt.mm_table.bo,
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 2b9d806e23af..b9e9480448af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -88,6 +88,7 @@ struct amdgpu_virt_ops {
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
u32 data1, u32 data2, u32 data3);
+ void (*ras_poison_handler)(struct amdgpu_device *adev);
};
/*
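
The new ras_poison_handler member gives SRIOV backends a hook that amdgpu_umc_poison_handler() and amdgpu_vcn_process_poison_irq() (both converted above) call instead of doing page retirement in the guest. A hedged sketch of how a mailbox backend might wire it up; the handler body and names are illustrative only:

    static void xgpu_example_ras_poison_handler(struct amdgpu_device *adev)
    {
            /* the guest cannot retire pages itself, so it only notifies the
             * host (e.g. via a mailbox request) and lets the host drive
             * recovery */
    }

    static const struct amdgpu_virt_ops xgpu_example_virt_ops = {
            /* .req_full_gpu / .rel_full_gpu / .reset_gpu ... elided */
            .ras_poison_handler = xgpu_example_ras_poison_handler,
    };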
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index dc379dc22c77..b9441ab457ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 094bb4807303..856a64bc7a89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -29,7 +29,7 @@
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>
#include "amdgpu_sync.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index faa12146635c..9fa1d814508a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -882,7 +882,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
kfree(rsv);
list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
- drm_buddy_free_list(&mgr->mm, &rsv->blocks);
+ drm_buddy_free_list(&mgr->mm, &rsv->allocated);
kfree(rsv);
}
drm_buddy_fini(&mgr->mm);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cbca9866645c..67d16236b216 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -73,10 +73,9 @@ u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/*
@@ -137,18 +136,15 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
}
out:
if (err) {
pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 248f1a4e915f..e85e57933cc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2837,7 +2837,7 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -2902,7 +2902,7 @@ static int dce_v10_0_hw_fini(void *handle)
dce_v10_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3302,7 +3302,7 @@ static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
if (disp_int & mask) {
dce_v10_0_hpd_int_ack(adev, hpd);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index cd9c19060d89..6b406bb7f3f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2956,7 +2956,7 @@ static int dce_v11_0_sw_init(void *handle)
if (r)
return r;
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -3032,7 +3032,7 @@ static int dce_v11_0_hw_fini(void *handle)
dce_v11_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3426,7 +3426,7 @@ static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
if (disp_int & mask) {
dce_v11_0_hpd_int_ack(adev, hpd);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 76323deecc58..2aa21eec0e06 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2715,7 +2715,7 @@ static int dce_v6_0_sw_init(void *handle)
return r;
/* Pre-DCE11 */
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -2776,7 +2776,7 @@ static int dce_v6_0_hw_fini(void *handle)
dce_v6_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3103,7 +3103,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 01cf3ab111cb..9da338889d36 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2739,7 +2739,7 @@ static int dce_v8_0_sw_init(void *handle)
return r;
/* Pre-DCE11 */
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -2802,7 +2802,7 @@ static int dce_v8_0_hw_fini(void *handle)
dce_v8_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3195,7 +3195,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
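
All four pre-DCN display blocks above switch adev->hotplug_work from a plain work item to a delayed work item; scheduling it with a delay of 0 preserves the old run-immediately behaviour. A generic sketch of the pattern with a made-up handler:

    #include <linux/workqueue.h>

    static struct delayed_work example_hotplug_work;

    static void example_hotplug_work_func(struct work_struct *work)
    {
            /* process context: safe to take modeset locks and re-probe
             * connectors here */
    }

    static void example_hotplug_setup_teardown(void)
    {
            INIT_DELAYED_WORK(&example_hotplug_work, example_hotplug_work_func);
            /* a delay of 0 behaves like schedule_work() */
            schedule_delayed_work(&example_hotplug_work, 0);
            /* drain the work before tearing the rest down */
            flush_delayed_work(&example_hotplug_work);
    }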
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_3.c b/drivers/gpu/drm/amd/amdgpu/df_v4_3.c
new file mode 100644
index 000000000000..e8b9e19ede2e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v4_3.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "df_v4_3.h"
+
+#include "df/df_4_3_offset.h"
+#include "df/df_4_3_sh_mask.h"
+
+static bool df_v4_3_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t hw_assert_msklo, hw_assert_mskhi;
+ uint32_t v0, v1, v28, v31;
+
+ hw_assert_msklo = RREG32_SOC15(DF, 0,
+ regDF_CS_UMC_AON0_HardwareAssertMaskLow);
+ hw_assert_mskhi = RREG32_SOC15(DF, 0,
+ regDF_NCS_PG0_HardwareAssertMaskHigh);
+
+ v0 = REG_GET_FIELD(hw_assert_msklo,
+ DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0);
+ v1 = REG_GET_FIELD(hw_assert_msklo,
+ DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk1);
+ v28 = REG_GET_FIELD(hw_assert_mskhi,
+ DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28);
+ v31 = REG_GET_FIELD(hw_assert_mskhi,
+ DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31);
+
+ if (v0 && v1 && v28 && v31)
+ return true;
+ else if (!v0 && !v1 && !v28 && !v31)
+ return false;
+ else {
+ dev_warn(adev->dev, "DF poison setting is inconsistent(%d:%d:%d:%d)!\n",
+ v0, v1, v28, v31);
+ return false;
+ }
+}
+
+const struct amdgpu_df_funcs df_v4_3_funcs = {
+ .query_ras_poison_mode = df_v4_3_query_ras_poison_mode,
+};
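
df_v4_3_query_ras_poison_mode() treats the four hardware-assert mask bits as a single on/off indication and warns on a mixed setting. A hedged example of how RAS setup code could consult the new callback; the local flag is illustrative:

    bool poison_supported = false;

    if (adev->df.funcs && adev->df.funcs->query_ras_poison_mode)
            poison_supported = adev->df.funcs->query_ras_poison_mode(adev);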
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_3.h b/drivers/gpu/drm/amd/amdgpu/df_v4_3.h
new file mode 100644
index 000000000000..06ef0724edd3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v4_3.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __DF_V4_3_H__
+#define __DF_V4_3_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_df_funcs df_v4_3_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 49d34c7bbf20..6983acc456b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3891,18 +3891,12 @@ err1:
static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
@@ -3974,9 +3968,9 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
char fw_name[40];
- char *wks = "";
+ char ucode_prefix[30];
+ const char *wks = "";
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -3984,90 +3978,40 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(10, 1, 10):
- chip_name = "navi10";
- break;
- case IP_VERSION(10, 1, 1):
- chip_name = "navi14";
- if (!(adev->pdev->device == 0x7340 &&
- adev->pdev->revision != 0x00))
- wks = "_wks";
- break;
- case IP_VERSION(10, 1, 2):
- chip_name = "navi12";
- break;
- case IP_VERSION(10, 3, 0):
- chip_name = "sienna_cichlid";
- break;
- case IP_VERSION(10, 3, 2):
- chip_name = "navy_flounder";
- break;
- case IP_VERSION(10, 3, 1):
- chip_name = "vangogh";
- break;
- case IP_VERSION(10, 3, 4):
- chip_name = "dimgrey_cavefish";
- break;
- case IP_VERSION(10, 3, 5):
- chip_name = "beige_goby";
- break;
- case IP_VERSION(10, 3, 3):
- chip_name = "yellow_carp";
- break;
- case IP_VERSION(10, 3, 6):
- chip_name = "gc_10_3_6";
- break;
- case IP_VERSION(10, 1, 3):
- case IP_VERSION(10, 1, 4):
- chip_name = "cyan_skillfish2";
- break;
- case IP_VERSION(10, 3, 7):
- chip_name = "gc_10_3_7";
- break;
- default:
- BUG();
- }
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) &&
+ (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00)))
+ wks = "_wks";
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
if (!amdgpu_sriov_vf(adev)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
/* don't check this. There are apparently firmwares in the wild with
* incorrect size in the header
*/
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err == -ENODEV)
+ goto out;
if (err)
dev_dbg(adev->dev,
- "gfx10: amdgpu_ucode_validate() failed \"%s\"\n",
+ "gfx10: amdgpu_ucode_request() failed \"%s\"\n",
fw_name);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
@@ -4077,47 +4021,34 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
}
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
gfx_v10_0_check_fw_write_wait(adev);
out:
if (err) {
- dev_err(adev->dev,
- "gfx10: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
gfx_v10_0_check_gfxoff_flag(adev);
@@ -4270,19 +4201,11 @@ static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
-static int gfx_v10_0_me_init(struct amdgpu_device *adev)
+static void gfx_v10_0_me_init(struct amdgpu_device *adev)
{
- int r;
-
bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
amdgpu_gfx_graphics_queue_acquire(adev);
-
- r = gfx_v10_0_init_microcode(adev);
- if (r)
- DRM_ERROR("Failed to load gfx firmware!\n");
-
- return r;
}
static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
@@ -4650,9 +4573,7 @@ static int gfx_v10_0_sw_init(void *handle)
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
- r = gfx_v10_0_me_init(adev);
- if (r)
- return r;
+ gfx_v10_0_me_init(adev);
if (adev->gfx.rlc.funcs) {
if (adev->gfx.rlc.funcs->init) {
@@ -7630,7 +7551,7 @@ static int gfx_v10_0_early_init(void *handle)
/* init rlcg reg access ctrl */
gfx_v10_0_init_rlcg_reg_access_ctrl(adev);
- return 0;
+ return gfx_v10_0_init_microcode(adev);
}
static int gfx_v10_0_late_init(void *handle)
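
gfx_v10_0 now fetches its microcode from early_init rather than from sw_init via me_init; because amdgpu_ucode_request() maps a missing blob to -ENODEV, probing aborts before any ring or BO setup has happened. A hedged sketch of the resulting flow for a hypothetical block:

    static int example_gfx_early_init(void *handle)
    {
            struct amdgpu_device *adev = (struct amdgpu_device *)handle;

            /* register and callback setup ... */

            /* a missing firmware file fails the whole probe here, long
             * before sw_init allocates anything */
            return example_gfx_init_microcode(adev);
    }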
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index a56c6e106d00..985fe704203e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -431,18 +431,37 @@ err1:
static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
kfree(adev->gfx.rlc.register_list_format);
}
+static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
+{
+ const struct psp_firmware_header_v1_0 *toc_hdr;
+ int err = 0;
+ char fw_name[40];
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
+ if (err)
+ goto out;
+
+ toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
+ adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
+ adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
+ adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
+ adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
+ le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->psp.toc_fw);
+ return err;
+}
+
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
@@ -457,10 +476,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
/* check pfp fw hdr version to decide if enable rs64 for gfx11.*/
@@ -477,10 +493,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
if (adev->gfx.rs64_enable) {
@@ -493,10 +506,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -508,10 +518,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
if (adev->gfx.rs64_enable) {
@@ -525,59 +532,23 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
}
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
+ err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);
+
/* only one MEC for gfx 11.0.0. */
adev->gfx.mec2_fw = NULL;
out:
if (err) {
- dev_err(adev->dev,
- "gfx11: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
}
return err;
}
-static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev)
-{
- const struct psp_firmware_header_v1_0 *toc_hdr;
- int err = 0;
- char fw_name[40];
- char ucode_prefix[30];
-
- amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
- err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.toc_fw);
- if (err)
- goto out;
-
- toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
- adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
- adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
- adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
- adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
- le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
- return 0;
-out:
- dev_err(adev->dev, "Failed to load TOC microcode\n");
- release_firmware(adev->psp.toc_fw);
- adev->psp.toc_fw = NULL;
- return err;
-}
-
static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
u32 count = 0;
@@ -714,19 +685,11 @@ static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}
-static int gfx_v11_0_me_init(struct amdgpu_device *adev)
+static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
- int r;
-
bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
amdgpu_gfx_graphics_queue_acquire(adev);
-
- r = gfx_v11_0_init_microcode(adev);
- if (r)
- DRM_ERROR("Failed to load gfx firmware!\n");
-
- return r;
}
static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
@@ -987,10 +950,11 @@ static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
total_size = gfx_v11_0_calc_toc_total_size(adev);
r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.rlc_autoload_bo,
- &adev->gfx.rlc.rlc_autoload_gpu_addr,
- (void **)&adev->gfx.rlc.rlc_autoload_ptr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.rlc.rlc_autoload_bo,
+ &adev->gfx.rlc.rlc_autoload_gpu_addr,
+ (void **)&adev->gfx.rlc.rlc_autoload_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
@@ -1339,9 +1303,7 @@ static int gfx_v11_0_sw_init(void *handle)
}
}
- r = gfx_v11_0_me_init(adev);
- if (r)
- return r;
+ gfx_v11_0_me_init(adev);
r = gfx_v11_0_rlc_init(adev);
if (r) {
@@ -1409,9 +1371,6 @@ static int gfx_v11_0_sw_init(void *handle)
/* allocate visible FB for rlc auto-loading fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
- r = gfx_v11_0_init_toc_microcode(adev);
- if (r)
- dev_err(adev->dev, "Failed to load toc firmware!\n");
r = gfx_v11_0_rlc_autoload_buffer_init(adev);
if (r)
return r;
@@ -2649,7 +2608,9 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
/* 64kb align */
r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.pfp.pfp_fw_obj,
&adev->gfx.pfp.pfp_fw_gpu_addr,
(void **)&adev->gfx.pfp.pfp_fw_ptr);
@@ -2660,7 +2621,9 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_reserved(adev, fw_data_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.pfp.pfp_fw_data_obj,
&adev->gfx.pfp.pfp_fw_data_gpu_addr,
(void **)&adev->gfx.pfp.pfp_fw_data_ptr);
@@ -2863,7 +2826,9 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
/* 64kb align*/
r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.me.me_fw_obj,
&adev->gfx.me.me_fw_gpu_addr,
(void **)&adev->gfx.me.me_fw_ptr);
@@ -2874,7 +2839,9 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_reserved(adev, fw_data_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.me.me_fw_data_obj,
&adev->gfx.me.me_fw_data_gpu_addr,
(void **)&adev->gfx.me.me_fw_data_ptr);
@@ -3380,7 +3347,9 @@ static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.mec_fw_obj,
&adev->gfx.mec.mec_fw_gpu_addr,
(void **)&fw_ucode_ptr);
@@ -3391,7 +3360,9 @@ static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_reserved(adev, fw_data_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.mec_fw_data_obj,
&adev->gfx.mec.mec_fw_data_gpu_addr,
(void **)&fw_data_ptr);
@@ -4680,7 +4651,7 @@ static int gfx_v11_0_early_init(void *handle)
gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
- return 0;
+ return gfx_v11_0_init_microcode(adev);
}
static int gfx_v11_0_ras_late_init(void *handle)
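
The relocated gfx_v11_0_init_toc_microcode() uses the usual header-parsing idiom: fields in the blob are little-endian, and the payload starts ucode_array_offset_bytes past the header. A minimal sketch of that idiom for any common-header firmware; the helper name is illustrative:

    static const u8 *example_fw_payload(const struct firmware *fw,
                                        u32 *ucode_version)
    {
            const struct common_firmware_header *hdr =
                    (const struct common_firmware_header *)fw->data;

            *ucode_version = le32_to_cpu(hdr->ucode_version);
            return (const u8 *)hdr +
                    le32_to_cpu(hdr->ucode_array_offset_bytes);
    }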
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 204b246f0e3f..c41219e23151 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -338,10 +338,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
@@ -349,10 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
@@ -360,10 +354,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
@@ -371,10 +362,9 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
@@ -382,14 +372,10 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
pr_err("gfx6: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
}
return err;
}
@@ -2375,7 +2361,8 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
dws = adev->gfx.rlc.clear_state_size + (256 / 4);
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0f2976507e48..9d5c1e29b4a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -887,6 +887,16 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *bu
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+}
+
/*
* Core functions
*/
@@ -927,88 +937,44 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
if (adev->asic_type == CHIP_KAVERI) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (err)
goto out;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
-
out:
if (err) {
pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
+ gfx_v7_0_free_microcode(adev);
}
return err;
}
-static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
-{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
-}
-
/**
* gfx_v7_0_tiling_mode_table_init - init the hw tiling table
*
@@ -2772,7 +2738,8 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
* GFX7_MEC_HPD_SIZE * 2;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d47135606e3e..4fb577d047fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -924,20 +924,14 @@ err1:
static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ))
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
@@ -989,40 +983,34 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
@@ -1030,20 +1018,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
@@ -1060,10 +1045,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->virt.chained_ib_support = false;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
@@ -1110,20 +1094,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
@@ -1132,19 +1113,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
(adev->asic_type != CHIP_TOPAZ)) {
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
}
if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)
adev->gfx.mec2_fw->data;
adev->gfx.mec2_fw_version =
@@ -1219,18 +1197,12 @@ out:
dev_err(adev->dev,
"gfx8: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
return err;
}
@@ -1340,7 +1312,8 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
if (mec_hpd_size) {
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f202b45c413c..e80685d1e6c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1078,18 +1078,12 @@ err1:
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
@@ -1251,55 +1245,40 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
}
static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
- const char *chip_name)
+ char *chip_name)
{
char fw_name[30];
int err;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
out:
if (err) {
- dev_err(adev->dev,
- "gfx9: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
}
return err;
}
static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
- const char *chip_name)
+ char *chip_name)
{
char fw_name[30];
int err;
@@ -1328,10 +1307,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -1340,13 +1316,9 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
- if (err) {
- dev_err(adev->dev,
- "gfx9: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- }
+ if (err)
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+
return err;
}
@@ -1361,7 +1333,7 @@ static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
}
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
- const char *chip_name)
+ char *chip_name)
{
char fw_name[30];
int err;
@@ -1371,12 +1343,9 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
- goto out;
+ return err;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
@@ -1386,11 +1355,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
@@ -1402,75 +1368,35 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
-out:
gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
if (err) {
- dev_err(adev->dev,
- "gfx9: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
return err;
}
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
+ char ucode_prefix[30];
int r;
DRM_DEBUG("\n");
-
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(9, 0, 1):
- chip_name = "vega10";
- break;
- case IP_VERSION(9, 2, 1):
- chip_name = "vega12";
- break;
- case IP_VERSION(9, 4, 0):
- chip_name = "vega20";
- break;
- case IP_VERSION(9, 2, 2):
- case IP_VERSION(9, 1, 0):
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- chip_name = "raven2";
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- chip_name = "picasso";
- else
- chip_name = "raven";
- break;
- case IP_VERSION(9, 4, 1):
- chip_name = "arcturus";
- break;
- case IP_VERSION(9, 3, 0):
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- chip_name = "renoir";
- else
- chip_name = "green_sardine";
- break;
- case IP_VERSION(9, 4, 2):
- chip_name = "aldebaran";
- break;
- default:
- BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
/* No CPG in Arcturus */
if (adev->gfx.num_gfx_rings) {
- r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
+ r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix);
if (r)
return r;
}
- r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
+ r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix);
if (r)
return r;
- r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
+ r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix);
if (r)
return r;
@@ -1783,7 +1709,8 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
if (mec_hpd_size) {
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -2158,12 +2085,6 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
- r = gfx_v9_0_init_microcode(adev);
- if (r) {
- DRM_ERROR("Failed to load gfx firmware!\n");
- return r;
- }
-
if (adev->gfx.rlc.funcs) {
if (adev->gfx.rlc.funcs->init) {
r = adev->gfx.rlc.funcs->init(adev);
@@ -4605,7 +4526,7 @@ static int gfx_v9_0_early_init(void *handle)
/* init rlcg reg access ctrl */
gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
- return 0;
+ return gfx_v9_0_init_microcode(adev);
}
static int gfx_v9_0_ecc_late_init(void *handle)
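
Two related cleanups run through the gfx_v9_0 hunks above: the GC_HWIP switch over chip names is replaced by amdgpu_ucode_ip_version_decode(), and gfx_v9_0_init_microcode() moves from sw_init to early_init so firmware problems surface before software-state allocation. A condensed sketch of how the decoded prefix feeds the firmware path (the prefix value in the comment is illustrative; the actual mapping lives in amdgpu_ucode_ip_version_decode()):

	char ucode_prefix[30];
	char fw_name[40];

	/* e.g. GC 9.4.2 is expected to decode to a prefix like "aldebaran",
	 * making the per-chip switch statement unnecessary.
	 */
	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);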
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index ec4d5e15b766..ab2325f6c7ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -120,7 +120,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 34513e8e1519..9b3a02527318 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -165,7 +165,7 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 3f8676d23a5e..4aacbbec31e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -167,7 +167,7 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
index 0e13370c2057..fa42d1907dfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
@@ -163,7 +163,7 @@ static void gfxhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
adev->gmc.vram_end >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start
+ adev->vm_manager.vram_base_offset;
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
index 080ff11ca305..3dc17a3deedb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
@@ -169,7 +169,7 @@ static void gfxhub_v3_0_3_init_system_aperture_regs(struct amdgpu_device *adev)
adev->gmc.vram_end >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start
+ adev->vm_manager.vram_base_offset;
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 21e46817d82d..7db1f1a7e33c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -78,13 +78,25 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
break;
default:
break;
@@ -835,10 +847,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1061,9 +1070,12 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
}
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- r = adev->gfxhub.funcs->gart_enable(adev);
- if (r)
- return r;
+
+ if (!adev->in_s0ix) {
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r)
+ return r;
+ }
r = adev->mmhub.funcs->gart_enable(adev);
if (r)
@@ -1077,10 +1089,12 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
- adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
- gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
+ if (!adev->in_s0ix)
+ gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
@@ -1101,7 +1115,7 @@ static int gmc_v10_0_hw_init(void *handle)
* harvestable groups in gc_utcl2 need to be programmed before any GFX block
* register setup within GMC, or else system hang when harvesting SA.
*/
- if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
+ if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
adev->gfxhub.funcs->utcl2_harvest(adev);
r = gmc_v10_0_gart_enable(adev);
@@ -1129,7 +1143,8 @@ static int gmc_v10_0_hw_init(void *handle)
*/
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs->gart_disable(adev);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 4326078689cd..5e0018fe7e7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -64,13 +64,25 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index ec291d28edff..b7dad4e67813 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -131,19 +131,12 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->gmc.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
dev_err(adev->dev,
"si_mc: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
@@ -258,7 +251,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
- adev->vram_scratch.gpu_addr >> 12);
+ adev->mem_scratch.gpu_addr >> 12);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
@@ -894,8 +887,7 @@ static int gmc_v6_0_sw_fini(void *handle)
amdgpu_vm_manager_fini(adev);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 979da6f510e8..402960b0174e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -156,16 +156,10 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gmc.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
@@ -292,7 +286,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
- adev->vram_scratch.gpu_addr >> 12);
+ adev->mem_scratch.gpu_addr >> 12);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
@@ -389,10 +383,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1081,8 +1072,7 @@ static int gmc_v7_0_sw_fini(void *handle)
kfree(adev->gmc.vm_fault_info);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 382dde1ce74c..504c1b34dab7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -264,16 +264,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gmc.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
@@ -474,7 +468,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
- adev->vram_scratch.gpu_addr >> 12);
+ adev->mem_scratch.gpu_addr >> 12);
if (amdgpu_sriov_vf(adev)) {
tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
@@ -587,10 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1203,8 +1194,7 @@ static int gmc_v8_0_sw_fini(void *handle)
kfree(adev->gmc.vm_fault_info);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 08d6cf79fb15..d65c6cea3445 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -484,6 +484,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0))
+ continue;
+
if (j == AMDGPU_GFXHUB_0)
tmp = RREG32_SOC15_IP(GC, reg);
else
@@ -504,6 +512,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0))
+ continue;
+
if (j == AMDGPU_GFXHUB_0)
tmp = RREG32_SOC15_IP(GC, reg);
else
@@ -1536,10 +1552,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1862,9 +1875,12 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
}
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- r = adev->gfxhub.funcs->gart_enable(adev);
- if (r)
- return r;
+
+ if (!adev->in_s0ix) {
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r)
+ return r;
+ }
r = adev->mmhub.funcs->gart_enable(adev);
if (r)
@@ -1911,11 +1927,15 @@ static int gmc_v9_0_hw_init(void *handle)
value = true;
if (!amdgpu_sriov_vf(adev)) {
- adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
adev->mmhub.funcs->set_fault_enable_default(adev, value);
}
- for (i = 0; i < adev->num_vmhubs; ++i)
+ for (i = 0; i < adev->num_vmhubs; ++i) {
+ if (adev->in_s0ix && (i == AMDGPU_GFXHUB_0))
+ continue;
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
+ }
if (adev->umc.funcs && adev->umc.funcs->init_registers)
adev->umc.funcs->init_registers(adev);
@@ -1939,7 +1959,8 @@ static int gmc_v9_0_hw_init(void *handle)
*/
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs->gart_disable(adev);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
}
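
The gmc_v9_0 hunks above (and the matching gmc_v10_0/gmc_v11_0 ones) all follow one pattern: GFXHUB register programming, fault-enable setup, and TLB flushes are skipped when adev->in_s0ix is set, while the MMHUB side is still handled normally, on the assumption that GFXHUB programming persists across S0ix and is not re-initialized on resume. A condensed illustrative sketch of the pattern (example_ name is hypothetical, not driver code):

static int example_gart_enable(struct amdgpu_device *adev)
{
	int r;

	/* Leave the GFX hub untouched across S0ix suspend/resume;
	 * its programming is carried over rather than redone.
	 */
	if (!adev->in_s0ix) {
		r = adev->gfxhub.funcs->gart_enable(adev);
		if (r)
			return r;
	}

	/* The MM hub is always (re)programmed. */
	return adev->mmhub.funcs->gart_enable(adev);
}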
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 95548c512f4f..ed0d368149aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -49,10 +49,7 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_imu.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.imu_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.imu_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, fw_name);
if (err)
goto out;
imu_hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
@@ -77,7 +74,7 @@ out:
dev_err(adev->dev,
"gfx11: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->gfx.imu_fw);
+ amdgpu_ucode_release(&adev->gfx.imu_fw);
}
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 614394118a53..2e2062636d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -379,89 +379,6 @@ static const struct amdgpu_mes_funcs mes_v10_1_funcs = {
.resume_gang = mes_v10_1_resume_gang,
};
-static int mes_v10_1_init_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- const char *chip_name;
- char fw_name[30];
- int err;
- const struct mes_firmware_header_v1_0 *mes_hdr;
- struct amdgpu_firmware_info *info;
-
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(10, 1, 10):
- chip_name = "navi10";
- break;
- case IP_VERSION(10, 3, 0):
- chip_name = "sienna_cichlid";
- break;
- default:
- BUG();
- }
-
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
- chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
- chip_name);
-
- err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
- if (err)
- return err;
-
- err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
- if (err) {
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
- return err;
- }
-
- mes_hdr = (const struct mes_firmware_header_v1_0 *)
- adev->mes.fw[pipe]->data;
- adev->mes.uc_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
- adev->mes.data_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- int ucode, ucode_data;
-
- if (pipe == AMDGPU_MES_SCHED_PIPE) {
- ucode = AMDGPU_UCODE_ID_CP_MES;
- ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
- } else {
- ucode = AMDGPU_UCODE_ID_CP_MES1;
- ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
- }
-
- info = &adev->firmware.ucode[ucode];
- info->ucode_id = ucode;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
- PAGE_SIZE);
-
- info = &adev->firmware.ucode[ucode_data];
- info->ucode_id = ucode_data;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
- PAGE_SIZE);
- }
-
- return 0;
-}
-
-static void mes_v10_1_free_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
-}
-
static int mes_v10_1_allocate_ucode_buffer(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
@@ -1007,7 +924,6 @@ static int mes_v10_1_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
- adev->mes.adev = adev;
adev->mes.funcs = &mes_v10_1_funcs;
adev->mes.kiq_hw_init = &mes_v10_1_kiq_hw_init;
@@ -1019,10 +935,6 @@ static int mes_v10_1_sw_init(void *handle)
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
- r = mes_v10_1_init_microcode(adev, pipe);
- if (r)
- return r;
-
r = mes_v10_1_allocate_eop_buf(adev, pipe);
if (r)
return r;
@@ -1059,8 +971,7 @@ static int mes_v10_1_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
&adev->mes.eop_gpu_addr[pipe],
NULL);
-
- mes_v10_1_free_microcode(adev, pipe);
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
@@ -1229,6 +1140,22 @@ static int mes_v10_1_resume(void *handle)
return amdgpu_mes_resume(adev);
}
+static int mes_v10_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int pipe, r;
+
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
+ continue;
+ r = amdgpu_mes_init_microcode(adev, pipe);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
static int mes_v10_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1241,6 +1168,7 @@ static int mes_v10_0_late_init(void *handle)
static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
.name = "mes_v10_1",
+ .early_init = mes_v10_0_early_init,
.late_init = mes_v10_0_late_init,
.sw_init = mes_v10_1_sw_init,
.sw_fini = mes_v10_1_sw_fini,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 970b066b37bb..bfa305079bfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -459,80 +459,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.misc_op = mes_v11_0_misc_op,
};
-static int mes_v11_0_init_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- char fw_name[30];
- char ucode_prefix[30];
- int err;
- const struct mes_firmware_header_v1_0 *mes_hdr;
- struct amdgpu_firmware_info *info;
-
- amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
- ucode_prefix);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
- ucode_prefix);
-
- err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
- if (err)
- return err;
-
- err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
- if (err) {
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
- return err;
- }
-
- mes_hdr = (const struct mes_firmware_header_v1_0 *)
- adev->mes.fw[pipe]->data;
- adev->mes.uc_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
- adev->mes.data_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- int ucode, ucode_data;
-
- if (pipe == AMDGPU_MES_SCHED_PIPE) {
- ucode = AMDGPU_UCODE_ID_CP_MES;
- ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
- } else {
- ucode = AMDGPU_UCODE_ID_CP_MES1;
- ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
- }
-
- info = &adev->firmware.ucode[ucode];
- info->ucode_id = ucode;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
- PAGE_SIZE);
-
- info = &adev->firmware.ucode[ucode_data];
- info->ucode_id = ucode_data;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
- PAGE_SIZE);
- }
-
- return 0;
-}
-
-static void mes_v11_0_free_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
-}
-
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
@@ -549,7 +475,9 @@ static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.ucode_fw_obj[pipe],
&adev->mes.ucode_fw_gpu_addr[pipe],
(void **)&adev->mes.ucode_fw_ptr[pipe]);
@@ -582,7 +510,9 @@ static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
(void **)&adev->mes.data_fw_ptr[pipe]);
@@ -1087,7 +1017,6 @@ static int mes_v11_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
- adev->mes.adev = adev;
adev->mes.funcs = &mes_v11_0_funcs;
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;
@@ -1100,10 +1029,6 @@ static int mes_v11_0_sw_init(void *handle)
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
- r = mes_v11_0_init_microcode(adev, pipe);
- if (r)
- return r;
-
r = mes_v11_0_allocate_eop_buf(adev, pipe);
if (r)
return r;
@@ -1140,8 +1065,7 @@ static int mes_v11_0_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
&adev->mes.eop_gpu_addr[pipe],
NULL);
-
- mes_v11_0_free_microcode(adev, pipe);
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
@@ -1338,6 +1262,22 @@ static int mes_v11_0_resume(void *handle)
return amdgpu_mes_resume(adev);
}
+static int mes_v11_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int pipe, r;
+
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
+ continue;
+ r = amdgpu_mes_init_microcode(adev, pipe);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
static int mes_v11_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1352,6 +1292,7 @@ static int mes_v11_0_late_init(void *handle)
static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.name = "mes_v11_0",
+ .early_init = mes_v11_0_early_init,
.late_init = mes_v11_0_late_init,
.sw_init = mes_v11_0_sw_init,
.sw_fini = mes_v11_0_sw_fini,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3e51e773f92b..15e7cbeae75b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -114,7 +114,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
return;
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 6fa7090bc6cb..73afbf2facc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -134,7 +134,7 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 0e664d0cc8d5..278e32db878d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -234,7 +234,7 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 4638ea7c2eec..fcf2813e70db 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -164,7 +164,7 @@ static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
index 16cc82215e2e..e9dcd6fcde7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
@@ -188,7 +188,7 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 6bdf2ef0298d..c8d478f2afdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -188,7 +188,7 @@ static void mmhub_v3_0_1_init_system_aperture_regs(struct amdgpu_device *adev)
adev->gmc.vram_end >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
index 45465acaa943..c30e40e52fb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
@@ -181,7 +181,7 @@ static void mmhub_v3_0_2_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 445cb06b9d26..72083e96222f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -136,7 +136,7 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15_OFFSET(
MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 12906ba74462..63725b2ebc03 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -404,6 +404,11 @@ static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}
+static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev)
+{
+ xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
@@ -411,4 +416,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
.req_init_data = xgpu_ai_request_init_data,
+ .ras_poison_handler = xgpu_ai_ras_poison_handler,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index fa7e13e0459e..af1a784696bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -39,6 +39,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
+ IDH_RAS_POISON = 202,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index e07757eea7ad..cae1aaa4ddb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -426,6 +426,11 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
+static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev)
+{
+ xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -433,4 +438,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.reset_gpu = xgpu_nv_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_nv_mailbox_trans_msg,
+ .ras_poison_handler = xgpu_nv_ras_poison_handler,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 73887b0aa1d6..d0221ce08769 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -39,6 +39,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
+ IDH_RAS_POISON = 202,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 9de46fa8f46c..e1b7fca09666 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -47,83 +47,17 @@ MODULE_FIRMWARE("amdgpu/raven_ta.bin");
static int psp_v10_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
- char fw_name[30];
+ char ucode_prefix[30];
int err = 0;
- const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- chip_name = "raven2";
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- chip_name = "picasso";
- else
- chip_name = "raven";
- break;
- default: BUG();
- }
-
- err = psp_init_asd_microcode(psp, chip_name);
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
- goto out;
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v10.0: Failed to load firmware \"%s\"\n",
- fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out2;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)
- adev->psp.ta_fw->data;
- adev->psp.hdcp_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
-
- adev->psp.dtm_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->dtm.offset_bytes);
-
- adev->psp.securedisplay_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->securedisplay.fw_version);
- adev->psp.securedisplay_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->securedisplay.size_bytes);
- adev->psp.securedisplay_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
-
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- }
-
- return 0;
-
-out2:
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
-out:
- if (err) {
- dev_err(adev->dev,
- "psp v10.0: Failed to load firmware \"%s\"\n",
- fw_name);
- }
-
- return err;
+ return err;
+
+ return psp_init_ta_microcode(psp, ucode_prefix);
}
static int psp_v10_0_ring_create(struct psp_context *psp,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index bd3e3e23a939..8f84fe40abbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -88,159 +88,56 @@ MODULE_FIRMWARE("amdgpu/beige_goby_ta.bin");
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
- char fw_name[PSP_FW_NAME_LEN];
+ char ucode_prefix[30];
int err = 0;
- const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(11, 0, 2):
- chip_name = "vega20";
- break;
- case IP_VERSION(11, 0, 0):
- chip_name = "navi10";
- break;
- case IP_VERSION(11, 0, 5):
- chip_name = "navi14";
- break;
- case IP_VERSION(11, 0, 9):
- chip_name = "navi12";
- break;
- case IP_VERSION(11, 0, 4):
- chip_name = "arcturus";
- break;
- case IP_VERSION(11, 0, 7):
- chip_name = "sienna_cichlid";
- break;
- case IP_VERSION(11, 0, 11):
- chip_name = "navy_flounder";
- break;
- case IP_VERSION(11, 5, 0):
- chip_name = "vangogh";
- break;
- case IP_VERSION(11, 0, 12):
- chip_name = "dimgrey_cavefish";
- break;
- case IP_VERSION(11, 0, 13):
- chip_name = "beige_goby";
- break;
- default:
- BUG();
- }
-
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 4):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out2;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.xgmi_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->xgmi.fw_version);
- adev->psp.xgmi_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->xgmi.size_bytes);
- adev->psp.xgmi_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.ras_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->ras.fw_version);
- adev->psp.ras_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->ras.size_bytes);
- adev->psp.ras_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->ras.offset_bytes);
- }
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out2;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.hdcp_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(
- ta_hdr->header.ucode_array_offset_bytes);
-
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-
- adev->psp.dtm_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context
- .bin_desc.start_addr +
- le32_to_cpu(ta_hdr->dtm.offset_bytes);
- }
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
break;
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
- err = psp_init_sos_microcode(psp, chip_name);
- if (err)
- return err;
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
break;
case IP_VERSION(11, 5, 0):
- err = psp_init_asd_microcode(psp, chip_name);
- if (err)
- return err;
- err = psp_init_toc_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
+ err = psp_init_toc_microcode(psp, ucode_prefix);
break;
default:
BUG();
}
- return 0;
-
-out2:
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 8ed2281b6557..fcd708eae75c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -48,83 +48,25 @@ MODULE_FIRMWARE("amdgpu/green_sardine_ta.bin");
static int psp_v12_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
- char fw_name[30];
+ char ucode_prefix[30];
int err = 0;
- const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_RENOIR:
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- chip_name = "renoir";
- else
- chip_name = "green_sardine";
- break;
- default:
- BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v12.0: Failed to load firmware \"%s\"\n",
- fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)
- adev->psp.ta_fw->data;
- adev->psp.hdcp_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
-
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-
- adev->psp.dtm_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->dtm.offset_bytes);
-
- if (adev->apu_flags & AMD_APU_IS_RENOIR) {
- adev->psp.securedisplay_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->securedisplay.fw_version);
- adev->psp.securedisplay_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->securedisplay.size_bytes);
- adev->psp.securedisplay_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
- }
- }
-
- return 0;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
-out:
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- if (err) {
- dev_err(adev->dev,
- "psp v12.0: Failed to load firmware \"%s\"\n",
- fw_name);
- }
+ /* only supported on renoir */
+ if (!(adev->apu_flags & AMD_APU_IS_RENOIR))
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
- return err;
+ return 0;
}
static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
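Instead of parsing the TA header conditionally, the converted psp_v12_0 function lets psp_init_ta_microcode() fill in every descriptor and then zeroes the securedisplay bin_desc on parts that do not ship that TA (anything that is not Renoir). A hedged sketch of how a zeroed descriptor is typically consumed later; the guard below is representative of how optional TAs are skipped, not a verbatim excerpt from the kernel:

	/* illustrative: skip a TA whose firmware descriptor was zeroed out */
	if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
	    !psp->securedisplay_context.context.bin_desc.start_addr) {
		dev_dbg(psp->adev->dev, "securedisplay TA not present, skipping\n");
		return 0;
	}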
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index e6a26a7e5e5e..d62fcc77af95 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -70,32 +70,19 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
char ucode_prefix[30];
int err = 0;
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(13, 0, 2):
- chip_name = "aldebaran";
- break;
- case IP_VERSION(13, 0, 1):
- case IP_VERSION(13, 0, 3):
- chip_name = "yellow_carp";
- break;
- default:
- amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- chip_name = ucode_prefix;
- break;
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 2):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
/* It's not necessary to load ras ta on Guest side */
if (!amdgpu_sriov_vf(adev)) {
- err = psp_init_ta_microcode(&adev->psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
}
@@ -105,21 +92,21 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
- err = psp_init_toc_microcode(psp, chip_name);
+ err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
/* It's not necessary to load ras ta on Guest side */
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
index 9d4e24e518e8..d5ba58eba3e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -35,25 +35,17 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_4_ta.bin");
static int psp_v13_0_4_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
char ucode_prefix[30];
int err = 0;
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(13, 0, 4):
- amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- chip_name = ucode_prefix;
- break;
- default:
- BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 4):
- err = psp_init_toc_microcode(psp, chip_name);
+ err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 157147c6c94e..f6b75e3e47ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -57,26 +57,18 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
static int psp_v3_1_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
+ char ucode_prefix[30];
int err = 0;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- chip_name = "vega10";
- break;
- case CHIP_VEGA12:
- chip_name = "vega12";
- break;
- default: BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index c52d246a1d96..fd2a7b66ac56 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -113,10 +113,9 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/**
@@ -151,10 +150,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
@@ -176,10 +172,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
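Every SDMA hunk in this series makes the same substitution: request_firmware() followed by amdgpu_ucode_validate() becomes a single amdgpu_ucode_request() call, and release_firmware() plus the manual NULL assignment becomes amdgpu_ucode_release(), which takes the pointer's address so the caller cannot forget to clear it. A sketch of the before/after shape, assuming only the helper semantics implied by the diff:

	/* old pattern: request, validate, release and clear by hand */
	err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
	if (!err)
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
	if (err) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}

	/* new pattern: one helper to load and validate, one to drop and clear */
	err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
	if (err)
		amdgpu_ucode_release(&adev->sdma.instance[i].fw);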
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 486d9b5c1b9e..e572389089d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -250,10 +250,9 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/**
@@ -309,10 +308,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
@@ -332,10 +328,8 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4d780e4430e7..017ae298558e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -575,60 +575,17 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
// vega10 real chip need to use PSP to load firmware
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
- char fw_name[30];
int ret, i;
- DRM_DEBUG("\n");
-
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
- case IP_VERSION(4, 0, 0):
- chip_name = "vega10";
- break;
- case IP_VERSION(4, 0, 1):
- chip_name = "vega12";
- break;
- case IP_VERSION(4, 2, 0):
- chip_name = "vega20";
- break;
- case IP_VERSION(4, 1, 0):
- case IP_VERSION(4, 1, 1):
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- chip_name = "raven2";
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- chip_name = "picasso";
- else
- chip_name = "raven";
- break;
- case IP_VERSION(4, 2, 2):
- chip_name = "arcturus";
- break;
- case IP_VERSION(4, 1, 2):
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- chip_name = "renoir";
- else
- chip_name = "green_sardine";
- break;
- case IP_VERSION(4, 4, 0):
- chip_name = "aldebaran";
- break;
- default:
- BUG();
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
/* Arcturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
- ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
+ ret = amdgpu_sdma_init_microcode(adev, 0, true);
break;
} else {
- ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ ret = amdgpu_sdma_init_microcode(adev, i, false);
if (ret)
return ret;
}
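With the firmware file name now derived inside amdgpu_sdma_init_microcode(), callers pass only the instance index and a flag saying whether one image is shared by all instances; Arcturus and Aldebaran ship a single SDMA binary, so they load instance 0 once with the flag set. The call shapes below mirror the hunk above; the meaning of the boolean is inferred from these call sites rather than from the helper's declaration:

	/* one shared image reused for every instance */
	ret = amdgpu_sdma_init_microcode(adev, 0, true);

	/* or one image per instance (the helper builds the per-instance file name) */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		ret = amdgpu_sdma_init_microcode(adev, i, false);
		if (ret)
			return ret;
	}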
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index d4d9f196db83..1941b3b7c5d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -237,39 +237,13 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
// emulation only, won't work on real chip
// navi10 real chip need to use PSP to load firmware
static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
-{
- const char *chip_name;
- char fw_name[40];
- int ret, i;
+{
+ int ret, i;
if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
return 0;
- DRM_DEBUG("\n");
-
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
- case IP_VERSION(5, 0, 0):
- chip_name = "navi10";
- break;
- case IP_VERSION(5, 0, 2):
- chip_name = "navi14";
- break;
- case IP_VERSION(5, 0, 5):
- chip_name = "navi12";
- break;
- case IP_VERSION(5, 0, 1):
- chip_name = "cyan_skillfish2";
- break;
- default:
- BUG();
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ ret = amdgpu_sdma_init_microcode(adev, i, false);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 809eca54fc61..8e445eb9dd49 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -89,59 +89,6 @@ static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
return base + internal_offset;
}
-/**
- * sdma_v5_2_init_microcode - load ucode images from disk
- *
- * @adev: amdgpu_device pointer
- *
- * Use the firmware interface to load the ucode images into
- * the driver (not loaded into hw).
- * Returns 0 on success, error on failure.
- */
-
-// emulation only, won't work on real chip
-// navi10 real chip need to use PSP to load firmware
-static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
-{
- const char *chip_name;
- char fw_name[40];
-
- DRM_DEBUG("\n");
-
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
- case IP_VERSION(5, 2, 0):
- chip_name = "sienna_cichlid_sdma";
- break;
- case IP_VERSION(5, 2, 2):
- chip_name = "navy_flounder_sdma";
- break;
- case IP_VERSION(5, 2, 1):
- chip_name = "vangogh_sdma";
- break;
- case IP_VERSION(5, 2, 4):
- chip_name = "dimgrey_cavefish_sdma";
- break;
- case IP_VERSION(5, 2, 5):
- chip_name = "beige_goby_sdma";
- break;
- case IP_VERSION(5, 2, 3):
- chip_name = "yellow_carp_sdma";
- break;
- case IP_VERSION(5, 2, 6):
- chip_name = "sdma_5_2_6";
- break;
- case IP_VERSION(5, 2, 7):
- chip_name = "sdma_5_2_7";
- break;
- default:
- BUG();
- }
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
-
- return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
-}
-
static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
@@ -809,12 +756,6 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000);
}
- /* TODO: check whether can submit a doorbell request to raise
- * a doorbell fence to exit gfxoff.
- */
- if (adev->in_s0ix)
- amdgpu_gfx_off_ctrl(adev, false);
-
sdma_v5_2_soft_reset(adev);
/* unhalt the MEs */
sdma_v5_2_enable(adev, true);
@@ -823,8 +764,6 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
/* start the gfx rings and rlc compute queues */
r = sdma_v5_2_gfx_resume(adev);
- if (adev->in_s0ix)
- amdgpu_gfx_off_ctrl(adev, true);
if (r)
return r;
r = sdma_v5_2_rlc_resume(adev);
@@ -1296,7 +1235,7 @@ static int sdma_v5_2_sw_init(void *handle)
return r;
}
- r = sdma_v5_2_init_microcode(adev);
+ r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 049c26a45d85..bf1fa5e8d2f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -78,29 +78,6 @@ static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
return base + internal_offset;
}
-/**
- * sdma_v6_0_init_microcode - load ucode images from disk
- *
- * @adev: amdgpu_device pointer
- *
- * Use the firmware interface to load the ucode images into
- * the driver (not loaded into hw).
- * Returns 0 on success, error on failure.
- */
-static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30];
- char ucode_prefix[30];
-
- DRM_DEBUG("\n");
-
- amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-
- return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
-}
-
static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
@@ -1260,7 +1237,7 @@ static int sdma_v6_0_sw_init(void *handle)
if (r)
return r;
- r = sdma_v6_0_init_microcode(adev);
+ r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
index cf8ff064dc72..00d8bdb8254f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -55,10 +55,10 @@ enum ta_securedisplay_status {
TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,/* Maximum Value for status*/
};
-/** @enum ta_securedisplay_max_phy
+/** @enum ta_securedisplay_phy_ID
* Physical ID number to use for reading corresponding DIO Scratch register for ROI
*/
-enum ta_securedisplay_max_phy {
+enum ta_securedisplay_phy_ID {
TA_SECUREDISPLAY_PHY0 = 0,
TA_SECUREDISPLAY_PHY1 = 1,
TA_SECUREDISPLAY_PHY2 = 2,
@@ -139,16 +139,16 @@ union ta_securedisplay_cmd_output {
uint32_t reserved[4];
};
-/** @struct securedisplay_cmd
- * Secure Display Command which is shared buffer memory
- */
-struct securedisplay_cmd {
- uint32_t cmd_id; /* +0 Bytes Command ID */
- enum ta_securedisplay_status status; /* +4 Bytes Status of Secure Display TA */
- uint32_t reserved[2]; /* +8 Bytes Reserved */
- union ta_securedisplay_cmd_input securedisplay_in_message; /* +16 Bytes Input Buffer */
- union ta_securedisplay_cmd_output securedisplay_out_message;/* +32 Bytes Output Buffer */
- /**@note Total 48 Bytes */
+/** @struct ta_securedisplay_cmd
+* Secure display command which is shared buffer memory
+*/
+struct ta_securedisplay_cmd {
+ uint32_t cmd_id; /**< +0 Bytes Command ID */
+ enum ta_securedisplay_status status; /**< +4 Bytes Status code returned by the secure display TA */
+ uint32_t reserved[2]; /**< +8 Bytes Reserved */
+ union ta_securedisplay_cmd_input securedisplay_in_message; /**< +16 Bytes Command input buffer */
+ union ta_securedisplay_cmd_output securedisplay_out_message; /**< +32 Bytes Command output buffer */
+ /**@note Total 48 Bytes */
};
#endif //_TA_SECUREDISPLAY_IF_H
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index b7da4528cf0a..da394bc06bba 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -340,29 +340,13 @@ static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
}
}
-static uint32_t umc_v8_10_query_ras_poison_mode_per_channel(
- struct amdgpu_device *adev,
- uint32_t umc_reg_offset)
-{
- uint32_t ecc_ctrl_addr, ecc_ctrl;
-
- ecc_ctrl_addr =
- SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl);
- ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
- umc_reg_offset) * 4);
-
- return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn);
-}
-
static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
{
- uint32_t umc_reg_offset = 0;
-
- /* Enabling fatal error in umc node0 instance0 channel0 will be
- * considered as fatal error mode
+ /*
+ * Force return true, because UMCCH0_0_GeccCtrl
+ * is not accessible from host side
*/
- umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0);
- return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
+ return true;
}
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index f0fbcda76f5e..c305b2cb8490 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -57,11 +57,12 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
/**
- * vcn_v1_0_early_init - set function pointers
+ * vcn_v1_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v1_0_early_init(void *handle)
{
@@ -75,7 +76,7 @@ static int vcn_v1_0_early_init(void *handle)
jpeg_v1_0_early_init(handle);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 08871bad9994..4b4cd88414e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -62,11 +62,12 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
- * vcn_v2_0_early_init - set function pointers
+ * vcn_v2_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v2_0_early_init(void *handle)
{
@@ -81,7 +82,7 @@ static int vcn_v2_0_early_init(void *handle)
vcn_v2_0_set_enc_ring_funcs(adev);
vcn_v2_0_set_irq_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index ec87b00f2e05..b0b0e69c6a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -71,11 +71,12 @@ static int amdgpu_ih_clientid_vcns[] = {
};
/**
- * vcn_v2_5_early_init - set function pointers
+ * vcn_v2_5_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v2_5_early_init(void *handle)
{
@@ -107,7 +108,7 @@ static int vcn_v2_5_early_init(void *handle)
vcn_v2_5_set_irq_funcs(adev);
vcn_v2_5_set_ras_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 9c8b5fd99037..bd228512424a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -78,11 +78,12 @@ static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
/**
- * vcn_v3_0_early_init - set function pointers
+ * vcn_v3_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v3_0_early_init(void *handle)
{
@@ -109,7 +110,7 @@ static int vcn_v3_0_early_init(void *handle)
vcn_v3_0_set_enc_ring_funcs(adev);
vcn_v3_0_set_irq_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 1e2b22299975..a79b6088374b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -68,11 +68,12 @@ static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
/**
- * vcn_v4_0_early_init - set function pointers
+ * vcn_v4_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v4_0_early_init(void *handle)
{
@@ -88,7 +89,7 @@ static int vcn_v4_0_early_init(void *handle)
vcn_v4_0_set_irq_funcs(adev);
vcn_v4_0_set_ras_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
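All four VCN variants receive the same tweak: early_init now finishes with amdgpu_vcn_early_init(), so the VCN firmware is requested as soon as the function pointers are set rather than waiting for sw_init. A rough sketch of what such an early-init helper would look like if it reuses the ucode helpers adopted elsewhere in this series; this is an assumption for illustration, not the actual helper body:

int amdgpu_vcn_early_init(struct amdgpu_device *adev)
{
	char ucode_prefix[30];
	char fw_name[40];

	/* assumed: decode something like "vcn_4_0_0" from the VCN/UVD IP version */
	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);

	/* assumed: load and validate the image into adev->vcn.fw */
	return amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
}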
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 6d291aa6386b..f79b8e964140 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1127,8 +1127,13 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
}
/* Update the VRAM usage count */
- if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ uint64_t size = args->size;
+
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
+ size >>= 1;
+ WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
+ }
mutex_unlock(&p->mutex);
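The VRAM accounting fix above carries a small arithmetic subtlety: for AQL queue allocations the size passed through the ioctl covers a doubled mapping (the queue buffer is effectively mapped twice so packet reads can wrap), so only half of args->size corresponds to actual VRAM backing, and the result is page-aligned to match how the allocation is carved out. A short worked sketch using the names from the hunk, with an assumed request size:

	uint64_t size = args->size;              /* e.g. 0x3000 requested for an AQL queue */

	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
		size >>= 1;                      /* half is the real backing: 0x1800 */

	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size)); /* 0x2000 with 4 KiB pages */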
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index b8936340742b..3de7f616a001 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -262,23 +262,12 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_FIJI:
- gfx_target_version = 80003;
- f2g = &gfx_v8_kfd2kgd;
- break;
case CHIP_POLARIS10:
gfx_target_version = 80003;
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_POLARIS11:
- gfx_target_version = 80003;
- if (!vf)
- f2g = &gfx_v8_kfd2kgd;
- break;
case CHIP_POLARIS12:
- gfx_target_version = 80003;
- if (!vf)
- f2g = &gfx_v8_kfd2kgd;
- break;
case CHIP_VEGAM:
gfx_target_version = 80003;
if (!vf)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ecb4c3abc629..c06ada0844ba 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -200,7 +200,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
if (q->wptr_bo) {
- wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va;
+ wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
}
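The MES write-pointer fix replaces a GPUVA-relative offset with a plain in-page offset: the wptr BO occupies a single page, so masking the write pointer's address with (PAGE_SIZE - 1) yields the byte offset inside that page, which is then added to the page's MC base taken from the TTM resource. A small worked example of the address math, with an assumed pointer value:

	/* assume write_ptr lands at an address ending in 0xA40 inside its 4 KiB page */
	wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);            /* 0xA40 */
	queue_input.wptr_mc_addr =
		((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;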
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 10048ce16aea..de8ce72344fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -1027,8 +1027,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
/* Disable SVM support capability */
pgmap->type = 0;
if (pgmap->type == MEMORY_DEVICE_PRIVATE)
- devm_release_mem_region(adev->dev, res->start,
- res->end - res->start + 1);
+ devm_release_mem_region(adev->dev, res->start, resource_size(res));
return PTR_ERR(r);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 51b1683ac5c1..71db24fefe05 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1563,6 +1563,8 @@ err_free_pdd:
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct file *drm_file)
{
+ struct amdgpu_fpriv *drv_priv;
+ struct amdgpu_vm *avm;
struct kfd_process *p;
struct kfd_dev *dev;
int ret;
@@ -1573,10 +1575,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (pdd->drm_priv)
return -EBUSY;
+ ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
+ if (ret)
+ return ret;
+ avm = &drv_priv->vm;
+
p = pdd->process;
dev = pdd->dev;
- ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file,
+ ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
&p->kgd_process_info,
&p->ef);
if (ret) {
@@ -1593,7 +1600,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (ret)
goto err_init_cwsr;
- ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid);
+ ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
if (ret)
goto err_set_pasid;
@@ -1607,6 +1614,7 @@ err_init_cwsr:
kfd_process_device_destroy_ib_mem(pdd);
err_reserve_ib_mem:
pdd->drm_priv = NULL;
+ amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 814f99888ab1..dc6fd6967050 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/sched/task.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -570,6 +571,15 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
goto reserve_bo_failed;
}
+ if (clear) {
+ r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
+ if (r) {
+ pr_debug("failed %d to sync bo\n", r);
+ amdgpu_bo_unreserve(bo);
+ goto reserve_bo_failed;
+ }
+ }
+
r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (r) {
pr_debug("failed %d to reserve bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index bceb1a5b2518..3fdaba56be6f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -801,7 +801,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
p2plink->attr.name = "properties";
p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&iolink->attr);
+ sysfs_attr_init(&p2plink->attr);
ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 50c783e19f5a..4300ce98ce8d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -210,7 +210,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *amdgpu_dm_connector,
- uint32_t link_index,
+ u32 link_index,
struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
struct amdgpu_encoder *aencoder,
@@ -262,7 +262,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
- uint32_t v_blank_start, v_blank_end, h_position, v_position;
+ u32 v_blank_start, v_blank_end, h_position, v_position;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
@@ -361,7 +361,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
struct drm_pending_vblank_event *e;
- uint32_t vpos, hpos, v_blank_start, v_blank_end;
+ u32 vpos, hpos, v_blank_start, v_blank_end;
bool vrr_active;
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@@ -648,7 +648,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct dc_link *link;
- uint8_t link_index = 0;
+ u8 link_index = 0;
struct drm_device *dev;
if (adev == NULL)
@@ -749,7 +749,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_display_manager *dm = &adev->dm;
struct dmcub_trace_buf_entry entry = { 0 };
- uint32_t count = 0;
+ u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
struct dc_link *plink = NULL;
@@ -1015,7 +1015,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
struct dmub_srv_hw_params hw_params;
enum dmub_status status;
const unsigned char *fw_inst_const, *fw_bss_data;
- uint32_t i, fw_inst_const_size, fw_bss_data_size;
+ u32 i, fw_inst_const_size, fw_bss_data_size;
bool has_hw_support;
if (!dmub_srv)
@@ -1176,10 +1176,10 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
- uint64_t pt_base;
- uint32_t logical_addr_low;
- uint32_t logical_addr_high;
- uint32_t agp_base, agp_bot, agp_top;
+ u64 pt_base;
+ u32 logical_addr_low;
+ u32 logical_addr_high;
+ u32 agp_base, agp_bot, agp_top;
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
memset(pa_config, 0, sizeof(*pa_config));
@@ -1642,7 +1642,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+ adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctxs) {
+ DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
+ }
#endif
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(&adev->dm.dmub_aux_transfer_done);
@@ -1737,10 +1740,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- if (adev->dm.crc_rd_wrk) {
- flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
- kfree(adev->dm.crc_rd_wrk);
- adev->dm.crc_rd_wrk = NULL;
+ if (adev->dm.secure_display_ctxs) {
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ }
+ }
+ kfree(adev->dm.secure_display_ctxs);
+ adev->dm.secure_display_ctxs = NULL;
}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -1875,25 +1883,17 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
- r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
- if (r == -ENOENT) {
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+ if (r == -ENODEV) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
adev->dm.fw_dmcu = NULL;
return 0;
}
if (r) {
- dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
- fw_name_dmcu);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
- if (r) {
dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
return r;
}
@@ -1939,7 +1939,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
struct dmub_srv_fb_info *fb_info;
struct dmub_srv *dmub_srv;
const struct dmcub_firmware_header_v1_0 *hdr;
- const char *fw_name_dmub;
enum dmub_asic dmub_asic;
enum dmub_status status;
int r;
@@ -1947,73 +1946,46 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(2, 1, 0):
dmub_asic = DMUB_ASIC_DCN21;
- fw_name_dmub = FIRMWARE_RENOIR_DMUB;
- if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
- fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
break;
case IP_VERSION(3, 0, 0):
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
- } else {
+ else
dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
- }
break;
case IP_VERSION(3, 0, 1):
dmub_asic = DMUB_ASIC_DCN301;
- fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
break;
case IP_VERSION(3, 0, 2):
dmub_asic = DMUB_ASIC_DCN302;
- fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
break;
case IP_VERSION(3, 0, 3):
dmub_asic = DMUB_ASIC_DCN303;
- fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
break;
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
- fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
break;
case IP_VERSION(3, 1, 4):
dmub_asic = DMUB_ASIC_DCN314;
- fw_name_dmub = FIRMWARE_DCN_314_DMUB;
break;
case IP_VERSION(3, 1, 5):
dmub_asic = DMUB_ASIC_DCN315;
- fw_name_dmub = FIRMWARE_DCN_315_DMUB;
break;
case IP_VERSION(3, 1, 6):
dmub_asic = DMUB_ASIC_DCN316;
- fw_name_dmub = FIRMWARE_DCN316_DMUB;
break;
case IP_VERSION(3, 2, 0):
dmub_asic = DMUB_ASIC_DCN32;
- fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
break;
case IP_VERSION(3, 2, 1):
dmub_asic = DMUB_ASIC_DCN321;
- fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
default:
/* ASIC doesn't support DMUB. */
return 0;
}
- r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
- if (r) {
- DRM_ERROR("DMUB firmware loading failed: %d\n", r);
- return 0;
- }
-
- r = amdgpu_ucode_validate(adev->dm.dmub_fw);
- if (r) {
- DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
- return 0;
- }
-
hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
@@ -2080,7 +2052,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
* TODO: Move this into GART.
*/
r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->dm.dmub_bo,
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
if (r)
@@ -2135,11 +2109,8 @@ static int dm_sw_fini(void *handle)
adev->dm.dmub_srv = NULL;
}
- release_firmware(adev->dm.dmub_fw);
- adev->dm.dmub_fw = NULL;
-
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
+ amdgpu_ucode_release(&adev->dm.dmub_fw);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
return 0;
}
@@ -2165,6 +2136,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
DRM_ERROR("DM_MST: Failed to start MST\n");
aconnector->dc_link->type =
dc_connection_single;
+ ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
break;
}
}
@@ -2486,7 +2459,7 @@ struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- uint32_t i;
+ u32 i;
struct drm_connector_state *new_con_state;
struct drm_connector *connector;
struct drm_crtc *crtc_from_state;
@@ -2734,12 +2707,14 @@ static int dm_resume(void *handle)
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->dc_link)
+ continue;
+
/*
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->dc_link &&
- aconnector->dc_link->type == dc_connection_mst_branch)
+ if (aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -3117,8 +3092,8 @@ static void handle_hpd_irq(void *param)
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
- uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
- uint8_t dret;
+ u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ u8 dret;
bool new_irq_handled = false;
int dpcd_addr;
int dpcd_bytes_to_read;
@@ -3146,7 +3121,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
while (dret == dpcd_bytes_to_read &&
process_count < max_process_count) {
- uint8_t retry;
+ u8 retry;
dret = 0;
process_count++;
@@ -3165,7 +3140,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
dpcd_bytes_to_read - 1;
for (retry = 0; retry < 3; retry++) {
- uint8_t wret;
+ u8 wret;
wret = drm_dp_dpcd_write(
&aconnector->dm_dp_aux.aux,
@@ -4179,12 +4154,12 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
struct amdgpu_display_manager *dm = &adev->dm;
- int32_t i;
+ s32 i;
struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- uint32_t link_cnt;
- int32_t primary_planes;
+ u32 link_cnt;
+ s32 primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
@@ -4361,6 +4336,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_set_panel_orientation(&aconnector->base);
}
+ /* If we didn't find a panel, notify the acpi video detection */
+ if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
+ acpi_video_report_nolcd();
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -4500,6 +4479,61 @@ DEVICE_ATTR_WO(s3_debug);
#endif
+static int dm_init_microcode(struct amdgpu_device *adev)
+{
+ char *fw_name_dmub;
+ int r;
+
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
+ break;
+ case IP_VERSION(3, 0, 0):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ else
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ break;
+ case IP_VERSION(3, 0, 1):
+ fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 2):
+ fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 3):
+ fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+ break;
+ case IP_VERSION(3, 1, 4):
+ fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+ break;
+ case IP_VERSION(3, 1, 5):
+ fw_name_dmub = FIRMWARE_DCN_315_DMUB;
+ break;
+ case IP_VERSION(3, 1, 6):
+ fw_name_dmub = FIRMWARE_DCN316_DMUB;
+ break;
+ case IP_VERSION(3, 2, 0):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+ break;
+ case IP_VERSION(3, 2, 1):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+ if (r)
+ DRM_ERROR("DMUB firmware loading failed: %d\n", r);
+ return r;
+}
+
static int dm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -4632,7 +4666,7 @@ static int dm_early_init(void *handle)
#endif
adev->dc_enabled = true;
- return 0;
+ return dm_init_microcode(adev);
}
static bool modereset_required(struct drm_crtc_state *crtc_state)
@@ -4697,7 +4731,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_plane_state *plane_state,
- const uint64_t tiling_flags,
+ const u64 tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
bool tmz_surface,
@@ -4872,7 +4906,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
static inline void fill_dc_dirty_rect(struct drm_plane *plane,
struct rect *dirty_rect, int32_t x,
- int32_t y, int32_t width, int32_t height,
+ s32 y, s32 width, s32 height,
int *i, bool ffu)
{
if (*i > DC_MAX_DIRTY_RECTS)
@@ -4928,11 +4962,11 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
struct rect *dirty_rects = flip_addrs->dirty_rects;
- uint32_t num_clips;
+ u32 num_clips;
struct drm_mode_rect *clips;
bool bb_changed;
bool fb_changed;
- uint32_t i = 0;
+ u32 i = 0;
/*
* Cursor plane has it's own dirty rect update interface. See
@@ -5078,7 +5112,7 @@ static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
bool is_y420, int requested_bpc)
{
- uint8_t bpc;
+ u8 bpc;
if (is_y420) {
bpc = 8;
@@ -5622,8 +5656,8 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
uint32_t max_dsc_target_bpp_limit_override)
{
const struct dc_link_settings *verified_link_cap = NULL;
- uint32_t link_bw_in_kbps;
- uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+ u32 link_bw_in_kbps;
+ u32 edp_min_bpp_x16, edp_max_bpp_x16;
struct dc *dc = sink->ctx->dc;
struct dc_dsc_bw_range bw_range = {0};
struct dc_dsc_config dsc_cfg = {0};
@@ -5680,11 +5714,11 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct dsc_dec_dpcd_caps *dsc_caps)
{
struct drm_connector *drm_connector = &aconnector->base;
- uint32_t link_bandwidth_kbps;
+ u32 link_bandwidth_kbps;
struct dc *dc = sink->ctx->dc;
- uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
- uint32_t dsc_max_supported_bw_in_kbps;
- uint32_t max_dsc_target_bpp_limit_override =
+ u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+ u32 dsc_max_supported_bw_in_kbps;
+ u32 max_dsc_target_bpp_limit_override =
drm_connector->display_info.max_dsc_bpp;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
@@ -5831,7 +5865,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
- recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+ recalculate_timing = amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
@@ -6905,7 +6940,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
const struct drm_display_mode *m;
struct drm_display_mode *new_mode;
uint i;
- uint32_t new_modes_count = 0;
+ u32 new_modes_count = 0;
/* Standard FPS values
*
@@ -6919,7 +6954,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
* 60 - Commonly used
* 48,72,96,120 - Multiples of 24
*/
- static const uint32_t common_rates[] = {
+ static const u32 common_rates[] = {
23976, 24000, 25000, 29970, 30000,
48000, 50000, 60000, 72000, 96000, 120000
};
@@ -6935,8 +6970,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
return 0;
for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
- uint64_t target_vtotal, target_vtotal_diff;
- uint64_t num, den;
+ u64 target_vtotal, target_vtotal_diff;
+ u64 num, den;
if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
continue;
@@ -6982,7 +7017,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- if (!edid)
+ if (!(amdgpu_freesync_vid_mode && edid))
return;
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -7178,7 +7213,7 @@ create_i2c(struct ddc_service *ddc_service,
*/
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
- uint32_t link_index,
+ u32 link_index,
struct amdgpu_encoder *aencoder)
{
int res = 0;
@@ -7363,27 +7398,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
-static bool is_content_protection_different(struct drm_connector_state *state,
- const struct drm_connector_state *old_state,
- const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_connector_state *new_conn_state,
+ struct drm_connector_state *old_conn_state,
+ const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
- /* Handle: Type0/1 change */
- if (old_state->hdcp_content_type != state->hdcp_content_type &&
- state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_conn_state->content_protection, new_conn_state->content_protection);
+
+ if (old_crtc_state)
+ pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* hdcp content type change */
+ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+ new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
return true;
}
- /* CP is being re enabled, ignore this
- *
- * Handles: ENABLED -> DESIRED
- */
- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
- state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ /* CP is being re enabled, ignore this */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+ return true;
+ }
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
return false;
}
@@ -7391,9 +7454,9 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: UNDESIRED -> ENABLED
*/
- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
- state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* Stream removed and re-enabled
*
@@ -7403,10 +7466,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: DESIRED -> DESIRED (Special case)
*/
- if (!(old_state->crtc && old_state->crtc->enabled) &&
- state->crtc && state->crtc->enabled &&
+ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+ new_conn_state->crtc && new_conn_state->crtc->enabled &&
connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+ __func__);
return true;
}
@@ -7418,35 +7483,42 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: DESIRED -> DESIRED (Special case)
*/
- if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
- connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+ if (dm_con_state->update_hdcp &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+ __func__);
return true;
}
- /*
- * Handles: UNDESIRED -> UNDESIRED
- * DESIRED -> DESIRED
- * ENABLED -> ENABLED
- */
- if (old_state->content_protection == state->content_protection)
+ if (old_conn_state->content_protection == new_conn_state->content_protection) {
+ if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+ __func__);
+ return true;
+ }
+ pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+ __func__);
+ return false;
+ }
+
+ pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
return false;
+ }
- /*
- * Handles: UNDESIRED -> DESIRED
- * DESIRED -> UNDESIRED
- * ENABLED -> UNDESIRED
- */
- if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+ __func__);
return true;
+ }
- /*
- * Handles: DESIRED -> ENABLED
- */
+ pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
return false;
}
-
#endif
+
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dc_stream_state *stream)
@@ -7662,8 +7734,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
- uint32_t i;
- uint64_t timestamp_ns;
+ u32 i;
+ u64 timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
@@ -7674,7 +7746,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
unsigned long flags;
- uint32_t target_vblank, last_flip_vblank;
+ u32 target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool cursor_update = false;
bool pflip_present = false;
@@ -8112,7 +8184,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
- uint32_t i, j;
+ u32 i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
unsigned long flags;
@@ -8286,10 +8358,61 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+ if (!connector)
+ continue;
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_con_state->content_protection, new_con_state->content_protection);
+
+ if (aconnector->dc_sink) {
+ if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+ pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ aconnector->dc_sink->edid_caps.display_name);
+ }
+ }
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ if (old_crtc_state)
+ pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
new_crtc_state = NULL;
+ old_crtc_state = NULL;
- if (acrtc)
+ if (acrtc) {
new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -8301,11 +8424,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
continue;
}
- if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+ old_con_state, connector, adev->dm.hdcp_workqueue)) {
+ /* When a display is unplugged from the MST hub, the connector is
+ * destroyed within dm_dp_mst_connector_destroy and its hdcp
+ * properties (type, undesired, desired, enabled) are lost. So,
+ * save the hdcp properties into hdcp_work within
+ * amdgpu_dm_atomic_commit_tail. If the same display is plugged
+ * back with the same display index, its hdcp properties will be
+ * retrieved from hdcp_work within dm_dp_mst_get_modes.
+ */
+
+ bool enable_encryption = false;
+
+ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ if (aconnector->dc_link && aconnector->dc_sink &&
+ aconnector->dc_link->type == dc_connection_mst_branch) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+
+ hdcp_w->hdcp_content_type[connector->index] =
+ new_con_state->hdcp_content_type;
+ hdcp_w->content_protection[connector->index] =
+ new_con_state->content_protection;
+ }
+
+ if (new_crtc_state && new_crtc_state->mode_changed &&
+ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
hdcp_update_display(
adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
- new_con_state->hdcp_content_type,
- new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
+ new_con_state->hdcp_content_type, enable_encryption);
+ }
}
#endif
@@ -8403,9 +8559,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source cur_crc_src;
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- struct crc_rd_work *crc_rd_wrk;
-#endif
#endif
/* Count number of newly disabled CRTCs for dropping PM refs later. */
if (old_crtc_state->active && !new_crtc_state->active)
@@ -8418,9 +8571,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
update_stream_irq_parameters(dm, dm_new_crtc_state);
#ifdef CONFIG_DEBUG_FS
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- crc_rd_wrk = dm->crc_rd_wrk;
-#endif
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
@@ -8449,10 +8599,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
acrtc->dm_irq_params.window_param.update_win = true;
+
+ /* It takes 2 frames for the HW to generate a stable CRC when
+ * resuming from suspend, so set skip_frame_cnt to 2.
+ */
acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
- spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- crc_rd_wrk->crtc = crtc;
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
@@ -8675,15 +8827,22 @@ static void get_freesync_config_for_crtc(
struct drm_display_mode *mode = &new_crtc_state->base.mode;
int vrefresh = drm_mode_vrefresh(mode);
bool fs_vid_mode = false;
+ bool drr_active = false;
new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
vrefresh >= aconnector->min_vfreq &&
vrefresh <= aconnector->max_vfreq;
- if (new_crtc_state->vrr_supported) {
+ drr_active = new_crtc_state->vrr_supported &&
+ new_crtc_state->freesync_config.state != VRR_STATE_DISABLED &&
+ new_crtc_state->freesync_config.state != VRR_STATE_INACTIVE &&
+ new_crtc_state->freesync_config.state != VRR_STATE_UNSUPPORTED;
+
+ if (drr_active)
new_crtc_state->stream->ignore_msa_timing_param = true;
- fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+ if (new_crtc_state->vrr_supported) {
+ fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
config.vsif_supported = true;
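The hunk above now sets ignore_msa_timing_param only while DRR is actually engaged (drr_active), while the refresh-range configuration is still filled in whenever vrr_supported is true. A minimal, self-contained model of the new gate; only the state names are taken from the patch, the enum ordering here is arbitrary and purely illustrative:

	#include <stdbool.h>

	/* Illustrative model only -- not the driver's enum. */
	enum vrr_state_model {
		VRR_STATE_UNSUPPORTED,
		VRR_STATE_DISABLED,
		VRR_STATE_INACTIVE,
		VRR_STATE_ACTIVE_VARIABLE,
		VRR_STATE_ACTIVE_FIXED,
	};

	static bool drr_is_active(bool vrr_supported, enum vrr_state_model state)
	{
		/* DRR counts as active only when VRR is supported and the
		 * freesync state is one of the "active" states.
		 */
		return vrr_supported &&
		       state != VRR_STATE_DISABLED &&
		       state != VRR_STATE_INACTIVE &&
		       state != VRR_STATE_UNSUPPORTED;
	}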
@@ -8743,7 +8902,7 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
}
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
- uint64_t num, den, res;
+ u64 num, den, res;
struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
@@ -8846,7 +9005,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
* TODO: Refactor this function to allow this check to work
* in all conditions.
*/
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
goto skip_modeset;
@@ -8881,7 +9041,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!dm_old_crtc_state->stream)
goto skip_modeset;
- if (dm_new_crtc_state->stream &&
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
is_timing_unchanged_for_freesync(new_crtc_state,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
@@ -8893,7 +9053,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
set_freesync_fixed_config(dm_new_crtc_state);
goto skip_modeset;
- } else if (aconnector &&
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
is_freesync_video_mode(&new_crtc_state->mode,
aconnector)) {
struct drm_display_mode *high_mode;
@@ -9879,7 +10039,7 @@ fail:
static bool is_dp_capable_without_timing_msa(struct dc *dc,
struct amdgpu_dm_connector *amdgpu_dm_connector)
{
- uint8_t dpcd_data;
+ u8 dpcd_data;
bool capable = false;
if (amdgpu_dm_connector->dc_link &&
@@ -9898,7 +10058,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
unsigned int offset,
unsigned int total_length,
- uint8_t *data,
+ u8 *data,
unsigned int length,
struct amdgpu_hdmi_vsdb_info *vsdb)
{
@@ -9953,7 +10113,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
@@ -9994,7 +10154,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
@@ -10010,7 +10170,7 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
@@ -10024,7 +10184,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
- uint8_t *edid_ext = NULL;
+ u8 *edid_ext = NULL;
int i;
bool valid_vsdb_found = false;
@@ -10200,7 +10360,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
}
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
- uint32_t value, const char *func_name)
+ u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
if (address == 0) {
@@ -10215,7 +10375,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
const char *func_name)
{
- uint32_t value;
+ u32 value;
#ifdef DM_CHECK_ADDR_0
if (address == 0) {
DC_ERR("invalid register read; address = 0\n");
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index df3c25e32c65..abbbb3813c1e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -494,11 +494,12 @@ struct amdgpu_display_manager {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
- * @crc_rd_wrk:
+ * @secure_display_ctxs:
*
- * Work to be executed in a separate thread to communicate with PSP.
+ * Stores the ROI information and the work_structs used to command
+ * dmub and psp, one context per crtc.
*/
- struct crc_rd_work *crc_rd_wrk;
+ struct secure_display_context *secure_display_ctxs;
#endif
/**
* @hpd_rx_offload_wq:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 66df2394d7e4..8841c447d0e2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -101,35 +101,44 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
- struct crc_rd_work *crc_rd_wrk;
- struct amdgpu_device *adev;
+ struct secure_display_context *secure_display_ctx;
struct psp_context *psp;
- struct securedisplay_cmd *securedisplay_cmd;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
struct drm_crtc *crtc;
- uint8_t phy_id;
+ struct dc_stream_state *stream;
+ uint8_t phy_inst;
int ret;
- crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work);
- spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- crtc = crc_rd_wrk->crtc;
+ secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
+ crtc = secure_display_ctx->crtc;
if (!crtc) {
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
return;
}
- adev = drm_to_adev(crtc->dev);
- psp = &adev->psp;
- phy_id = crc_rd_wrk->phy_inst;
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
+ psp = &drm_to_adev(crtc->dev)->psp;
+
+ if (!psp->securedisplay_context.context.initialized) {
+ DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n");
+ return;
+ }
+
+ stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
+ phy_inst = stream->link->link_enc_hw_inst;
+ /* The lock is needed because multiple crtcs share the command buffer */
mutex_lock(&psp->securedisplay_context.mutex);
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
- securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id =
- phy_id;
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+
+ /* The PSP TA is expected to finish the data transmission over I2C
+ * within the current frame, even if up to 4 crtcs request to send
+ * in this frame.
+ */
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+
if (!ret) {
if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
@@ -142,17 +151,23 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
- struct crc_fw_work *crc_fw_wrk;
+ struct secure_display_context *secure_display_ctx;
struct amdgpu_display_manager *dm;
+ struct drm_crtc *crtc;
+ struct dc_stream_state *stream;
- crc_fw_wrk = container_of(work, struct crc_fw_work, forward_roi_work);
- dm = crc_fw_wrk->dm;
+ secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work);
+ crtc = secure_display_ctx->crtc;
+
+ if (!crtc)
+ return;
+
+ dm = &drm_to_adev(crtc->dev)->dm;
+ stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
mutex_lock(&dm->dc_lock);
- dc_stream_forward_crc_window(dm->dc, &crc_fw_wrk->rect, crc_fw_wrk->stream, crc_fw_wrk->is_stop_cmd);
+ dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false);
mutex_unlock(&dm->dc_lock);
-
- kfree(crc_fw_wrk);
}
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
@@ -189,6 +204,9 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source)
{
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ int i;
+#endif
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
@@ -200,21 +218,18 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
- /* Enable CRTC CRC generation if necessary. */
+ /* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Disable secure_display if it was enabled */
if (!enable) {
- if (adev->dm.crc_rd_wrk) {
- flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
- spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
-
- if (adev->dm.crc_rd_wrk->crtc == crtc) {
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc == crtc) {
/* stop ROI update on this crtc */
- dc_stream_forward_crc_window(stream_state->ctx->dc,
- NULL, stream_state, true);
- adev->dm.crc_rd_wrk->crtc = NULL;
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ dc_stream_forward_crc_window(stream_state, NULL, true);
}
- spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
}
}
#endif
@@ -347,6 +362,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc);
#endif
@@ -456,14 +472,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
- struct dc_stream_state *stream_state;
struct drm_device *drm_dev = NULL;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_device *adev = NULL;
- struct crc_rd_work *crc_rd_wrk;
- struct crc_fw_work *crc_fw_wrk;
- unsigned long flags1, flags2;
+ struct secure_display_context *secure_display_ctx = NULL;
+ unsigned long flags1;
if (crtc == NULL)
return;
@@ -473,75 +487,74 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
drm_dev = crtc->dev;
spin_lock_irqsave(&drm_dev->event_lock, flags1);
- stream_state = acrtc->dm_irq_params.stream;
cur_crc_src = acrtc->dm_irq_params.crc_src;
/* Early return if CRC capture is not enabled. */
- if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
+ if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
+ !dm_is_crc_source_crtc(cur_crc_src))
goto cleanup;
- if (!dm_is_crc_source_crtc(cur_crc_src))
+ if (!acrtc->dm_irq_params.window_param.activated)
goto cleanup;
- if (!acrtc->dm_irq_params.window_param.activated)
+ if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
+ acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
goto cleanup;
+ }
- if (acrtc->dm_irq_params.window_param.update_win) {
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
- }
+ secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id];
+ if (WARN_ON(secure_display_ctx->crtc != crtc)) {
+ /* The crtc was set when the secure_display_context was created
+ * and is not expected to change here.
+ */
+ secure_display_ctx->crtc = crtc;
+ }
+ if (acrtc->dm_irq_params.window_param.update_win) {
/* prepare work for dmub to update ROI */
- crc_fw_wrk = kzalloc(sizeof(*crc_fw_wrk), GFP_ATOMIC);
- if (!crc_fw_wrk)
- goto cleanup;
-
- INIT_WORK(&crc_fw_wrk->forward_roi_work, amdgpu_dm_forward_crc_window);
- crc_fw_wrk->dm = &adev->dm;
- crc_fw_wrk->stream = stream_state;
- crc_fw_wrk->rect.x = acrtc->dm_irq_params.window_param.x_start;
- crc_fw_wrk->rect.y = acrtc->dm_irq_params.window_param.y_start;
- crc_fw_wrk->rect.width = acrtc->dm_irq_params.window_param.x_end -
+ secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start;
+ secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start;
+ secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end -
acrtc->dm_irq_params.window_param.x_start;
- crc_fw_wrk->rect.height = acrtc->dm_irq_params.window_param.y_end -
+ secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end -
acrtc->dm_irq_params.window_param.y_start;
- schedule_work(&crc_fw_wrk->forward_roi_work);
+ schedule_work(&secure_display_ctx->forward_roi_work);
acrtc->dm_irq_params.window_param.update_win = false;
+
+ /* Statically skip 1 frame, because we may need to wait for the
+ * following before the ROI reaches dmub:
+ * 1. The work is deferred to the system workqueue.
+ * 2. We may need to wait for dc_lock before accessing dmub.
+ */
acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;
} else {
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
- }
-
- if (adev->dm.crc_rd_wrk) {
- crc_rd_wrk = adev->dm.crc_rd_wrk;
- spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2);
- crc_rd_wrk->phy_inst = stream_state->link->link_enc_hw_inst;
- spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2);
- schedule_work(&crc_rd_wrk->notify_ta_work);
- }
+ /* prepare work for psp to read ROI/CRC and send to I2C */
+ schedule_work(&secure_display_ctx->notify_ta_work);
}
cleanup:
spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
}
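The rewritten vblank handler above reduces to a small state machine: honor skip_frame_cnt first, then either forward a new ROI to dmub (and skip one more frame) or ask the PSP TA to read the CRC. A standalone sketch of that behaviour, using hypothetical names rather than the driver's structures:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for dm_irq_params.window_param -- not the driver struct. */
	struct crc_window_model {
		bool activated;
		bool update_win;
		int skip_frame_cnt;
	};

	static const char *handle_vblank(struct crc_window_model *w)
	{
		if (!w->activated)
			return "inactive";
		if (w->skip_frame_cnt) {
			w->skip_frame_cnt -= 1;
			return "skip";
		}
		if (w->update_win) {
			w->update_win = false;
			w->skip_frame_cnt = 1;	/* deferred work + dc_lock may cost a frame */
			return "forward ROI to dmub";
		}
		return "notify PSP TA to read CRC";
	}

	int main(void)
	{
		/* e.g. right after resume, where skip_frame_cnt was preset to 2 */
		struct crc_window_model w = { .activated = true, .update_win = true, .skip_frame_cnt = 2 };
		int i;

		for (i = 0; i < 6; i++)
			printf("frame %d: %s\n", i, handle_vblank(&w));
		return 0;
	}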
-struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void)
+struct secure_display_context *
+amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
- struct crc_rd_work *crc_rd_wrk = NULL;
+ struct secure_display_context *secure_display_ctxs = NULL;
+ int i;
- crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL);
+ secure_display_ctxs = kcalloc(AMDGPU_MAX_CRTCS, sizeof(struct secure_display_context), GFP_KERNEL);
- if (!crc_rd_wrk)
+ if (!secure_display_ctxs)
return NULL;
- spin_lock_init(&crc_rd_wrk->crc_rd_work_lock);
- INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window);
+ INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+ secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base;
+ }
- return crc_rd_wrk;
+ return secure_display_ctxs;
}
#endif
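amdgpu_dm_crtc_secure_display_create_contexts() replaces the old single-work allocator with one context per link, bound to its crtc up front. The call site is not part of this hunk, so the following is only a sketch of how the returned array is presumably stored during DM init and released on teardown:

	/* Hypothetical placement -- the real call site is in amdgpu_dm.c, outside this hunk. */
	adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
	if (!adev->dm.secure_display_ctxs)
		DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");

	/* ... and on teardown ... */
	kfree(adev->dm.secure_display_ctxs);
	adev->dm.secure_display_ctxs = NULL;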
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 71bce608d751..935adca6f048 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -45,7 +45,7 @@ struct crc_window_param {
uint16_t y_start;
uint16_t x_end;
uint16_t y_end;
- /* CRC windwo is activated or not*/
+ /* CRC window is activated or not*/
bool activated;
/* Update crc window during vertical blank or not */
bool update_win;
@@ -53,22 +53,17 @@ struct crc_window_param {
int skip_frame_cnt;
};
-/* read_work for driver to call PSP to read */
-struct crc_rd_work {
+struct secure_display_context {
+ /* work to notify PSP TA */
struct work_struct notify_ta_work;
- /* To protect crc_rd_work carried fields*/
- spinlock_t crc_rd_work_lock;
- struct drm_crtc *crtc;
- uint8_t phy_inst;
-};
-/* forward_work for driver to forward ROI to dmu */
-struct crc_fw_work {
+ /* work to forward ROI to dmcu/dmub */
struct work_struct forward_roi_work;
- struct amdgpu_display_manager *dm;
- struct dc_stream_state *stream;
+
+ struct drm_crtc *crtc;
+
+ /* Region of Interest (ROI) */
struct rect rect;
- bool is_stop_cmd;
};
#endif
@@ -100,11 +95,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
-struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void);
+struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(
+ struct amdgpu_device *adev);
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
-#define amdgpu_dm_crtc_secure_display_create_work()
+#define amdgpu_dm_crtc_secure_display_create_contexts()
#endif
#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 461037a3dd75..ae54a9719910 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1375,16 +1375,11 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -1481,12 +1476,12 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Get CRTC state
@@ -1566,16 +1561,11 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -1670,12 +1660,12 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Safely get CRTC state
@@ -1755,16 +1745,11 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -1859,12 +1844,12 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Get CRTC state
@@ -1940,16 +1925,11 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2041,12 +2021,12 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Get CRTC state
@@ -2120,16 +2100,11 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2181,16 +2156,11 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2257,16 +2227,11 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2333,16 +2298,11 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -3245,46 +3205,24 @@ DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get,
*/
static int crc_win_update_set(void *data, u64 val)
{
- struct drm_crtc *new_crtc = data;
- struct drm_crtc *old_crtc = NULL;
- struct amdgpu_crtc *new_acrtc, *old_acrtc;
- struct amdgpu_device *adev = drm_to_adev(new_crtc->dev);
- struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk;
-
- if (!crc_rd_wrk)
- return 0;
+ struct drm_crtc *crtc = data;
+ struct amdgpu_crtc *acrtc;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
if (val) {
- new_acrtc = to_amdgpu_crtc(new_crtc);
+ acrtc = to_amdgpu_crtc(crtc);
mutex_lock(&adev->dm.dc_lock);
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(new_acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
- spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- if (crc_rd_wrk->crtc) {
- old_crtc = crc_rd_wrk->crtc;
- old_acrtc = to_amdgpu_crtc(old_crtc);
- }
- if (old_crtc && old_crtc != new_crtc) {
- old_acrtc->dm_irq_params.window_param.activated = false;
- old_acrtc->dm_irq_params.window_param.update_win = false;
- old_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
+ acrtc->dm_irq_params.window_param.activated = true;
+ acrtc->dm_irq_params.window_param.update_win = true;
+ acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- new_acrtc->dm_irq_params.window_param.activated = true;
- new_acrtc->dm_irq_params.window_param.update_win = true;
- new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- crc_rd_wrk->crtc = new_crtc;
- } else {
- new_acrtc->dm_irq_params.window_param.activated = true;
- new_acrtc->dm_irq_params.window_param.update_win = true;
- new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- crc_rd_wrk->crtc = new_crtc;
- }
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
spin_unlock_irq(&adev_to_drm(adev)->event_lock);
mutex_unlock(&adev->dm.dc_lock);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 09294ff122fe..bbbf7d0eff82 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -52,6 +52,20 @@ struct hdcp_workqueue {
struct mod_hdcp_link link;
enum mod_hdcp_encryption_status encryption_status;
+
+ /* When a display is unplugged from the MST hub, the connector is
+ * destroyed within dm_dp_mst_connector_destroy and its hdcp
+ * properties (type, undesired, desired, enabled) are lost. So,
+ * save the hdcp properties into hdcp_work within
+ * amdgpu_dm_atomic_commit_tail. If the same display is plugged
+ * back with the same display index, its hdcp properties will be
+ * retrieved from hdcp_work within dm_dp_mst_get_modes.
+ */
+ /* un-desired, desired, enabled */
+ unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX];
+ /* hdcp1.x, hdcp2.x */
+ unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX];
+
uint8_t max_link;
uint8_t *srm;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 1edf7385f8d8..41f35d75d0a8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -32,6 +32,10 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "amdgpu_dm_hdcp.h"
+#endif
+
#include "dc.h"
#include "dm_helpers.h"
@@ -344,6 +348,28 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
+ /* When a display is unplugged from the MST hub, the connector is
+ * destroyed within dm_dp_mst_connector_destroy and its hdcp
+ * properties (type, undesired, desired, enabled) are lost. So,
+ * save the hdcp properties into hdcp_work within
+ * amdgpu_dm_atomic_commit_tail. If the same display is plugged
+ * back with the same display index, its hdcp properties will be
+ * retrieved from hdcp_work within dm_dp_mst_get_modes.
+ */
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (aconnector->dc_sink && connector->state) {
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index];
+
+ connector->state->hdcp_content_type =
+ hdcp_w->hdcp_content_type[connector->index];
+ connector->state->content_protection =
+ hdcp_w->content_protection[connector->index];
+ }
+#endif
+
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);
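The restore above is the counterpart of the save performed in amdgpu_dm_atomic_commit_tail: both sides key the per-link hdcp_workqueue arrays by connector->index. A tiny standalone model of that round trip (all names and the index/value constants here are illustrative, not the driver's):

	#include <stdio.h>

	#define MAX_DISPLAY_INDEX 31

	struct hdcp_work_model {
		unsigned int content_protection[MAX_DISPLAY_INDEX];
		unsigned int hdcp_content_type[MAX_DISPLAY_INDEX];
	};

	int main(void)
	{
		struct hdcp_work_model work = { {0}, {0} };
		unsigned int index = 3;	/* stands in for connector->index of the MST sink */

		/* Save side (commit tail), e.g. content protection DESIRED, HDCP type 0. */
		work.content_protection[index] = 1;
		work.hdcp_content_type[index] = 0;

		/* Restore side (dm_dp_mst_get_modes) when the same sink reappears. */
		printf("restored cp=%u type=%u\n",
		       work.content_protection[index],
		       work.hdcp_content_type[index]);
		return 0;
	}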
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 26291db0a3cf..872d06fe1436 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -122,6 +122,9 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
psr_config.allow_multi_disp_optimizations =
(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
+ if (!psr_su_set_y_granularity(dc, link, stream, &psr_config))
+ return false;
+
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 3ce0ee0d012f..694a9d3d92ae 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -577,8 +577,7 @@ void dcn3_clk_mgr_construct(
void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
- if (clk_mgr->base.bw_params)
- kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr->base.bw_params);
if (clk_mgr->wm_range_table)
dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 200fcec19186..ba9814f88f48 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -783,8 +783,7 @@ void dcn32_clk_mgr_construct(
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
- if (clk_mgr->base.bw_params)
- kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr->base.bw_params);
if (clk_mgr->wm_range_table)
dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 0cb8d1f934d1..2c18c8527079 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -382,16 +382,18 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
}
/**
- * dc_stream_adjust_vmin_vmax:
+ * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
+ * @dc: dc reference
+ * @stream: Initial dc stream state
+ * @adjust: Updated parameters for vertical_total_min and vertical_total_max
*
* Looks up the pipe context of dc_stream_state and updates the
* vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
* Rate, which is a power-saving feature that targets reducing panel
* refresh rate while the screen is static
*
- * @dc: dc reference
- * @stream: Initial dc stream state
- * @adjust: Updated parameters for vertical_total_min and vertical_total_max
+ * Return: %true if the pipe context is found and adjusted;
+ * %false if the pipe context is not found.
*/
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state *stream,
@@ -419,14 +421,17 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
}
/**
- * dc_stream_get_last_used_drr_vtotal - dc_stream_get_last_vrr_vtotal
+ * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
+ * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
*
* @dc: [in] dc reference
* @stream: [in] Initial dc stream state
- * @adjust: [in] Updated parameters for vertical_total_min and
+ * @refresh_rate: [in] new refresh_rate
*
- * Looks up the pipe context of dc_stream_state and gets the last VTOTAL used
- * by DRR (Dynamic Refresh Rate)
+ * Return: %true if the pipe context is found and there is an associated
+ * timing_generator for the DC;
+ * %false if the pipe context is not found or there is no
+ * timing_generator for the DC.
*/
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
struct dc_stream_state *stream,
@@ -518,14 +523,15 @@ dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
}
bool
-dc_stream_forward_crc_window(struct dc *dc,
- struct rect *rect, struct dc_stream_state *stream, bool is_stop)
+dc_stream_forward_crc_window(struct dc_stream_state *stream,
+ struct rect *rect, bool is_stop)
{
struct dmcu *dmcu;
struct dc_dmub_srv *dmub_srv;
struct otg_phy_mux mux_mapping;
struct pipe_ctx *pipe;
int i;
+ struct dc *dc = stream->ctx->dc;
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -566,7 +572,10 @@ dc_stream_forward_crc_window(struct dc *dc,
* once.
*
* By default, only CRC0 is configured, and the entire frame is used to
- * calculate the crc.
+ * calculate the CRC.
+ *
+ * Return: %false if the stream is not found or CRC capture is not supported;
+ * %true if the stream has been configured.
*/
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
struct crc_params *crc_window, bool enable, bool continuous)
@@ -635,7 +644,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
*
* Return:
- * false if stream is not found, or if CRCs are not enabled.
+ * %false if stream is not found, or if CRCs are not enabled.
*/
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
@@ -1740,6 +1749,8 @@ void dc_z10_save_init(struct dc *dc)
*
* Applies given context to the hardware and copy it into current context.
* It's up to the user to release the src context afterwards.
+ *
+ * Return: an enum dc_status result code for the operation
*/
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
@@ -2007,8 +2018,9 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return result == DC_OK;
}
- if (!streams_changed(dc, context->streams, context->stream_count))
+ if (!streams_changed(dc, context->streams, context->stream_count)) {
return DC_OK;
+ }
DC_LOG_DC("%s: %d streams\n",
__func__, context->stream_count);
@@ -3325,6 +3337,7 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
bool subvp_prev_use = false;
+ bool subvp_curr_use = false;
// Once we apply the new subvp context to hardware it won't be in the
// dc->current_state anymore, so we have to cache it before we apply
@@ -3381,6 +3394,15 @@ static void commit_planes_for_stream(struct dc *dc,
break;
}
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ subvp_curr_use = true;
+ break;
+ }
+ }
+
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
struct pipe_ctx *odm_pipe;
@@ -3652,42 +3674,22 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
- /* For phantom pipe OTG enable, it has to be done after any previous pipe
- * that was in use has already been programmed at gotten its double buffer
- * update for "disable".
- */
- if (update_type != UPDATE_TYPE_FAST) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- /* If an active, non-phantom pipe is being transitioned into a phantom
- * pipe, wait for the double buffer update to complete first before we do
- * ANY phantom pipe programming.
- */
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
- old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
- old_pipe->stream_res.tg->funcs->wait_for_state(
- old_pipe->stream_res.tg,
- CRTC_STATE_VBLANK);
- old_pipe->stream_res.tg->funcs->wait_for_state(
- old_pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- }
+ if (subvp_curr_use) {
+ /* If enabling subvp or transitioning from subvp->subvp, enable the
+ * phantom streams before we program front end for the phantom pipes.
+ */
+ if (update_type != UPDATE_TYPE_FAST) {
+ if (dc->hwss.enable_phantom_streams)
+ dc->hwss.enable_phantom_streams(dc, context);
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+ }
- if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
- subvp_prev_use) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
- }
- }
+ if (subvp_prev_use && !subvp_curr_use) {
+ /* If disabling subvp, disable phantom streams after front end
+ * programming has completed (we turn on phantom OTG in order
+ * to complete the plane disable for phantom pipes).
+ */
+ dc->hwss.apply_ctx_to_hw(dc, context);
}
if (update_type != UPDATE_TYPE_FAST)
@@ -4704,7 +4706,7 @@ bool dc_enable_dmub_notifications(struct dc *dc)
/**
* dc_enable_dmub_outbox - Enables DMUB unsolicited notification
*
- * dc: [in] dc structure
+ * @dc: [in] dc structure
*
* Enables DMUB unsolicited notifications to x86 via outbox.
*/
@@ -4905,8 +4907,8 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
/**
* dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption
*
- * @dc [in]: dc structure
- * @hpd_int_enable [in]: 1 for hpd int enable, 0 to disable
+ * @dc: [in] dc structure
+ * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
*
* Submits dpia hpd int enable command to dmub via inbox message
*/
@@ -4987,7 +4989,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
}
/**
- * dc_extended_blank_supported 0 Decide whether extended blank is supported
+ * dc_extended_blank_supported - Decide whether extended blank is supported
*
* @dc: [in] Current DC state
*
@@ -4996,7 +4998,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
* ability to enter z9/z10.
*
* Return:
- * Indicate whether extended blank is supported (true or false)
+ * Indicate whether extended blank is supported (%true or %false)
*/
bool dc_extended_blank_supported(struct dc *dc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 342e906ae26e..ee20b4d3afd4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1916,12 +1916,6 @@ struct dc_link *link_create(const struct link_init_data *init_params)
if (false == dc_link_construct(link, init_params))
goto construct_fail;
- /*
- * Must use preferred_link_setting, not reported_link_cap or verified_link_cap,
- * since struct preferred_link_setting won't be reset after S3.
- */
- link->preferred_link_setting.dpcd_source_device_specific_field_support = true;
-
return link;
construct_fail:
@@ -4655,9 +4649,6 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
if (link_setting != NULL) {
link->preferred_link_setting = *link_setting;
- if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING)
- /* TODO: add dc update for acquiring link res */
- skip_immediate_retrain = true;
} else {
link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index ce8d6a54ca54..651231387043 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -82,7 +82,6 @@ struct dp_hdmi_dongle_signature_data {
#define HDMI_SCDC_STATUS_FLAGS 0x40
#define HDMI_SCDC_ERR_DETECT 0x50
#define HDMI_SCDC_TEST_CONFIG 0xC0
-#define HDMI_SCDC_DEVICE_ID 0xD3
union hdmi_scdc_update_read_data {
uint8_t byte[2];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index dedd1246ce58..d74ffc89810f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4094,6 +4094,12 @@ static void dp_test_send_link_training(struct dc_link *link)
dp_retrain_link_dp_test(link, &link_settings, false);
}
+static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern)
+{
+ return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&
+ test_pattern <= DP_TEST_PATTERN_SQUARE_END);
+}
+
/* TODO Raven hbr2 compliance eye output is unstable
* (toggling on and off) with debugger break
* This causes intermittent PHY automation failure
@@ -4111,6 +4117,8 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
union lane_adjust dpcd_lane_adjust;
unsigned int lane;
struct link_training_settings link_training_settings;
+ unsigned char no_preshoot = 0;
+ unsigned char no_deemphasis = 0;
dpcd_test_pattern.raw = 0;
memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
@@ -4204,8 +4212,21 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
case PHY_TEST_PATTERN_264BIT_CUSTOM:
test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM;
break;
- case PHY_TEST_PATTERN_SQUARE_PULSE:
- test_pattern = DP_TEST_PATTERN_SQUARE_PULSE;
+ case PHY_TEST_PATTERN_SQUARE:
+ test_pattern = DP_TEST_PATTERN_SQUARE;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+ test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED;
+ no_preshoot = 1;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+ test_pattern = DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED;
+ no_deemphasis = 1;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
+ test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED;
+ no_preshoot = 1;
+ no_deemphasis = 1;
break;
default:
test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
@@ -4222,7 +4243,7 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
- if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) {
+ if (is_dp_phy_sqaure_pattern(test_pattern)) {
test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
core_link_read_dpcd(
link,
@@ -4259,8 +4280,10 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
} else if (dp_get_link_encoding_format(&link->cur_link_settings) ==
DP_128b_132b_ENCODING) {
- link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw =
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.level =
dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_preshoot = no_preshoot;
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_deemphasis = no_deemphasis;
}
}
@@ -6114,7 +6137,7 @@ bool dc_link_dp_set_test_pattern(
* MuteAudioEndpoint(pPathMode->pDisplayPath, true);
*/
/* Blank stream */
- pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
+ link->dc->hwss.blank_stream(pipe_ctx);
}
dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
@@ -6178,8 +6201,17 @@ bool dc_link_dp_set_test_pattern(
case DP_TEST_PATTERN_264BIT_CUSTOM:
pattern = PHY_TEST_PATTERN_264BIT_CUSTOM;
break;
- case DP_TEST_PATTERN_SQUARE_PULSE:
- pattern = PHY_TEST_PATTERN_SQUARE_PULSE;
+ case DP_TEST_PATTERN_SQUARE:
+ pattern = PHY_TEST_PATTERN_SQUARE;
+ break;
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+ pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED;
+ break;
+ case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+ pattern = PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED;
+ break;
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
+ pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED;
break;
default:
return false;
@@ -6190,14 +6222,12 @@ bool dc_link_dp_set_test_pattern(
return false;
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE)
+ if (is_dp_phy_sqaure_pattern(test_pattern))
core_link_write_dpcd(link,
DP_LINK_SQUARE_PATTERN,
p_custom_pattern,
1);
-#endif
/* tell receiver that we are sending qualification
* pattern DP 1.2 or later - DP receiver's link quality
* pattern is set using DPCD LINK_QUAL_LANEx_SET
@@ -6554,18 +6584,10 @@ void dpcd_set_source_specific_data(struct dc_link *link)
uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period;
- if (link->preferred_link_setting.dpcd_source_device_specific_field_support) {
- result_write_min_hblank = core_link_write_dpcd(link,
- DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
- sizeof(hblank_size));
-
- if (result_write_min_hblank == DC_ERROR_UNEXPECTED)
- link->preferred_link_setting.dpcd_source_device_specific_field_support = false;
- } else {
- DC_LOG_DC("Sink device does not support 00340h DPCD write. Skipping on purpose.\n");
- }
+ result_write_min_hblank = core_link_write_dpcd(link,
+ DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
+ sizeof(hblank_size));
}
-
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
WPP_BIT_FLAG_DC_DETECTION_DP_CAPS,
"result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'",
@@ -7254,59 +7276,35 @@ void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
bool skip_video_pattern)
{
- struct pipe_ctx *pipes =
- &link->dc->current_state->res_ctx.pipe_ctx[0];
+ struct pipe_ctx *pipe;
unsigned int i;
- bool do_fallback = false;
+ udelay(100);
for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream != NULL &&
- !pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
- pipes[i].stream->link != NULL &&
- pipes[i].stream_res.stream_enc != NULL &&
- pipes[i].stream->link == link) {
- udelay(100);
-
- pipes[i].stream_res.stream_enc->funcs->dp_blank(link,
- pipes[i].stream_res.stream_enc);
-
- /* disable any test pattern that might be active */
- dp_set_hw_test_pattern(link, &pipes[i].link_res,
- DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-
- dp_receiver_power_ctrl(link, false);
-
- link->dc->hwss.disable_stream(&pipes[i]);
- if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only)
- (&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio);
-
- if (link->link_enc)
- link->link_enc->funcs->disable_output(
- link->link_enc,
- SIGNAL_TYPE_DISPLAY_PORT);
-
- /* Clear current link setting. */
- memset(&link->cur_link_settings, 0,
- sizeof(link->cur_link_settings));
-
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- do_fallback = true;
-
- perform_link_training_with_retries(
- link_setting,
- skip_video_pattern,
- LINK_TRAINING_ATTEMPTS,
- &pipes[i],
- SIGNAL_TYPE_DISPLAY_PORT,
- do_fallback);
-
- link->dc->hwss.enable_stream(&pipes[i]);
-
- link->dc->hwss.unblank_stream(&pipes[i],
- link_setting);
+ pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream != NULL &&
+ pipe->stream->link == link &&
+ !pipe->stream->dpms_off &&
+ !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ core_link_disable_stream(pipe);
+ pipe->link_config.dp_link_settings = *link_setting;
+ update_dp_encoder_resources_for_test_harness(
+ link->dc,
+ pipe->stream->ctx->dc->current_state,
+ pipe);
+ }
+ }
- link->dc->hwss.enable_audio_stream(&pipes[i]);
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream != NULL &&
+ pipe->stream->link == link &&
+ !pipe->stream->dpms_off &&
+ !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ core_link_enable_stream(
+ pipe->stream->ctx->dc->current_state,
+ pipe);
}
}
}
@@ -7496,7 +7494,6 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
- DC_LOG_DSC(" ");
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
if (dc_is_dp_signal(stream->signal)) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index da164685547d..06b5f49e0954 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3810,6 +3810,8 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
int i;
struct pipe_ctx *pipe_ctx, *pipe_ctx_check;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx];
if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) ||
!IS_PIPE_SYNCD_VALID(pipe_ctx))
@@ -3820,9 +3822,16 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
pipe_ctx_check = &context->res_ctx.pipe_ctx[i];
if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) &&
- IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx))
- DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
- i, disabled_master_pipe_idx);
+ IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) {
+ /* On dcn32, this error isn't fatal since hw supports odm transition in fast update */
+ if (dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21)
+ DC_LOG_DEBUG("DC: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
+ i, disabled_master_pipe_idx);
+ else
+ DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
+ i, disabled_master_pipe_idx);
+ }
}
}
@@ -3981,3 +3990,42 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
return true;
}
+
+enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc =
+ find_first_free_match_hpo_dp_stream_enc_for_link(
+ &context->res_ctx, dc->res_pool, pipe_ctx->stream);
+
+ if (!pipe_ctx->stream_res.hpo_dp_stream_enc)
+ return DC_NO_STREAM_ENC_RESOURCE;
+
+ update_hpo_dp_stream_engine_usage(
+ &context->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ true);
+ }
+
+ if (pipe_ctx->link_res.hpo_dp_link_enc == NULL) {
+ if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, dc->res_pool, pipe_ctx, pipe_ctx->stream))
+ return DC_NO_LINK_ENC_RESOURCE;
+ }
+ } else {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc) {
+ update_hpo_dp_stream_engine_usage(
+ &context->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false);
+ pipe_ctx->stream_res.hpo_dp_stream_enc = NULL;
+ }
+ if (pipe_ctx->link_res.hpo_dp_link_enc)
+ remove_hpo_dp_link_enc_from_ctx(&context->res_ctx, pipe_ctx, pipe_ctx->stream);
+ }
+
+ return DC_OK;
+}
+
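
As a rough orientation for readers skimming this hunk (not part of the patch itself): the new update_dp_encoder_resources_for_test_harness() helper looks at the pre-validated link setting and either acquires HPO stream/link encoder resources for 128b/132b encoding or releases them so the pipe falls back to the legacy DIO path for 8b/10b. The standalone sketch below uses made-up types and only illustrates that acquire/release decision; it is not the real dc resource code.

	#include <stdbool.h>

	enum link_encoding { ENC_8B_10B, ENC_128B_132B };

	struct hpo_resources {
		bool stream_enc_acquired;
		bool link_enc_acquired;
		int free_stream_encs;	/* how many HPO stream encoders are still free */
	};

	/* Returns false when a 128b/132b stream cannot get an HPO stream encoder,
	 * loosely analogous to DC_NO_STREAM_ENC_RESOURCE in the helper above. */
	static bool assign_dp_encoders(enum link_encoding enc, struct hpo_resources *hpo)
	{
		if (enc == ENC_128B_132B) {
			if (!hpo->stream_enc_acquired) {
				if (hpo->free_stream_encs == 0)
					return false;
				hpo->free_stream_encs--;
				hpo->stream_enc_acquired = true;
			}
			hpo->link_enc_acquired = true;
			return true;
		}
		/* 8b/10b: release any HPO resources so the DIO encoders are used. */
		if (hpo->stream_enc_acquired) {
			hpo->stream_enc_acquired = false;
			hpo->free_stream_encs++;
		}
		hpo->link_enc_acquired = false;
		return true;
	}
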
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index 4b372aa52801..6c06587dd88c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -65,6 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
/* For HPD/HPD RX, convert dpia port index into link index */
if (notify->type == DMUB_NOTIFICATION_HPD ||
notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
+ notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
get_link_index_from_dpia_port_index(dc, notify->link_index);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 20e534f73513..72b261ad9587 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -408,7 +408,7 @@ bool dc_stream_set_cursor_position(
struct dc_stream_state *stream,
const struct dc_cursor_position *position)
{
- struct dc *dc = stream->ctx->dc;
+ struct dc *dc;
bool reset_idle_optimizations = false;
if (NULL == stream) {
@@ -481,6 +481,7 @@ bool dc_stream_add_writeback(struct dc *dc,
}
if (!isDrc) {
+ ASSERT(stream->num_wb_info + 1 <= MAX_DWB_PIPES);
stream->writeback_info[stream->num_wb_info++] = *wb_info;
}
@@ -526,6 +527,11 @@ bool dc_stream_remove_writeback(struct dc *dc,
return false;
}
+ if (stream->num_wb_info > MAX_DWB_PIPES) {
+ dm_error("DC: num_wb_info is invalid!\n");
+ return false;
+ }
+
// stream->writeback_info[dwb_pipe_inst].wb_enabled = false;
for (i = 0; i < stream->num_wb_info; i++) {
/*dynamic update*/
@@ -540,7 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc,
if (stream->writeback_info[i].wb_enabled) {
if (j < i)
/* trim the array */
- stream->writeback_info[j] = stream->writeback_info[i];
+ memcpy(&stream->writeback_info[j], &stream->writeback_info[i],
+ sizeof(struct dc_writeback_info));
j++;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 85ebeaa2de18..72963617553e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.215"
+#define DC_VER "3.2.217"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -872,6 +872,7 @@ struct dc_debug_options {
enum lttpr_mode lttpr_mode_override;
unsigned int dsc_delay_factor_wa_x1000;
unsigned int min_prefetch_in_strobe_ns;
+ bool disable_unbounded_requesting;
};
struct gpu_info_soc_bounding_box_v1_0;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 2c54b6e0498b..73f58ac3b93f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -149,7 +149,6 @@ struct dc_link_settings {
enum dc_link_spread link_spread;
bool use_link_rate_set;
uint8_t link_rate_set;
- bool dpcd_source_device_specific_field_support;
};
union dc_dp_ffe_preset {
@@ -926,6 +925,9 @@ struct dpcd_usb4_dp_tunneling_info {
#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL
#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216
#endif
+#ifndef DP_LINK_SQUARE_PATTERN
+#define DP_LINK_SQUARE_PATTERN 0x10F
+#endif
#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX
#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 2e18bcf6b11a..8565bbb75177 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -335,15 +335,18 @@ static inline bool dc_get_edp_link_panel_inst(const struct dc *dc,
unsigned int *inst_out)
{
struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num;
+ int edp_num, i;
- if (link->connector_signal != SIGNAL_TYPE_EDP)
+ *inst_out = 0;
+ if (link->connector_signal != SIGNAL_TYPE_EDP || !link->local_sink)
return false;
get_edp_links(dc, edp_links, &edp_num);
- if ((edp_num > 1) && (link->link_index > edp_links[0]->link_index))
- *inst_out = 1;
- else
- *inst_out = 0;
+ for (i = 0; i < edp_num; i++) {
+ if (link == edp_links[i])
+ break;
+ if (edp_links[i]->local_sink)
+ (*inst_out)++;
+ }
return true;
}
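
A side note on the dc_link.h change above: the panel instance is now derived by counting, in link order, the eDP links that actually have a local sink, instead of assuming at most two panels. A minimal standalone sketch of that counting, with simplified hypothetical types rather than the real dc_link structures:

	#include <stdbool.h>
	#include <stddef.h>

	struct edp_link { bool has_local_sink; };

	/* Count how many earlier connected eDP panels precede "target" in the list;
	 * that count becomes the target's panel instance. */
	static bool edp_panel_inst(const struct edp_link *links, size_t num,
				   const struct edp_link *target, unsigned int *inst)
	{
		size_t i;

		*inst = 0;
		for (i = 0; i < num; i++) {
			if (&links[i] == target)
				return true;
			if (links[i].has_local_sink)
				(*inst)++;
		}
		return false;	/* target was not in the list */
	}
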
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index dfd3df1d2f7e..ef33d7d8a2bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -543,9 +543,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
unsigned int *nom_v_pos);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-bool dc_stream_forward_crc_window(struct dc *dc,
+bool dc_stream_forward_crc_window(struct dc_stream_state *stream,
struct rect *rect,
- struct dc_stream_state *stream,
bool is_stop);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 913a1fe6b3da..16e3b079fc56 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1142,6 +1142,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct dtbclk_dto_params dto_params = {0};
+ int dp_hpo_inst;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
@@ -1162,6 +1166,15 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
link_hwss->reset_stream_encoder(pipe_ctx);
if (is_dp_128b_132b_signal(pipe_ctx)) {
+ dto_params.otg_inst = tg->inst;
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
+ }
+
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
/* TODO: This looks like a bug to me as we are disabling HPO IO when
* we are just disabling a single HPO stream. Shouldn't we disable HPO
* HW control only when HPOs for all streams are disabled?
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 784a8b6f360d..c08c01e05dcf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -200,7 +200,6 @@ static void dsc2_set_config(struct display_stream_compressor *dsc, const struct
bool is_config_ok;
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
- DC_LOG_DSC(" ");
DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);
dsc_config_log(dsc, dsc_cfg);
is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 6291a241158a..20c85ef2a957 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -582,6 +582,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.gsl_group != 0)
dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
+ if (hubp->funcs->hubp_update_mall_sel)
+ hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
+
dc->hwss.set_flip_control_gsl(pipe_ctx, false);
hubp->funcs->hubp_clk_cntl(hubp, false);
@@ -605,6 +608,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+ struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
+
DC_LOGGER_INIT(dc->ctx->logger);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
@@ -612,6 +618,12 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
dcn20_plane_atomic_disable(dc, pipe_ctx);
+ /* Turn back off the phantom OTG after the phantom plane is fully disabled
+ */
+ if (is_phantom)
+ if (tg && tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+
DC_LOG_DC("Power down front end %d\n",
pipe_ctx->pipe_idx);
}
@@ -1803,6 +1815,18 @@ void dcn20_program_front_end_for_ctx(
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
+ /* When disabling phantom pipes, turn on phantom OTG first (so we can get double
+ * buffer updates properly)
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
+ && dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+ if (tg->funcs->enable_crtc)
+ tg->funcs->enable_crtc(tg);
+ }
+
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
@@ -2615,6 +2639,37 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->mpcc_id = mpcc_id;
}
+static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
+{
+ switch (link->link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYD32CLKA;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYD32CLKB;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYD32CLKC;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYD32CLKD;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYD32CLKE;
+ default:
+ return PHYD32CLKA;
+ }
+}
+
+static int get_odm_segment_count(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+ int count = 1;
+
+ while (odm_pipe != NULL) {
+ count++;
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+
+ return count;
+}
+
void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
@@ -2628,12 +2683,43 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
struct timing_generator *tg = pipe_ctx->stream_res.tg;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dtbclk_dto_params dto_params = {0};
+ struct dccg *dccg = dc->res_pool->dccg;
+ enum phyd32clk_clock_source phyd32clk;
+ int dp_hpo_inst;
+ struct dce_hwseq *hws = dc->hwseq;
+ unsigned int k1_div = PIXEL_RATE_DIV_NA;
+ unsigned int k2_div = PIXEL_RATE_DIV_NA;
if (is_dp_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
}
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
+
+ phyd32clk = get_phyd32clk_src(link);
+ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+
+ dto_params.otg_inst = tg->inst;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
+
+ if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
+ hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
+
+ dc->res_pool->dccg->funcs->set_pixel_rate_div(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.tg->inst,
+ k1_div, k2_div);
+ }
+
link_hwss->setup_stream_encoder(pipe_ctx);
if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
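
The get_odm_segment_count() helper introduced above simply walks the ODM chain hanging off a pipe; its result is used to size the DTBCLK DTO for 128b/132b streams. A self-contained sketch of the same walk over a hypothetical singly linked pipe list, for illustration only:

	#include <stddef.h>

	struct odm_pipe {
		struct odm_pipe *next_odm_pipe;
	};

	/* One segment for the head pipe plus one per chained ODM pipe. */
	static int odm_segment_count(const struct odm_pipe *head)
	{
		int count = 1;
		const struct odm_pipe *p = head->next_odm_pipe;

		while (p != NULL) {
			count++;
			p = p->next_odm_pipe;
		}
		return count;
	}
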
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 8a0dd0d7134b..71c7237413ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1389,6 +1389,9 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->top_pipe)
+ continue;
+
if (pipe_ctx->stream != dc_stream)
continue;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 8cf10351f271..ee62ae3eb98f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1414,7 +1414,8 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
- .update_bw_bounding_box = dcn301_update_bw_bounding_box
+ .update_bw_bounding_box = dcn301_update_bw_bounding_box,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state
};
static bool dcn301_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 80dfaa4d4d81..0b317ed31f91 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -242,7 +242,10 @@ void dcn31_hpo_dp_link_enc_set_link_test_pattern(
REG_UPDATE(DP_DPHY_SYM32_CONTROL,
MODE, DP2_TEST_PATTERN);
break;
- case DP_TEST_PATTERN_SQUARE_PULSE:
+ case DP_TEST_PATTERN_SQUARE:
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+ case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0,
TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 4226a051df41..165c920ca776 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -623,43 +623,3 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
if (hws->ctx->dc->debug.hpo_optimization)
REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
}
-void dcn31_set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, struct dc_crtc_timing_adjust adjust)
-{
- int i = 0;
- struct drr_params params = {0};
- unsigned int event_triggers = 0x2;/*Bit[1]: OTG_TRIG_A*/
- unsigned int num_frames = 2;
- params.vertical_total_max = adjust.v_total_max;
- params.vertical_total_min = adjust.v_total_min;
- params.vertical_total_mid = adjust.v_total_mid;
- params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
- for (i = 0; i < num_pipes; i++) {
- if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
- if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
- pipe_ctx[i]->stream_res.tg->funcs->set_drr(
- pipe_ctx[i]->stream_res.tg, &params);
- if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
- if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx[i]->stream_res.tg,
- event_triggers, num_frames);
- }
- }
-}
-void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params)
-{
- unsigned int i;
- unsigned int triggers = 0;
- if (params->triggers.surface_update)
- triggers |= 0x600;/*bit 9 and bit10 : 110 0000 0000*/
- if (params->triggers.cursor_update)
- triggers |= 0x10;/*bit4*/
- if (params->triggers.force_trigger)
- triggers |= 0x1;
- for (i = 0; i < num_pipes; i++)
- pipe_ctx[i]->stream_res.tg->funcs->
- set_static_screen_control(pipe_ctx[i]->stream_res.tg,
- triggers, params->num_frames);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index e7e03a8722e0..edfc01d6ad73 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -56,8 +56,4 @@ bool dcn31_is_abm_supported(struct dc *dc,
void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
-void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params);
-void dcn31_set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, struct dc_crtc_timing_adjust adjust);
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 7c2da70ffe21..3a32810bbe38 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -64,9 +64,9 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn31_set_drr,
+ .set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn31_set_static_screen_control,
+ .set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index fe449f7aa771..63a677c8ee27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -40,7 +40,6 @@
#define FN(reg_name, field_name) \
optc1->tg_shift->field_name, optc1->tg_mask->field_name
-#define STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN 0x2000 /*bit 13*/
static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing)
{
@@ -232,32 +231,6 @@ void optc3_init_odm(struct timing_generator *optc)
OPTC_MEM_SEL, 0);
optc1->opp_count = 1;
}
-void optc31_set_static_screen_control(
- struct timing_generator *optc,
- uint32_t event_triggers,
- uint32_t num_frames)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t framecount;
- uint32_t events;
-
- if (num_frames > 0xFF)
- num_frames = 0xFF;
- REG_GET_2(OTG_STATIC_SCREEN_CONTROL,
- OTG_STATIC_SCREEN_EVENT_MASK, &events,
- OTG_STATIC_SCREEN_FRAME_COUNT, &framecount);
-
- if (events == event_triggers && num_frames == framecount)
- return;
- if ((event_triggers & STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN)
- != 0)
- event_triggers = event_triggers &
- ~STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN;
-
- REG_UPDATE_2(OTG_STATIC_SCREEN_CONTROL,
- OTG_STATIC_SCREEN_EVENT_MASK, event_triggers,
- OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);
-}
static struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
@@ -293,7 +266,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.set_drr = optc31_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
- .set_static_screen_control = optc31_set_static_screen_control,
+ .set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.tg_init = optc3_tg_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
index 5fc6c63580d7..30b81a448ce2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
@@ -263,8 +263,5 @@ bool optc31_immediate_disable_crtc(struct timing_generator *optc);
void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params);
void optc3_init_odm(struct timing_generator *optc);
-void optc31_set_static_screen_control(
- struct timing_generator *optc,
- uint32_t event_triggers,
- uint32_t num_frames);
+
#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 38842f938bed..0926db018338 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -278,10 +278,10 @@ static void enc314_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
{
- /* New to DCN314 - disable the FIFO before VID stream disable. */
- enc314_disable_fifo(enc);
-
enc1_stream_encoder_dp_blank(link, enc);
+
+ /* Disable FIFO after the DP vid stream is disabled to avoid corruption. */
+ enc314_disable_fifo(enc);
}
static void enc314_stream_encoder_dp_unblank(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index 31feb4b0edee..5b6c2d94ec71 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -66,9 +66,9 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn31_set_drr,
+ .set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn31_set_static_screen_control,
+ .set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 41edbd64ea21..0086cafb0f7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -228,7 +228,7 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.set_drr = optc31_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
- .set_static_screen_control = optc31_set_static_screen_control,
+ .set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.tg_init = optc3_tg_init,
@@ -241,7 +241,6 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.set_dsc_config = optc3_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
- .set_odm_combine = optc314_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index b8767be1e4c5..c10d8a60380a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -188,7 +188,8 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
/* First, check no-memory-request case */
for (i = 0; i < dc->current_state->stream_count; i++) {
- if (dc->current_state->stream_status[i].plane_count)
+ if ((dc->current_state->stream_status[i].plane_count) &&
+ (dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
/* Fail eligibility on a visible stream */
break;
}
@@ -1450,3 +1451,39 @@ void dcn32_update_dsc_pg(struct dc *dc,
}
}
}
+
+void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
+{
+ unsigned int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* If an active, non-phantom pipe is being transitioned into a phantom
+ * pipe, wait for the double buffer update to complete first before we do
+ * ANY phantom pipe programming.
+ */
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
+ old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ old_pipe->stream_res.tg->funcs->wait_for_state(
+ old_pipe->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ old_pipe->stream_res.tg->funcs->wait_for_state(
+ old_pipe->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ }
+ }
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ // If old context or new context has phantom pipes, apply
+ // the phantom timings now. We can't change the phantom
+ // pipe configuration safely without the driver acquiring
+ // the DMCUB lock first.
+ dc->hwss.apply_ctx_to_hw(dc, context);
+ break;
+ }
+ }
+}
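
dcn32_enable_phantom_streams() above makes any pipe that is flipping from a real stream to a phantom one wait through VBLANK and then VACTIVE before phantom programming starts, so the in-flight double-buffered update is guaranteed to have latched. A stripped-down sketch of that wait pattern, with stand-in types and a caller-supplied wait hook (purely illustrative, not the dc hwseq interface):

	#include <stdbool.h>
	#include <stddef.h>

	enum crtc_state { CRTC_VBLANK, CRTC_VACTIVE };

	struct sketch_pipe {
		bool was_active_nonphantom;	/* old context: real, visible stream */
		bool becomes_phantom;		/* new context: SubVP phantom stream */
		void (*wait_for_state)(struct sketch_pipe *pipe, enum crtc_state state);
	};

	static void wait_for_transitioning_pipes(struct sketch_pipe *pipes, size_t num)
	{
		size_t i;

		for (i = 0; i < num; i++) {
			if (pipes[i].was_active_nonphantom && pipes[i].becomes_phantom) {
				/* Let the pending double buffer update latch first. */
				pipes[i].wait_for_state(&pipes[i], CRTC_VBLANK);
				pipes[i].wait_for_state(&pipes[i], CRTC_VACTIVE);
			}
		}
	}
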
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 7de36529cf99..e9e9534f3668 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -102,4 +102,6 @@ void dcn32_update_dsc_pg(struct dc *dc,
struct dc_state *context,
bool safe_to_disable);
+void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
+
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index dc4649458567..330d7cbc7398 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -106,6 +106,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
+ .enable_phantom_streams = dcn32_enable_phantom_streams,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index e4dbc8353ea3..dfecdf3e25e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -726,6 +726,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback"
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
+ .disable_unbounded_requesting = false,
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 13fbc574910b..57ce1d670abe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -112,6 +112,7 @@ bool dcn32_subvp_in_use(struct dc *dc,
bool dcn32_mpo_in_use(struct dc_state *context);
bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
+bool dcn32_is_center_timing(struct pipe_ctx *pipe);
struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_state *state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 783935c4e664..e5287e5f66d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -255,6 +255,19 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
return false;
}
+bool dcn32_is_center_timing(struct pipe_ctx *pipe)
+{
+ bool is_center_timing = false;
+
+ if (pipe->stream) {
+ if (pipe->stream->timing.v_addressable != pipe->stream->dst.height ||
+ pipe->stream->timing.v_addressable != pipe->stream->src.height) {
+ is_center_timing = true;
+ }
+ }
+ return is_center_timing;
+}
+
/**
* *******************************************************************************************
* dcn32_determine_det_override: Determine DET allocation for each pipe
@@ -357,6 +370,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
+ bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -373,7 +387,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
*/
if (pipe_cnt == 1) {
pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
- if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
if (!is_dual_plane(pipe->plane_state->format)) {
pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
pipes[0].pipe.src.unbounded_req_mode = true;
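
The new dcn32_is_center_timing() check above flags streams whose active vertical timing does not match either the source or destination height, i.e. centered or letterboxed modes, so they can be excluded from SubVP assignment. A tiny standalone version of the same comparison, using hypothetical field names:

	#include <stdbool.h>

	struct timing_sketch {
		int v_addressable;	/* active lines in the stream timing */
		int dst_height;		/* destination rectangle height */
		int src_height;		/* source rectangle height */
	};

	static bool is_center_timing(const struct timing_sketch *t)
	{
		return t->v_addressable != t->dst_height ||
		       t->v_addressable != t->src_height;
	}
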
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index d1f36df03c2e..62e400e90b56 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -724,6 +724,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback"
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
+ .disable_unbounded_requesting = false,
};
static const struct dc_debug_options debug_defaults_diags = {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index f94abd124021..e7459fd50bf9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -691,7 +691,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* to combine this with SubVP can cause issues with the scheduling).
* - Not TMZ surface
*/
- if (pipe->plane_state && !pipe->top_pipe &&
+ if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
while (pipe) {
@@ -979,8 +979,11 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
}
// Use ignore_msa_timing_param flag to identify as DRR
if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) {
- // SUBVP + DRR case
- schedulable = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[vblank_index]);
+ // SUBVP + DRR case -- don't enable SubVP + DRR for HDMI VRR cases
+ if (context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync)
+ schedulable = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[vblank_index]);
+ else
+ schedulable = false;
} else if (found) {
main_timing = &subvp_pipe->stream->timing;
phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
@@ -1169,6 +1172,16 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ /* Check that vlevel requested supports pstate or not
+ * if not, select the lowest vlevel that supports it
+ */
+ for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
+ if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) {
+ *vlevel = i;
+ break;
+ }
+ }
+
if (*vlevel < context->bw_ctx.dml.soc.num_states &&
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported
&& subvp_validate_static_schedulability(dc, context, *vlevel)) {
@@ -1185,7 +1198,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
pipe->stream->mall_stream_config.type == SUBVP_NONE) {
non_subvp_pipes++;
// Use ignore_msa_timing_param flag to identify as DRR
- if (pipe->stream->ignore_msa_timing_param) {
+ if (pipe->stream->ignore_msa_timing_param && pipe->stream->allow_freesync) {
drr_pipe_found = true;
drr_pipe_index = i;
}
@@ -1551,6 +1564,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_fclk_and_stutter;
+ context->bw_ctx.dml.validate_max_state = fast_validate;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
/* Last attempt with Prefetch mode 2 (dm_prefetch_support_stutter == 3) */
@@ -1559,6 +1573,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dm_prefetch_support_stutter;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
}
+ context->bw_ctx.dml.validate_max_state = false;
if (vlevel < context->bw_ctx.dml.soc.num_states) {
memset(split, 0, sizeof(split));
@@ -1645,6 +1660,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ memset(&pipe->link_res, 0, sizeof(pipe->link_res));
repopulate_pipes = true;
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
struct pipe_ctx *top_pipe = pipe->top_pipe;
@@ -1660,6 +1676,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe->stream = NULL;
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ memset(&pipe->link_res, 0, sizeof(pipe->link_res));
repopulate_pipes = true;
} else
ASSERT(0); /* Should never try to merge master pipe */
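
One small but consequential change in dcn32_full_validate_bw_helper() above is the loop that bumps the requested vlevel up to the lowest voltage state that still supports a DRAM clock change before SubVP schedulability is evaluated. A standalone sketch of that search, with made-up names rather than the DML arrays:

	#include <stdbool.h>

	#define MAX_STATES 8

	/* Return the lowest state index >= requested that supports p-state change,
	 * or num_states if none does. */
	static int lowest_pstate_capable_level(const bool pstate_supported[MAX_STATES],
					       int num_states, int requested)
	{
		int i;

		for (i = requested; i < num_states; i++)
			if (pstate_supported[i])
				return i;
		return num_states;
	}
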
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 4b8f5fa0f0ad..bc22078751f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -1707,7 +1707,7 @@ static void mode_support_configuration(struct vba_vars_st *v,
void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
- int i, j;
+ int i, j, start_state;
unsigned int k, m;
unsigned int MaximumMPCCombine;
unsigned int NumberOfNonCombinedSurfaceOfMaximumBandwidth;
@@ -1720,7 +1720,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
#endif
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
-
+ if (mode_lib->validate_max_state)
+ start_state = v->soc.num_states - 1;
+ else
+ start_state = 0;
/*Scale Ratio, taps Support Check*/
mode_lib->vba.ScaleRatioAndTapsSupport = true;
@@ -2009,7 +2012,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MPCCombineMethodIncompatible = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage
&& v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible;
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
mode_lib->vba.TotalNumberOfActiveDPP[i][j] = 0;
mode_lib->vba.TotalAvailablePipesSupport[i][j] = true;
@@ -2286,7 +2289,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.ExceededMultistreamSlots[i] = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k) {
@@ -2386,7 +2389,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k
@@ -2403,7 +2406,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.ODMCombine2To1SupportCheckOK[i] = true;
mode_lib->vba.ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
@@ -2421,7 +2424,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -2458,7 +2461,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/* Check DSC Unit and Slices Support */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0;
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.NotEnoughDSCUnits[i] = false;
mode_lib->vba.NotEnoughDSCSlices[i] = false;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0;
@@ -2493,7 +2496,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*DSC Delay per state*/
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.DSCDelayPerState[i][k] = dml32_DSCDelayRequirement(
mode_lib->vba.RequiresDSC[i][k], mode_lib->vba.ODMCombineEnablePerState[i][k],
@@ -2520,7 +2523,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Calculate Swath, DET Configuration, DCFCLKDeepSleep
//
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.RequiredDPPCLKThisState[k] = mode_lib->vba.RequiredDPPCLK[i][j][k];
@@ -2655,7 +2658,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.SurfaceSizeInMALL,
&mode_lib->vba.ExceededMALLSize);
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
mode_lib->vba.swath_width_luma_ub_this_state[k] =
@@ -2882,7 +2885,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
//Calculate Return BW
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -2961,7 +2964,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.MinPrefetchMode,
&mode_lib->vba.MaxPrefetchMode);
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j)
mode_lib->vba.DCFCLKState[i][j] = mode_lib->vba.DCFCLKPerState[i];
}
@@ -3083,7 +3086,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DCFCLKState);
} // UseMinimumRequiredDCFCLK == true
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.ReturnBWPerState[i][j] = dml32_get_return_bw_mbps(&mode_lib->vba.soc, i,
mode_lib->vba.HostVMEnable, mode_lib->vba.DCFCLKState[i][j],
@@ -3092,7 +3095,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
//Re-ordering Buffer Support Check
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024
/ mode_lib->vba.ReturnBWPerState[i][j]
@@ -3114,7 +3117,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ mode_lib->vba.ReadBandwidthChroma[k];
}
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][j] =
dml_min3(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKState[i][j]
@@ -3138,7 +3141,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/* Prefetch Check */
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.TimeCalc = 24 / mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 5af601cff1a0..b53feeaf5cf1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -6257,12 +6257,12 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];
bool NotEnoughDETSwathFillLatencyHiding = false;
- /* calculate sum of single swath size for all pipes in bytes*/
+ /* calculate sum of single swath size for all pipes in bytes */
for (k = 0; k < NumberOfActiveSurfaces; k++) {
- SwathSizePerSurfaceY[k] += SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
+ SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
if (SwathHeightC[k] != 0)
- SwathSizePerSurfaceC[k] += SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
+ SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
else
SwathSizePerSurfaceC[k] = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index f4b176599be7..0ea406145c1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -136,7 +136,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 100.0,
+ .pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 3d643d50c3eb..a9d49ef58fb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -91,6 +91,7 @@ struct display_mode_lib {
struct dal_logger *logger;
struct dml_funcs funcs;
struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6];
+ bool validate_max_state;
};
void dml_init_instance(struct display_mode_lib *lib,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 525f8f0b8732..b093ea495468 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -547,15 +547,6 @@ struct dc_state {
struct resource_context res_ctx;
/**
- * @bw_ctx: The output from bandwidth and watermark calculations and the DML
- *
- * Each context must have its own instance of VBA, and in order to
- * initialize and obtain IP and SOC, the base DML instance from DC is
- * initially copied into every context.
- */
- struct bw_context bw_ctx;
-
- /**
* @pp_display_cfg: PowerPlay clocks and settings
* Note: this is a big struct, do *not* put on stack!
*/
@@ -570,6 +561,15 @@ struct dc_state {
struct clk_mgr *clk_mgr;
/**
+ * @bw_ctx: The output from bandwidth and watermark calculations and the DML
+ *
+ * Each context must have its own instance of VBA, and in order to
+ * initialize and obtain IP and SOC, the base DML instance from DC is
+ * initially copied into every context.
+ */
+ struct bw_context bw_ctx;
+
+ /**
* @refcount: refcount reference
*
* Notice that dc_state is used around the code to capture the current
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index c43523f9ff6d..88ac723d10aa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -266,6 +266,7 @@ struct hw_sequencer_funcs {
void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
+ void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5040836f404d..4ab029e3326d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -236,4 +236,13 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
struct pipe_ctx *pri_pipe,
struct pipe_ctx *sec_pipe,
bool odm);
+
+/* A test harness interface that modifies dp encoder resources in the given dc
+ * state and bypasses the need to revalidate. It assumes it is called with a
+ * pre-validated link config already stored in the pipe_ctx and updates dp
+ * encoder resources according to that link config.
+ */
+enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 5f4f6dd79511..27dc8c9955f4 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -136,11 +136,6 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.ack = NULL
};
-static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = {
- .set = NULL,
- .ack = NULL
-};
-
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index 2f46e1ac4ce0..164d631e8809 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -87,57 +87,20 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
hblank_min_symbol_width);
}
-static int get_odm_segment_count(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
- int count = 1;
-
- while (odm_pipe != NULL) {
- count++;
- odm_pipe = odm_pipe->next_odm_pipe;
- }
-
- return count;
-}
-
static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc;
- struct dccg *dccg = dc->res_pool->dccg;
- struct timing_generator *tg = pipe_ctx->stream_res.tg;
- struct dtbclk_dto_params dto_params = {0};
- enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
- dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
- dto_params.timing = &pipe_ctx->stream->timing;
- dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
-
- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
- dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
stream_enc->funcs->enable_stream(stream_enc);
stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);
}
static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
- struct dccg *dccg = dc->res_pool->dccg;
- struct timing_generator *tg = pipe_ctx->stream_res.tg;
- struct dtbclk_dto_params dto_params = {0};
-
- dto_params.otg_inst = tg->inst;
- dto_params.timing = &pipe_ctx->stream->timing;
stream_enc->funcs->disable(stream_enc);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
}
static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index eb5b7eb292ef..c8274967de94 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -126,6 +126,7 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_HPD,
DMUB_NOTIFICATION_HPD_IRQ,
DMUB_NOTIFICATION_SET_CONFIG_REPLY,
+ DMUB_NOTIFICATION_DPIA_NOTIFICATION,
DMUB_NOTIFICATION_MAX
};
@@ -453,6 +454,7 @@ struct dmub_srv {
* @pending_notification: Indicates there are other pending notifications
* @aux_reply: aux reply
* @hpd_status: hpd status
+ * @bw_alloc_reply: BW Allocation reply from CM/DPIA
*/
struct dmub_notification {
enum dmub_notification_type type;
@@ -463,6 +465,7 @@ struct dmub_notification {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
enum set_config_status sc_status;
+ struct dpia_notification_reply_data bw_alloc_reply;
};
};
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 33907feefebb..4dcd82d19ccf 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -770,6 +770,7 @@ enum dmub_out_cmd_type {
* Command type used for SET_CONFIG Reply notification
*/
DMUB_OUT_CMD__SET_CONFIG_REPLY = 3,
+ DMUB_OUT_CMD__DPIA_NOTIFICATION = 5
};
/* DMUB_CMD__DPIA command sub-types. */
@@ -1517,6 +1518,84 @@ struct dp_hpd_data {
};
/**
+ * DPIA NOTIFICATION Response Type
+ */
+enum dpia_notify_bw_alloc_status {
+
+ DPIA_BW_REQ_FAILED = 0,
+ DPIA_BW_REQ_SUCCESS,
+ DPIA_EST_BW_CHANGED,
+ DPIA_BW_ALLOC_CAPS_CHANGED
+};
+
+/* DMUB_OUT_CMD__DPIA_NOTIFY Reply command - OutBox Cmd */
+/**
+ * Data passed to driver from FW in a DMUB_OUT_CMD__DPIA_NOTIFY command.
+ */
+struct dpia_notification_reply_data {
+ uint8_t allocated_bw;
+ uint8_t estimated_bw;
+};
+
+struct dpia_notification_common {
+ bool shared;
+};
+
+struct dpia_bw_allocation_notify_data {
+ union {
+ struct {
+ uint16_t cm_bw_alloc_support: 1; /**< USB4 CM BW Allocation mode support */
+ uint16_t bw_request_failed: 1; /**< BW_Request_Failed */
+ uint16_t bw_request_succeeded: 1; /**< BW_Request_Succeeded */
+ uint16_t est_bw_changed: 1; /**< Estimated_BW changed */
+ uint16_t bw_alloc_cap_changed: 1; /**< BW_Allocation_Capability_Changed */
+ uint16_t reserved: 11;
+ } bits;
+ uint16_t flags;
+ };
+ uint8_t cm_id; /**< CM ID */
+ uint8_t group_id; /**< Group ID */
+ uint8_t granularity; /**< BW Allocation Granularity */
+ uint8_t estimated_bw; /**< Estimated_BW */
+ uint8_t allocated_bw; /**< Allocated_BW */
+ uint8_t reserved;
+};
+
+union dpia_notification_data {
+ struct dpia_notification_common common_data;
+ struct dpia_bw_allocation_notify_data dpia_bw_alloc; /**< Used for DPIA BW Allocation mode notification */
+};
+
+enum dmub_cmd_dpia_notification_type {
+ DPIA_NOTIFY__BW_ALLOCATION = 0,
+};
+
+struct dpia_notification_header {
+ uint8_t instance; /**< DPIA Instance */
+ uint8_t reserved[3];
+ enum dmub_cmd_dpia_notification_type type; /**< DPIA notification type */
+};
+
+struct dpia_notification_payload {
+ struct dpia_notification_header header;
+ union dpia_notification_data data; /**< DPIA notification data */
+};
+
+/**
+ * Definition of a DMUB_OUT_CMD__DPIA_NOTIFY command.
+ */
+struct dmub_rb_cmd_dpia_notification {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header; /**< DPIA notification header */
+ /**
+ * Data passed to driver from FW in a DMUB_OUT_CMD__DPIA_NOTIFY command.
+ */
+ struct dpia_notification_payload payload; /**< DPIA notification payload */
+};
+
+/**
* Definition of a DMUB_OUT_CMD__DP_HPD_NOTIFY command.
*/
struct dmub_rb_cmd_dp_hpd_notify {
@@ -3422,6 +3501,10 @@ union dmub_rb_out_cmd {
* SET_CONFIG reply command.
*/
struct dmub_rb_cmd_dp_set_config_reply set_config_reply;
+ /**
+ * BW ALLOCATION notification command.
+ */
+ struct dmub_rb_cmd_dpia_notification dpia_notify;
};
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 44502ec919a2..55a534ec0794 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -92,6 +92,27 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
break;
+ case DMUB_OUT_CMD__DPIA_NOTIFICATION:
+ notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
+ notify->link_index = cmd.dpia_notify.payload.header.instance;
+
+ if (cmd.dpia_notify.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) {
+
+ if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.bw_request_failed) {
+ notify->result = DPIA_BW_REQ_FAILED;
+ } else if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.bw_request_succeeded) {
+ notify->result = DPIA_BW_REQ_SUCCESS;
+ notify->bw_alloc_reply.allocated_bw =
+ cmd.dpia_notify.payload.data.dpia_bw_alloc.allocated_bw;
+ } else if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.est_bw_changed) {
+ notify->result = DPIA_EST_BW_CHANGED;
+ notify->bw_alloc_reply.estimated_bw =
+ cmd.dpia_notify.payload.data.dpia_bw_alloc.estimated_bw;
+ } else if (cmd.dpia_notify.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed) {
+ notify->result = DPIA_BW_ALLOC_CAPS_CHANGED;
+ }
+ }
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
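
The new DMUB_OUT_CMD__DPIA_NOTIFICATION handling above folds the BW-allocation status bits into a single result plus, where applicable, the allocated or estimated bandwidth. A compact standalone illustration of that bit-to-status mapping, using a simplified flags struct rather than the real dmub command layout:

	#include <stdint.h>

	enum bw_alloc_status {
		BW_REQ_FAILED,
		BW_REQ_SUCCESS,
		BW_EST_CHANGED,
		BW_CAPS_CHANGED,
		BW_NO_CHANGE
	};

	struct bw_alloc_bits {
		uint16_t request_failed : 1;
		uint16_t request_succeeded : 1;
		uint16_t est_bw_changed : 1;
		uint16_t caps_changed : 1;
	};

	static enum bw_alloc_status decode_bw_alloc(struct bw_alloc_bits bits,
						    uint8_t allocated_bw,
						    uint8_t estimated_bw,
						    uint8_t *reported_bw)
	{
		if (bits.request_failed)
			return BW_REQ_FAILED;
		if (bits.request_succeeded) {
			*reported_bw = allocated_bw;
			return BW_REQ_SUCCESS;
		}
		if (bits.est_bw_changed) {
			*reported_bw = estimated_bw;
			return BW_EST_CHANGED;
		}
		if (bits.caps_changed)
			return BW_CAPS_CHANGED;
		return BW_NO_CHANGE;
	}
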
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index b2df07f9e91c..c062a44db078 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -88,7 +88,10 @@ enum dpcd_phy_test_patterns {
PHY_TEST_PATTERN_PRBS23 = 0x30,
PHY_TEST_PATTERN_PRBS31 = 0x38,
PHY_TEST_PATTERN_264BIT_CUSTOM = 0x40,
- PHY_TEST_PATTERN_SQUARE_PULSE = 0x48,
+ PHY_TEST_PATTERN_SQUARE = 0x48,
+ PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED = 0x49,
+ PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED = 0x4A,
+ PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED = 0x4B,
};
enum dpcd_test_dyn_range {
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index d1e91d31d151..18b9173d5a96 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -165,7 +165,12 @@ enum dp_test_pattern {
DP_TEST_PATTERN_PRBS23,
DP_TEST_PATTERN_PRBS31,
DP_TEST_PATTERN_264BIT_CUSTOM,
- DP_TEST_PATTERN_SQUARE_PULSE,
+ DP_TEST_PATTERN_SQUARE_BEGIN,
+ DP_TEST_PATTERN_SQUARE = DP_TEST_PATTERN_SQUARE_BEGIN,
+ DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED,
+ DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED,
+ DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED,
+ DP_TEST_PATTERN_SQUARE_END = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED,
/* Link Training Patterns */
DP_TEST_PATTERN_TRAINING_PATTERN1,
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 9b5d9b2c9a6a..cf4fa87c7db6 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -916,3 +916,34 @@ bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_s
{
return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);
}
+
+bool psr_su_set_y_granularity(struct dc *dc, struct dc_link *link,
+ struct dc_stream_state *stream,
+ struct psr_config *config)
+{
+ uint16_t pic_height;
+ uint8_t slice_height;
+
+ if ((link->connector_signal & SIGNAL_TYPE_EDP) &&
+ (!dc->caps.edp_dsc_support ||
+ link->panel_config.dsc.disable_dsc_edp ||
+ !link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
+ !stream->timing.dsc_cfg.num_slices_v))
+ return true;
+
+ pic_height = stream->timing.v_addressable +
+ stream->timing.v_border_top + stream->timing.v_border_bottom;
+ slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v;
+
+ if (slice_height) {
+ if (config->su_y_granularity &&
+ (slice_height % config->su_y_granularity)) {
+ ASSERT(0);
+ return false;
+ }
+
+ config->su_y_granularity = slice_height;
+ }
+
+ return true;
+}
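
A worked example of the arithmetic in psr_su_set_y_granularity() above, with assumed timing values (2160 active lines, no borders, 12 vertical DSC slices, a hypothetical panel-reported granularity of 4); only the slice-height check is reproduced here, the DC structures are not.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed values for illustration only. */
	uint16_t v_addressable = 2160, v_border_top = 0, v_border_bottom = 0;
	uint16_t num_slices_v = 12;
	uint16_t su_y_granularity = 4;	/* hypothetical panel value */

	uint16_t pic_height = v_addressable + v_border_top + v_border_bottom;
	uint8_t slice_height = pic_height / num_slices_v;	/* 2160 / 12 = 180 */

	if (slice_height) {
		/* 180 % 4 == 0, so the slice height becomes the new SU Y granularity;
		 * a non-zero remainder is the failure path taken above. */
		assert(!(su_y_granularity && (slice_height % su_y_granularity)));
		su_y_granularity = slice_height;
	}

	printf("su_y_granularity = %u\n", su_y_granularity);	/* 180 */
	return 0;
}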
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 316452e9dbc9..bb16b37b83da 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -59,4 +59,7 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config,
const struct dc_stream_state *stream);
bool mod_power_only_edp(const struct dc_state *context,
const struct dc_stream_state *stream);
+bool psr_su_set_y_granularity(struct dc *dc, struct dc_link *link,
+ struct dc_stream_state *stream,
+ struct psr_config *config);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h
new file mode 100644
index 000000000000..fbb18e44ec52
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _df_4_3_OFFSET_HEADER
+#define _df_4_3_OFFSET_HEADER
+
+#define regDF_CS_UMC_AON0_HardwareAssertMaskLow 0x0e3e
+#define regDF_CS_UMC_AON0_HardwareAssertMaskLow_BASE_IDX 4
+#define regDF_NCS_PG0_HardwareAssertMaskHigh 0x0e3f
+#define regDF_NCS_PG0_HardwareAssertMaskHigh_BASE_IDX 4
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h
new file mode 100644
index 000000000000..9c8f19ded4eb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _df_4_3_SH_MASK_HEADER
+#define _df_4_3_SH_MASK_HEADER
+
+//DF_CS_UMC_AON0_HardwareAssertMaskLow
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0__SHIFT 0x0
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1__SHIFT 0x1
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2__SHIFT 0x2
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3__SHIFT 0x3
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4__SHIFT 0x4
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5__SHIFT 0x5
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6__SHIFT 0x6
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7__SHIFT 0x7
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8__SHIFT 0x8
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9__SHIFT 0x9
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10__SHIFT 0xa
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11__SHIFT 0xb
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12__SHIFT 0xc
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13__SHIFT 0xd
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14__SHIFT 0xe
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15__SHIFT 0xf
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16__SHIFT 0x10
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17__SHIFT 0x11
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18__SHIFT 0x12
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19__SHIFT 0x13
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20__SHIFT 0x14
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21__SHIFT 0x15
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22__SHIFT 0x16
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23__SHIFT 0x17
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24__SHIFT 0x18
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25__SHIFT 0x19
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26__SHIFT 0x1a
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27__SHIFT 0x1b
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28__SHIFT 0x1c
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29__SHIFT 0x1d
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30__SHIFT 0x1e
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31__SHIFT 0x1f
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK 0x00000001L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1_MASK 0x00000002L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2_MASK 0x00000004L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3_MASK 0x00000008L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4_MASK 0x00000010L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5_MASK 0x00000020L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6_MASK 0x00000040L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7_MASK 0x00000080L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8_MASK 0x00000100L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9_MASK 0x00000200L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10_MASK 0x00000400L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11_MASK 0x00000800L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12_MASK 0x00001000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13_MASK 0x00002000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14_MASK 0x00004000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15_MASK 0x00008000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16_MASK 0x00010000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17_MASK 0x00020000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18_MASK 0x00040000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19_MASK 0x00080000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20_MASK 0x00100000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21_MASK 0x00200000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22_MASK 0x00400000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23_MASK 0x00800000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24_MASK 0x01000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25_MASK 0x02000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26_MASK 0x04000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27_MASK 0x08000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28_MASK 0x10000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29_MASK 0x20000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30_MASK 0x40000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31_MASK 0x80000000L
+
+//DF_NCS_PG0_HardwareAssertMaskHigh
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0__SHIFT 0x0
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1__SHIFT 0x1
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2__SHIFT 0x2
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3__SHIFT 0x3
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4__SHIFT 0x4
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5__SHIFT 0x5
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6__SHIFT 0x6
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7__SHIFT 0x7
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8__SHIFT 0x8
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9__SHIFT 0x9
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10__SHIFT 0xa
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11__SHIFT 0xb
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12__SHIFT 0xc
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13__SHIFT 0xd
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14__SHIFT 0xe
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15__SHIFT 0xf
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16__SHIFT 0x10
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17__SHIFT 0x11
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18__SHIFT 0x12
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19__SHIFT 0x13
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20__SHIFT 0x14
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21__SHIFT 0x15
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22__SHIFT 0x16
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23__SHIFT 0x17
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24__SHIFT 0x18
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25__SHIFT 0x19
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26__SHIFT 0x1a
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27__SHIFT 0x1b
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28__SHIFT 0x1c
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29__SHIFT 0x1d
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30__SHIFT 0x1e
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31__SHIFT 0x1f
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0_MASK 0x00000001L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1_MASK 0x00000002L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2_MASK 0x00000004L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3_MASK 0x00000008L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4_MASK 0x00000010L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5_MASK 0x00000020L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6_MASK 0x00000040L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7_MASK 0x00000080L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8_MASK 0x00000100L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9_MASK 0x00000200L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10_MASK 0x00000400L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11_MASK 0x00000800L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12_MASK 0x00001000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13_MASK 0x00002000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14_MASK 0x00004000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15_MASK 0x00008000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16_MASK 0x00010000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17_MASK 0x00020000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18_MASK 0x00040000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19_MASK 0x00080000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20_MASK 0x00100000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21_MASK 0x00200000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22_MASK 0x00400000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23_MASK 0x00800000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24_MASK 0x01000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25_MASK 0x02000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26_MASK 0x04000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27_MASK 0x08000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28_MASK 0x10000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29_MASK 0x20000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30_MASK 0x40000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31_MASK 0x80000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index d18162e9ed1d..f3d64c78feaa 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -139,6 +139,8 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_MIN_FAN_RPM,
AMDGPU_PP_SENSOR_MAX_FAN_RPM,
AMDGPU_PP_SENSOR_VCN_POWER_STATE,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
};
enum amd_pp_task {
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 49c398ec0aaf..d6d9e3b1b2c0 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7714,20 +7714,13 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err) {
DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
err, fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
}
return err;
-
}
static int si_dpm_sw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 304190d5c9d2..11b7b4cffaae 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -111,8 +111,7 @@ static int pp_sw_fini(void *handle)
hwmgr_sw_fini(hwmgr);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
return 0;
}
@@ -769,10 +768,16 @@ static int pp_dpm_read_sensor(void *handle, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
- *((uint32_t *)value) = hwmgr->pstate_sclk;
+ *((uint32_t *)value) = hwmgr->pstate_sclk * 100;
return 0;
case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
- *((uint32_t *)value) = hwmgr->pstate_mclk;
+ *((uint32_t *)value) = hwmgr->pstate_mclk * 100;
+ return 0;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
+ *((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
+ return 0;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
+ *((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
return 0;
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index ede71de2343d..86d6e88c7386 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -375,6 +375,17 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
return 0;
}
+static void smu10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
+ hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
+
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetMaxGfxclkFrequency,
+ &hwmgr->pstate_sclk_peak);
+ hwmgr->pstate_mclk_peak = SMU10_UMD_PSTATE_PEAK_FCLK;
+}
+
static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
@@ -398,6 +409,8 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
return ret;
}
+ smu10_populate_umdpstate_clocks(hwmgr);
+
return 0;
}
@@ -574,9 +587,6 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
- hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
-
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
/* disabled fine grain tuning function by default */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 7ef7e81525a3..6dd33a2cd987 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -1501,6 +1501,65 @@ static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
return ret;
}
+static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+ struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk =
+ table_info->vdd_dep_on_sclk;
+ int32_t tmp_sclk, count, percentage;
+
+ if (golden_dpm_table->mclk_table.count == 1) {
+ percentage = 70;
+ hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value;
+ } else {
+ percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+ hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
+ }
+
+ tmp_sclk = hwmgr->pstate_mclk * percentage / 100;
+
+ if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) {
+ if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) {
+ hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+ if (count < 0)
+ hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk;
+
+ hwmgr->pstate_sclk_peak =
+ vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk;
+ } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) {
+ if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) {
+ hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+ if (count < 0)
+ hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk;
+
+ hwmgr->pstate_sclk_peak =
+ vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk;
+ }
+
+ hwmgr->pstate_mclk_peak =
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+
+ /* make sure the output is in MHz */
+ hwmgr->pstate_sclk /= 100;
+ hwmgr->pstate_mclk /= 100;
+ hwmgr->pstate_sclk_peak /= 100;
+ hwmgr->pstate_mclk_peak /= 100;
+}
+
static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result = 0;
@@ -1625,6 +1684,8 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"pcie performance request failed!", result = tmp_result);
+ smu7_populate_umdpstate_clocks(hwmgr);
+
return 0;
}
@@ -3143,15 +3204,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
count >= 0; count--) {
if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
- tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
*sclk_mask = count;
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
*sclk_mask = 0;
- tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
- }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
@@ -3161,15 +3219,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
- tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
*sclk_mask = count;
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
*sclk_mask = 0;
- tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
- }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
@@ -3181,8 +3236,6 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
*mclk_mask = golden_dpm_table->mclk_table.count - 1;
*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
- hwmgr->pstate_sclk = tmp_sclk;
- hwmgr->pstate_mclk = tmp_mclk;
return 0;
}
@@ -3195,9 +3248,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
- if (hwmgr->pstate_sclk == 0)
- smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
-
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
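
To make the unit handling in smu7_populate_umdpstate_clocks() above concrete, here is a standalone sketch with made-up DPM table values, assuming (as the final /100 conversion suggests) that the table clocks are stored in 10 kHz units.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical golden DPM tables, clocks in 10 kHz units. */
	uint32_t sclk_levels[] = { 30000, 60000, 90000, 120000 };	/* up to 1200 MHz */
	uint32_t mclk_levels[] = { 30000, 80000, 150000 };		/* up to 1500 MHz */
	uint32_t sclk_deps[]   = { 30000, 60000, 80000, 120000 };	/* vdd_dep_on_sclk */

	uint32_t pstate_sclk = 0, pstate_mclk, percentage, tmp_sclk;
	int count;

	/* More than one MCLK level: take the second-highest MCLK and scale
	 * SCLK by the top-of-table SCLK/MCLK ratio. */
	percentage = 100 * sclk_levels[3] / mclk_levels[2];	/* 100 * 120000 / 150000 = 80 */
	pstate_mclk = mclk_levels[1];				/* 80000 -> 800 MHz */
	tmp_sclk = pstate_mclk * percentage / 100;		/* 64000 -> 640 MHz */

	/* Highest dependency entry not above tmp_sclk. */
	for (count = 3; count >= 0; count--) {
		if (tmp_sclk >= sclk_deps[count]) {
			pstate_sclk = sclk_deps[count];		/* 60000 -> 600 MHz */
			break;
		}
	}
	if (count < 0)
		pstate_sclk = sclk_deps[0];

	printf("pstate_sclk = %u MHz, pstate_mclk = %u MHz\n",
	       pstate_sclk / 100, pstate_mclk / 100);		/* 600 MHz, 800 MHz */
	return 0;
}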
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index b50fd4a4a3d1..b015a601b385 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1016,6 +1016,18 @@ static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
data->acp_boot_level = 0xff;
}
+static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct phm_clock_voltage_dependency_table *table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+ hwmgr->pstate_sclk = table->entries[0].clk / 100;
+ hwmgr->pstate_mclk = 0;
+
+ hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100;
+ hwmgr->pstate_mclk_peak = 0;
+}
+
static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
smu8_program_voting_clients(hwmgr);
@@ -1024,6 +1036,8 @@ static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
smu8_program_bootup_state(hwmgr);
smu8_reset_acp_boot_level(hwmgr);
+ smu8_populate_umdpstate_clocks(hwmgr);
+
return 0;
}
@@ -1167,8 +1181,6 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
data->sclk_dpm.soft_min_clk = table->entries[0].clk;
data->sclk_dpm.hard_min_clk = table->entries[0].clk;
- hwmgr->pstate_sclk = table->entries[0].clk;
- hwmgr->pstate_mclk = 0;
level = smu8_get_max_sclk_level(hwmgr) - 1;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index c8c9fb827bda..6f5161738bf8 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -3008,6 +3008,30 @@ static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool
return 0;
}
+static void vega10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+
+ if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
+ table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
+ hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
+ hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
+ } else {
+ hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+ hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[0].clk;
+ }
+
+ hwmgr->pstate_sclk_peak = table_info->vdd_dep_on_sclk->entries[table_info->vdd_dep_on_sclk->count - 1].clk;
+ hwmgr->pstate_mclk_peak = table_info->vdd_dep_on_mclk->entries[table_info->vdd_dep_on_mclk->count - 1].clk;
+
+ /* make sure the output is in MHz */
+ hwmgr->pstate_sclk /= 100;
+ hwmgr->pstate_mclk /= 100;
+ hwmgr->pstate_sclk_peak /= 100;
+ hwmgr->pstate_mclk_peak /= 100;
+}
+
static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -3082,6 +3106,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
result = tmp_result);
}
+ vega10_populate_umdpstate_clocks(hwmgr);
+
return result;
}
@@ -4169,8 +4195,6 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
- hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
- hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -4281,9 +4305,6 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
- if (hwmgr->pstate_sclk == 0)
- vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
-
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = vega10_force_dpm_highest(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index a2f4d6773d45..33f31461ea6c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -1026,6 +1026,25 @@ static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
return 0;
}
+static void vega12_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
+ struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
+
+ if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+ hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ } else {
+ hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[0].value;
+ hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[0].value;
+ }
+
+ hwmgr->pstate_sclk_peak = gfx_dpm_table->dpm_levels[gfx_dpm_table->count].value;
+ hwmgr->pstate_mclk_peak = mem_dpm_table->dpm_levels[mem_dpm_table->count].value;
+}
+
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -1077,6 +1096,9 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to setup default DPM tables!",
return result);
+
+ vega12_populate_umdpstate_clocks(hwmgr);
+
return result;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index b30684c84e20..2a5abac81b4a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -1555,26 +1555,23 @@ static int vega20_set_mclk_od(
return 0;
}
-static int vega20_populate_umdpstate_clocks(
- struct pp_hwmgr *hwmgr)
+static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
- hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
- hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
-
if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ } else {
+ hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
+ hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
}
- hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
- hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;
-
- return 0;
+ hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value;
+ hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value;
}
static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
@@ -1753,10 +1750,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"[EnableDPMTasks] Failed to initialize odn settings!",
return result);
- result = vega20_populate_umdpstate_clocks(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "[EnableDPMTasks] Failed to populate umdpstate clocks!",
- return result);
+ vega20_populate_umdpstate_clocks(hwmgr);
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 27f8d0e0e6a8..5ce433e2c16a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -809,6 +809,8 @@ struct pp_hwmgr {
uint32_t workload_prority[Workload_Policy_Max];
uint32_t workload_setting[Workload_Policy_Max];
bool gfxoff_state_changed_by_workload;
+ uint32_t pstate_sclk_peak;
+ uint32_t pstate_mclk_peak;
};
int hwmgr_early_init(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
index 88a5641465dc..7eeab84d421a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
@@ -250,9 +250,8 @@ static int smu10_smu_init(struct pp_hwmgr *hwmgr)
/* allocate space for watermarks table */
r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- sizeof(Watermarks_t),
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ sizeof(Watermarks_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
&priv->smu_tables.entry[SMU10_WMTABLE].handle,
&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
&priv->smu_tables.entry[SMU10_WMTABLE].table);
@@ -266,9 +265,8 @@ static int smu10_smu_init(struct pp_hwmgr *hwmgr)
/* allocate space for watermarks table */
r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- sizeof(DpmClocks_t),
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ sizeof(DpmClocks_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
&priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ca3beb5d8f27..ec52830dde24 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -623,6 +623,7 @@ static int smu_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu;
+ int r;
smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
if (!smu)
@@ -640,7 +641,10 @@ static int smu_early_init(void *handle)
adev->powerplay.pp_handle = smu;
adev->powerplay.pp_funcs = &swsmu_pm_funcs;
- return smu_set_funcs(adev);
+ r = smu_set_funcs(adev);
+ if (r)
+ return r;
+ return smu_init_microcode(smu);
}
static int smu_set_default_dpm_table(struct smu_context *smu)
@@ -1067,12 +1071,6 @@ static int smu_sw_init(void *handle)
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
- ret = smu_init_microcode(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to load smu firmware!\n");
- return ret;
- }
-
ret = smu_smc_table_sw_init(smu);
if (ret) {
dev_err(adev->dev, "Failed to sw init smc table!\n");
@@ -2473,6 +2471,14 @@ static int smu_read_sensor(void *handle,
*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
+ *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
+ *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
*size = 8;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index e8c6febb8b64..913d3a8d7e2f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -244,11 +244,6 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_13_0_dpm_table *single_dpm_table);
-int smu_v13_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value);
-
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu);
int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index ad66d57aa102..6492d69e2e60 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -93,7 +93,7 @@ static void smu_v11_0_poll_baco_exit(struct smu_context *smu)
int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- const char *chip_name;
+ char ucode_prefix[30];
char fw_name[SMU_FW_NAME_LEN];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
@@ -105,43 +105,11 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
(adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7))))
return 0;
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(11, 0, 0):
- chip_name = "navi10";
- break;
- case IP_VERSION(11, 0, 5):
- chip_name = "navi14";
- break;
- case IP_VERSION(11, 0, 9):
- chip_name = "navi12";
- break;
- case IP_VERSION(11, 0, 7):
- chip_name = "sienna_cichlid";
- break;
- case IP_VERSION(11, 0, 11):
- chip_name = "navy_flounder";
- break;
- case IP_VERSION(11, 0, 12):
- chip_name = "dimgrey_cavefish";
- break;
- case IP_VERSION(11, 0, 13):
- chip_name = "beige_goby";
- break;
- case IP_VERSION(11, 0, 2):
- chip_name = "arcturus";
- break;
- default:
- dev_err(adev->dev, "Unsupported IP version 0x%x\n",
- adev->ip_versions[MP1_HWIP][0]);
- return -EINVAL;
- }
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err)
goto out;
@@ -159,12 +127,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
}
out:
- if (err) {
- DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
+ if (err)
+ amdgpu_ucode_release(&adev->pm.fw);
return err;
}
@@ -172,8 +136,7 @@ void smu_v11_0_fini_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
adev->pm.fw_version = 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index e54b760b875b..78945e79dbee 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -88,7 +88,6 @@ static const int link_speed[] = {25, 50, 80, 160};
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- const char *chip_name;
char fw_name[30];
char ucode_prefix[30];
int err = 0;
@@ -100,21 +99,11 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(13, 0, 2):
- chip_name = "aldebaran_smc";
- break;
- default:
- amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- chip_name = ucode_prefix;
- }
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err)
goto out;
@@ -132,12 +121,8 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
}
out:
- if (err) {
- DRM_ERROR("smu_v13_0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
+ if (err)
+ amdgpu_ucode_release(&adev->pm.fw);
return err;
}
@@ -145,8 +130,7 @@ void smu_v13_0_fini_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
adev->pm.fw_version = 0;
}
@@ -1261,7 +1245,8 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t speed)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t tach_period, crystal_clock_freq;
+ uint32_t crystal_clock_freq = 2500;
+ uint32_t tach_period;
int ret;
if (!speed)
@@ -1271,7 +1256,6 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
if (ret)
return ret;
- crystal_clock_freq = amdgpu_asic_get_xclk(adev);
tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
@@ -2064,45 +2048,6 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
return 0;
}
-int smu_v13_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value)
-{
- uint32_t level_count = 0;
- int ret = 0;
-
- if (!min_value && !max_value)
- return -EINVAL;
-
- if (min_value) {
- /* by default, level 0 clock value as min value */
- ret = smu_v13_0_get_dpm_freq_by_index(smu,
- clk_type,
- 0,
- min_value);
- if (ret)
- return ret;
- }
-
- if (max_value) {
- ret = smu_v13_0_get_dpm_level_count(smu,
- clk_type,
- &level_count);
- if (ret)
- return ret;
-
- ret = smu_v13_0_get_dpm_freq_by_index(smu,
- clk_type,
- level_count - 1,
- max_value);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2298,6 +2243,10 @@ bool smu_v13_0_baco_is_support(struct smu_context *smu)
!smu_baco->platform_support)
return false;
+ /* return true if ASIC is in BACO state already */
+ if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+ return true;
+
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 9643b21c636a..969e5f965540 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -213,6 +213,7 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
FEA_MAP(SOC_PCC),
[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};
static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
@@ -240,6 +241,7 @@ static struct cmn2asic_mapping smu_v13_0_0_workload_map[PP_SMC_POWER_PROFILE_COU
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
};
static const uint8_t smu_v13_0_0_throttler_map[] = {
@@ -1555,7 +1557,7 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
title[0], title[1], title[2], title[3], title[4], title[5],
title[6], title[7], title[8], title[9]);
- for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
@@ -1617,7 +1619,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
smu->power_profile_mode = input[size];
- if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 5c6c6ad011ca..e87db7e02e8a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -192,6 +192,7 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
FEA_MAP(SOC_PCC),
[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+ [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};
static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 7043d1c9ed8f..e3507dd6f82a 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -195,8 +195,8 @@ static int hdlcd_setup_mode_config(struct drm_device *drm)
#ifdef CONFIG_DEBUG_FS
static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *drm = entry->dev;
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count));
@@ -208,8 +208,8 @@ static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *drm = entry->dev;
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
unsigned long clkrate = clk_get_rate(hdlcd->clk);
unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000;
@@ -219,17 +219,10 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
return 0;
}
-static struct drm_info_list hdlcd_debugfs_list[] = {
+static struct drm_debugfs_info hdlcd_debugfs_list[] = {
{ "interrupt_count", hdlcd_show_underrun_count, 0 },
{ "clocks", hdlcd_show_pxlclock, 0 },
};
-
-static void hdlcd_debugfs_init(struct drm_minor *minor)
-{
- drm_debugfs_create_files(hdlcd_debugfs_list,
- ARRAY_SIZE(hdlcd_debugfs_list),
- minor->debugfs_root, minor);
-}
#endif
DEFINE_DRM_GEM_DMA_FOPS(fops);
@@ -237,9 +230,6 @@ DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = hdlcd_debugfs_init,
-#endif
.fops = &fops,
.name = "hdlcd",
.desc = "ARM HDLCD Controller DRM",
@@ -303,6 +293,10 @@ static int hdlcd_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
+#ifdef CONFIG_DEBUG_FS
+ drm_debugfs_add_files(drm, hdlcd_debugfs_list, ARRAY_SIZE(hdlcd_debugfs_list));
+#endif
+
ret = drm_dev_register(drm, 0);
if (ret)
goto err_register;
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index d367a90cd3de..563fa7a3b546 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -4,6 +4,8 @@ config DRM_AST
depends on DRM && PCI && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
+ select I2C
+ select I2C_ALGOBIT
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c7443317c747..e82e9a8d85e5 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -636,7 +636,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR(ast_plane->vaddr);
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index a2bb5b916235..4e806b06d35d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -784,7 +784,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
@@ -815,10 +814,10 @@ static int atmel_hlcdc_dc_drm_resume(struct device *dev)
return drm_atomic_helper_resume(drm_dev, dc->suspend.state);
}
-#endif
-static SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops,
- atmel_hlcdc_dc_drm_suspend, atmel_hlcdc_dc_drm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops,
+ atmel_hlcdc_dc_drm_suspend,
+ atmel_hlcdc_dc_drm_resume);
static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
{ .compatible = "atmel,hlcdc-display-controller" },
@@ -830,7 +829,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = {
.remove = atmel_hlcdc_dc_drm_remove,
.driver = {
.name = "atmel-hlcdc-display-controller",
- .pm = &atmel_hlcdc_dc_drm_pm_ops,
+ .pm = pm_sleep_ptr(&atmel_hlcdc_dc_drm_pm_ops),
.of_match_table = atmel_hlcdc_dc_of_match,
},
};
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index e7a6e456ed0d..ddceafa7b637 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1185,8 +1185,9 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
-static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+static int adv7511_probe(struct i2c_client *i2c)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
struct adv7511_link_config link_config;
struct adv7511 *adv7511;
struct device *dev = &i2c->dev;
@@ -1392,7 +1393,7 @@ static struct i2c_driver adv7511_driver = {
.of_match_table = adv7511_of_ids,
},
.id_table = adv7511_i2c_ids,
- .probe = adv7511_probe,
+ .probe_new = adv7511_probe,
.remove = adv7511_remove,
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 660a54857929..339e0f05b260 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -692,8 +692,7 @@ static bool anx6345_get_chip_id(struct anx6345 *anx6345)
return false;
}
-static int anx6345_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int anx6345_i2c_probe(struct i2c_client *client)
{
struct anx6345 *anx6345;
struct device *dev;
@@ -817,7 +816,7 @@ static struct i2c_driver anx6345_driver = {
.name = "anx6345",
.of_match_table = of_match_ptr(anx6345_match_table),
},
- .probe = anx6345_i2c_probe,
+ .probe_new = anx6345_i2c_probe,
.remove = anx6345_i2c_remove,
.id_table = anx6345_id,
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 5997049fde5b..a3a38bbe2786 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1214,8 +1214,7 @@ static const u16 anx78xx_chipid_list[] = {
0x7818,
};
-static int anx78xx_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int anx78xx_i2c_probe(struct i2c_client *client)
{
struct anx78xx *anx78xx;
struct anx78xx_platform_data *pdata;
@@ -1390,7 +1389,7 @@ static struct i2c_driver anx78xx_driver = {
.name = "anx7814",
.of_match_table = of_match_ptr(anx78xx_match_table),
},
- .probe = anx78xx_i2c_probe,
+ .probe_new = anx78xx_i2c_probe,
.remove = anx78xx_i2c_remove,
.id_table = anx78xx_id,
};
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index b0ff1ecb80a5..b375887e655d 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1403,7 +1403,6 @@ static void anx7625_stop_dp_work(struct anx7625_data *ctx)
{
ctx->hpd_status = 0;
ctx->hpd_high_cnt = 0;
- ctx->display_timing_valid = 0;
}
static void anx7625_start_dp_work(struct anx7625_data *ctx)
@@ -2562,8 +2561,7 @@ static void anx7625_runtime_disable(void *data)
pm_runtime_disable(data);
}
-static int anx7625_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int anx7625_i2c_probe(struct i2c_client *client)
{
struct anx7625_data *platform;
struct anx7625_platform_data *pdata;
@@ -2756,7 +2754,7 @@ static struct i2c_driver anx7625_driver = {
.of_match_table = anx_match_table,
.pm = &anx7625_pm_ops,
},
- .probe = anx7625_i2c_probe,
+ .probe_new = anx7625_i2c_probe,
.remove = anx7625_i2c_remove,
.id_table = anx7625_id,
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index bf920c3503aa..0e37840cd7a8 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -740,8 +740,7 @@ static int chipone_dsi_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int chipone_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int chipone_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct chipone *icn;
@@ -796,7 +795,7 @@ static struct i2c_device_id chipone_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, chipone_i2c_id);
static struct i2c_driver chipone_i2c_driver = {
- .probe = chipone_i2c_probe,
+ .probe_new = chipone_i2c_probe,
.id_table = chipone_i2c_id,
.driver = {
.name = "chipone-icn6211-i2c",
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index b94f39a86846..339b759e4c81 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -528,8 +528,7 @@ static const struct regmap_config ch7033_regmap_config = {
.max_register = 0x7f,
};
-static int ch7033_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ch7033_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ch7033_priv *priv;
@@ -604,7 +603,7 @@ static const struct i2c_device_id ch7033_ids[] = {
MODULE_DEVICE_TABLE(i2c, ch7033_ids);
static struct i2c_driver ch7033_driver = {
- .probe = ch7033_probe,
+ .probe_new = ch7033_probe,
.remove = ch7033_remove,
.driver = {
.name = "ch7033",
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 21a9b8422bda..9cda2df21b88 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -412,7 +412,6 @@ struct it6505 {
* Mutex protects extcon and interrupt functions from interfering
* each other.
*/
- struct mutex irq_lock;
struct mutex extcon_lock;
struct mutex mode_lock; /* used to bridge_detect */
struct mutex aux_lock; /* used to aux data transfers */
@@ -438,6 +437,8 @@ struct it6505 {
bool powered;
bool hpd_state;
u32 afe_setting;
+ u32 max_dpi_pixel_clock;
+ u32 max_lane_count;
enum hdcp_state hdcp_status;
struct delayed_work hdcp_work;
struct work_struct hdcp_wait_ksv_list;
@@ -457,6 +458,8 @@ struct it6505 {
/* it6505 driver hold option */
bool enable_drv_hold;
+
+ struct edid *cached_edid;
};
struct it6505_step_train_para {
@@ -1463,7 +1466,8 @@ static void it6505_parse_link_capabilities(struct it6505 *it6505)
it6505->lane_count = link->num_lanes;
DRM_DEV_DEBUG_DRIVER(dev, "Sink support %d lanes training",
it6505->lane_count);
- it6505->lane_count = min_t(int, it6505->lane_count, MAX_LANE_COUNT);
+ it6505->lane_count = min_t(int, it6505->lane_count,
+ it6505->max_lane_count);
it6505->branch_device = drm_dp_is_branch(it6505->dpcd);
DRM_DEV_DEBUG_DRIVER(dev, "Sink %sbranch device",
@@ -2244,6 +2248,12 @@ static void it6505_plugged_status_to_codec(struct it6505 *it6505)
status == connector_status_connected);
}
+static void it6505_remove_edid(struct it6505 *it6505)
+{
+ kfree(it6505->cached_edid);
+ it6505->cached_edid = NULL;
+}
+
static int it6505_process_hpd_irq(struct it6505 *it6505)
{
struct device *dev = &it6505->client->dev;
@@ -2270,6 +2280,7 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
it6505_reset_logic(it6505);
it6505_int_mask_enable(it6505);
it6505_init(it6505);
+ it6505_remove_edid(it6505);
return 0;
}
@@ -2353,6 +2364,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
it6505_video_reset(it6505);
} else {
memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
+ it6505_remove_edid(it6505);
if (it6505->hdcp_desired)
it6505_stop_hdcp(it6505);
@@ -2494,10 +2506,8 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
};
int int_status[3], i;
- mutex_lock(&it6505->irq_lock);
-
- if (it6505->enable_drv_hold || !it6505->powered)
- goto unlock;
+ if (it6505->enable_drv_hold || pm_runtime_get_if_in_use(dev) <= 0)
+ return IRQ_HANDLED;
int_status[0] = it6505_read(it6505, INT_STATUS_01);
int_status[1] = it6505_read(it6505, INT_STATUS_02);
@@ -2515,16 +2525,14 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
if (it6505_test_bit(irq_vec[0].bit, (unsigned int *)int_status))
irq_vec[0].handler(it6505);
- if (!it6505->hpd_state)
- goto unlock;
-
- for (i = 1; i < ARRAY_SIZE(irq_vec); i++) {
- if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
- irq_vec[i].handler(it6505);
+ if (it6505->hpd_state) {
+ for (i = 1; i < ARRAY_SIZE(irq_vec); i++) {
+ if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
+ irq_vec[i].handler(it6505);
+ }
}
-unlock:
- mutex_unlock(&it6505->irq_lock);
+ pm_runtime_put_sync(dev);
return IRQ_HANDLED;
}
@@ -2902,7 +2910,7 @@ it6505_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
- if (mode->clock > DPI_PIXEL_CLK_MAX)
+ if (mode->clock > it6505->max_dpi_pixel_clock)
return MODE_CLOCK_HIGH;
it6505->video_info.clock = mode->clock;
@@ -3016,16 +3024,18 @@ static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge,
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = &it6505->client->dev;
- struct edid *edid;
- edid = drm_do_get_edid(connector, it6505_get_edid_block, it6505);
+ if (!it6505->cached_edid) {
+ it6505->cached_edid = drm_do_get_edid(connector, it6505_get_edid_block,
+ it6505);
- if (!edid) {
- DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!");
- return NULL;
+ if (!it6505->cached_edid) {
+ DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!");
+ return NULL;
+ }
}
- return edid;
+ return drm_edid_duplicate(it6505->cached_edid);
}
static const struct drm_bridge_funcs it6505_bridge_funcs = {
@@ -3089,10 +3099,32 @@ static int it6505_init_pdata(struct it6505 *it6505)
return 0;
}
+static int it6505_get_data_lanes_count(const struct device_node *endpoint,
+ const unsigned int min,
+ const unsigned int max)
+{
+ int ret;
+
+ ret = of_property_count_u32_elems(endpoint, "data-lanes");
+ if (ret < 0)
+ return ret;
+
+ if (ret < min || ret > max)
+ return -EINVAL;
+
+ return ret;
+}
+
static void it6505_parse_dt(struct it6505 *it6505)
{
struct device *dev = &it6505->client->dev;
+ struct device_node *np = dev->of_node, *ep = NULL;
+ int len;
+ u64 link_frequencies;
+ u32 data_lanes[4];
u32 *afe_setting = &it6505->afe_setting;
+ u32 *max_lane_count = &it6505->max_lane_count;
+ u32 *max_dpi_pixel_clock = &it6505->max_dpi_pixel_clock;
it6505->lane_swap_disabled =
device_property_read_bool(dev, "no-laneswap");
@@ -3108,7 +3140,56 @@ static void it6505_parse_dt(struct it6505 *it6505)
} else {
*afe_setting = 0;
}
- DRM_DEV_DEBUG_DRIVER(dev, "using afe_setting: %d", *afe_setting);
+
+ ep = of_graph_get_endpoint_by_regs(np, 1, 0);
+ of_node_put(ep);
+
+ if (ep) {
+ len = it6505_get_data_lanes_count(ep, 1, 4);
+
+ if (len > 0 && len != 3) {
+ of_property_read_u32_array(ep, "data-lanes",
+ data_lanes, len);
+ *max_lane_count = len;
+ } else {
+ *max_lane_count = MAX_LANE_COUNT;
+ dev_err(dev, "error data-lanes, use default");
+ }
+ } else {
+ *max_lane_count = MAX_LANE_COUNT;
+ dev_err(dev, "error endpoint, use default");
+ }
+
+ ep = of_graph_get_endpoint_by_regs(np, 0, 0);
+ of_node_put(ep);
+
+ if (ep) {
+ len = of_property_read_variable_u64_array(ep,
+ "link-frequencies",
+ &link_frequencies, 0,
+ 1);
+ if (len >= 0) {
+ do_div(link_frequencies, 1000);
+ if (link_frequencies > 297000) {
+ dev_err(dev,
+ "max pixel clock error, use default");
+ *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX;
+ } else {
+ *max_dpi_pixel_clock = link_frequencies;
+ }
+ } else {
+ dev_err(dev, "error link frequencies, use default");
+ *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX;
+ }
+ } else {
+ dev_err(dev, "error endpoint, use default");
+ *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "using afe_setting: %u, max_lane_count: %u",
+ it6505->afe_setting, it6505->max_lane_count);
+ DRM_DEV_DEBUG_DRIVER(dev, "using max_dpi_pixel_clock: %u kHz",
+ it6505->max_dpi_pixel_clock);
}
static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf,
@@ -3265,8 +3346,7 @@ static void it6505_shutdown(struct i2c_client *client)
it6505_lane_off(it6505);
}
-static int it6505_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int it6505_i2c_probe(struct i2c_client *client)
{
struct it6505 *it6505;
struct device *dev = &client->dev;
@@ -3277,7 +3357,6 @@ static int it6505_i2c_probe(struct i2c_client *client,
if (!it6505)
return -ENOMEM;
- mutex_init(&it6505->irq_lock);
mutex_init(&it6505->extcon_lock);
mutex_init(&it6505->mode_lock);
mutex_init(&it6505->aux_lock);
@@ -3367,6 +3446,7 @@ static void it6505_i2c_remove(struct i2c_client *client)
drm_dp_aux_unregister(&it6505->aux);
it6505_debugfs_remove(it6505);
it6505_poweroff(it6505);
+ it6505_remove_edid(it6505);
}
static const struct i2c_device_id it6505_id[] = {
@@ -3387,7 +3467,7 @@ static struct i2c_driver it6505_i2c_driver = {
.of_match_table = it6505_of_match,
.pm = &it6505_bridge_pm_ops,
},
- .probe = it6505_i2c_probe,
+ .probe_new = it6505_i2c_probe,
.remove = it6505_i2c_remove,
.shutdown = it6505_shutdown,
.id_table = it6505_id,
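The it6505 hunks above add EDID caching: the sink is read once, later .get_edid calls hand back a duplicate, and the cache is dropped on HPD changes and at remove. A reduced sketch of that pattern under a hypothetical driver context (struct and function names below are illustrative):

#include <drm/drm_edid.h>
#include <linux/slab.h>

struct foo_ctx {
	struct edid *cached_edid;		/* hypothetical driver context */
};

/* Called from HPD-change and remove paths to force a fresh read next time. */
static void foo_invalidate_edid(struct foo_ctx *ctx)
{
	kfree(ctx->cached_edid);		/* kfree(NULL) is a no-op */
	ctx->cached_edid = NULL;
}

static struct edid *foo_get_edid(struct foo_ctx *ctx,
				 struct drm_connector *connector,
				 int (*read_block)(void *, u8 *, unsigned int, size_t))
{
	/* Read the sink only once; hand each caller its own copy. */
	if (!ctx->cached_edid) {
		ctx->cached_edid = drm_do_get_edid(connector, read_block, ctx);
		if (!ctx->cached_edid)
			return NULL;
	}

	return drm_edid_duplicate(ctx->cached_edid);
}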
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 4f6f1deba28c..b34860871627 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -35,10 +35,6 @@
#define IT66121_DEVICE_ID0_REG 0x02
#define IT66121_DEVICE_ID1_REG 0x03
-#define IT66121_VENDOR_ID0 0x54
-#define IT66121_VENDOR_ID1 0x49
-#define IT66121_DEVICE_ID0 0x12
-#define IT66121_DEVICE_ID1 0x06
#define IT66121_REVISION_MASK GENMASK(7, 4)
#define IT66121_DEVICE_ID1_MASK GENMASK(3, 0)
@@ -72,6 +68,7 @@
#define IT66121_AFE_XP_ENO BIT(4)
#define IT66121_AFE_XP_RESETB BIT(3)
#define IT66121_AFE_XP_PWDI BIT(2)
+#define IT6610_AFE_XP_BYPASS BIT(0)
#define IT66121_AFE_IP_REG 0x64
#define IT66121_AFE_IP_GAINBIT BIT(7)
@@ -286,13 +283,18 @@
#define IT66121_AUD_SWL_16BIT 0x2
#define IT66121_AUD_SWL_NOT_INDICATED 0x0
-#define IT66121_VENDOR_ID0 0x54
-#define IT66121_VENDOR_ID1 0x49
-#define IT66121_DEVICE_ID0 0x12
-#define IT66121_DEVICE_ID1 0x06
-#define IT66121_DEVICE_MASK 0x0F
#define IT66121_AFE_CLK_HIGH 80000 /* Khz */
+enum chip_id {
+ ID_IT6610,
+ ID_IT66121,
+};
+
+struct it66121_chip_info {
+ enum chip_id id;
+ u16 vid, pid;
+};
+
struct it66121_ctx {
struct regmap *regmap;
struct drm_bridge bridge;
@@ -301,7 +303,6 @@ struct it66121_ctx {
struct device *dev;
struct gpio_desc *gpio_reset;
struct i2c_client *client;
- struct regulator_bulk_data supplies[3];
u32 bus_width;
struct mutex lock; /* Protects fields below and device registers */
struct hdmi_avi_infoframe hdmi_avi_infoframe;
@@ -312,6 +313,7 @@ struct it66121_ctx {
u8 swl;
bool auto_cts;
} audio;
+ const struct it66121_chip_info *info;
};
static const struct regmap_range_cfg it66121_regmap_banks[] = {
@@ -342,16 +344,6 @@ static void it66121_hw_reset(struct it66121_ctx *ctx)
gpiod_set_value(ctx->gpio_reset, 0);
}
-static inline int ite66121_power_on(struct it66121_ctx *ctx)
-{
- return regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
-static inline int ite66121_power_off(struct it66121_ctx *ctx)
-{
- return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
static inline int it66121_preamble_ddc(struct it66121_ctx *ctx)
{
return regmap_write(ctx->regmap, IT66121_MASTER_SEL_REG, IT66121_MASTER_SEL_HOST);
@@ -406,16 +398,22 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_GAINBIT |
- IT66121_AFE_IP_ER0 |
- IT66121_AFE_IP_EC1,
+ IT66121_AFE_IP_ER0,
IT66121_AFE_IP_GAINBIT);
if (ret)
return ret;
- ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
- IT66121_AFE_XP_EC1_LOWCLK, 0x80);
- if (ret)
- return ret;
+ if (ctx->info->id == ID_IT66121) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+ IT66121_AFE_IP_EC1, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
+ IT66121_AFE_XP_EC1_LOWCLK, 0x80);
+ if (ret)
+ return ret;
+ }
} else {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
IT66121_AFE_XP_GAINBIT |
@@ -426,17 +424,24 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_GAINBIT |
- IT66121_AFE_IP_ER0 |
- IT66121_AFE_IP_EC1, IT66121_AFE_IP_ER0 |
- IT66121_AFE_IP_EC1);
+ IT66121_AFE_IP_ER0,
+ IT66121_AFE_IP_ER0);
if (ret)
return ret;
- ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
- IT66121_AFE_XP_EC1_LOWCLK,
- IT66121_AFE_XP_EC1_LOWCLK);
- if (ret)
- return ret;
+ if (ctx->info->id == ID_IT66121) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+ IT66121_AFE_IP_EC1,
+ IT66121_AFE_IP_EC1);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
+ IT66121_AFE_XP_EC1_LOWCLK,
+ IT66121_AFE_XP_EC1_LOWCLK);
+ if (ret)
+ return ret;
+ }
}
/* Clear reset flags */
@@ -445,38 +450,36 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
+ if (ctx->info->id == ID_IT6610) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+ IT6610_AFE_XP_BYPASS,
+ IT6610_AFE_XP_BYPASS);
+ if (ret)
+ return ret;
+ }
+
return it66121_fire_afe(ctx);
}
static inline int it66121_wait_ddc_ready(struct it66121_ctx *ctx)
{
int ret, val;
- u32 busy = IT66121_DDC_STATUS_NOACK | IT66121_DDC_STATUS_WAIT_BUS |
- IT66121_DDC_STATUS_ARBI_LOSE;
+ u32 error = IT66121_DDC_STATUS_NOACK | IT66121_DDC_STATUS_WAIT_BUS |
+ IT66121_DDC_STATUS_ARBI_LOSE;
+ u32 done = IT66121_DDC_STATUS_TX_DONE;
- ret = regmap_read_poll_timeout(ctx->regmap, IT66121_DDC_STATUS_REG, val, true,
- IT66121_EDID_SLEEP_US, IT66121_EDID_TIMEOUT_US);
+ ret = regmap_read_poll_timeout(ctx->regmap, IT66121_DDC_STATUS_REG, val,
+ val & (error | done), IT66121_EDID_SLEEP_US,
+ IT66121_EDID_TIMEOUT_US);
if (ret)
return ret;
- if (val & busy)
+ if (val & error)
return -EAGAIN;
return 0;
}
-static int it66121_clear_ddc_fifo(struct it66121_ctx *ctx)
-{
- int ret;
-
- ret = it66121_preamble_ddc(ctx);
- if (ret)
- return ret;
-
- return regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
- IT66121_DDC_COMMAND_FIFO_CLR);
-}
-
static int it66121_abort_ddc_ops(struct it66121_ctx *ctx)
{
int ret;
@@ -516,7 +519,6 @@ static int it66121_get_edid_block(void *context, u8 *buf,
unsigned int block, size_t len)
{
struct it66121_ctx *ctx = context;
- unsigned int val;
int remain = len;
int offset = 0;
int ret, cnt;
@@ -524,26 +526,9 @@ static int it66121_get_edid_block(void *context, u8 *buf,
offset = (block % 2) * len;
block = block / 2;
- ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
- if (ret)
- return ret;
-
- if (val & IT66121_INT_STATUS1_DDC_BUSHANG) {
- ret = it66121_abort_ddc_ops(ctx);
- if (ret)
- return ret;
- }
-
- ret = it66121_clear_ddc_fifo(ctx);
- if (ret)
- return ret;
-
while (remain > 0) {
cnt = (remain > IT66121_EDID_FIFO_SIZE) ?
IT66121_EDID_FIFO_SIZE : remain;
- ret = it66121_preamble_ddc(ctx);
- if (ret)
- return ret;
ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
IT66121_DDC_COMMAND_FIFO_CLR);
@@ -554,25 +539,6 @@ static int it66121_get_edid_block(void *context, u8 *buf,
if (ret)
return ret;
- ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
- if (ret)
- return ret;
-
- if (val & IT66121_INT_STATUS1_DDC_BUSHANG) {
- ret = it66121_abort_ddc_ops(ctx);
- if (ret)
- return ret;
- }
-
- ret = it66121_preamble_ddc(ctx);
- if (ret)
- return ret;
-
- ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
- IT66121_DDC_HEADER_EDID);
- if (ret)
- return ret;
-
ret = regmap_write(ctx->regmap, IT66121_DDC_OFFSET_REG, offset);
if (ret)
return ret;
@@ -593,20 +559,18 @@ static int it66121_get_edid_block(void *context, u8 *buf,
offset += cnt;
remain -= cnt;
- /* Per programming manual, sleep here before emptying the FIFO */
- msleep(20);
-
ret = it66121_wait_ddc_ready(ctx);
+ if (ret) {
+ it66121_abort_ddc_ops(ctx);
+ return ret;
+ }
+
+ ret = regmap_noinc_read(ctx->regmap, IT66121_DDC_RD_FIFO_REG,
+ buf, cnt);
if (ret)
return ret;
- do {
- ret = regmap_read(ctx->regmap, IT66121_DDC_RD_FIFO_REG, &val);
- if (ret)
- return ret;
- *(buf++) = val;
- cnt--;
- } while (cnt > 0);
+ buf += cnt;
}
return 0;
@@ -635,10 +599,12 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
- ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
- IT66121_CLK_BANK_PWROFF_RCLK, 0);
- if (ret)
- return ret;
+ if (ctx->info->id == ID_IT66121) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_RCLK, 0);
+ if (ret)
+ return ret;
+ }
ret = regmap_write_bits(ctx->regmap, IT66121_INT_REG,
IT66121_INT_TX_CLK_OFF, 0);
@@ -684,11 +650,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
/* Per programming manual, sleep here for bridge to settle */
msleep(50);
- /* Start interrupts */
- return regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG,
- IT66121_INT_MASK1_DDC_NOACK |
- IT66121_INT_MASK1_DDC_FIFOERR |
- IT66121_INT_MASK1_DDC_BUSHANG, 0);
+ return 0;
}
static int it66121_set_mute(struct it66121_ctx *ctx, bool mute)
@@ -780,29 +742,32 @@ static void it66121_bridge_disable(struct drm_bridge *bridge,
ctx->connector = NULL;
}
+static int it66121_bridge_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+
+ if (ctx->info->id == ID_IT6610) {
+ /* The IT6610 only supports these settings */
+ bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
+ bridge_state->input_bus_cfg.flags &=
+ ~DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
+ }
+
+ return 0;
+}
+
static
void it66121_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- int ret, i;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
- const u16 aviinfo_reg[HDMI_AVI_INFOFRAME_SIZE] = {
- IT66121_AVIINFO_DB1_REG,
- IT66121_AVIINFO_DB2_REG,
- IT66121_AVIINFO_DB3_REG,
- IT66121_AVIINFO_DB4_REG,
- IT66121_AVIINFO_DB5_REG,
- IT66121_AVIINFO_DB6_REG,
- IT66121_AVIINFO_DB7_REG,
- IT66121_AVIINFO_DB8_REG,
- IT66121_AVIINFO_DB9_REG,
- IT66121_AVIINFO_DB10_REG,
- IT66121_AVIINFO_DB11_REG,
- IT66121_AVIINFO_DB12_REG,
- IT66121_AVIINFO_DB13_REG
- };
+ int ret;
mutex_lock(&ctx->lock);
@@ -822,10 +787,12 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
}
/* Write new AVI infoframe packet */
- for (i = 0; i < HDMI_AVI_INFOFRAME_SIZE; i++) {
- if (regmap_write(ctx->regmap, aviinfo_reg[i], buf[i + HDMI_INFOFRAME_HEADER_SIZE]))
- goto unlock;
- }
+ ret = regmap_bulk_write(ctx->regmap, IT66121_AVIINFO_DB1_REG,
+ &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_AVI_INFOFRAME_SIZE);
+ if (ret)
+ goto unlock;
+
if (regmap_write(ctx->regmap, IT66121_AVIINFO_CSUM_REG, buf[3]))
goto unlock;
@@ -838,9 +805,12 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI))
goto unlock;
- if (regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
- IT66121_CLK_BANK_PWROFF_TXCLK, IT66121_CLK_BANK_PWROFF_TXCLK))
+ if (ctx->info->id == ID_IT66121 &&
+ regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_TXCLK,
+ IT66121_CLK_BANK_PWROFF_TXCLK)) {
goto unlock;
+ }
if (it66121_configure_input(ctx))
goto unlock;
@@ -848,7 +818,11 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (it66121_configure_afe(ctx, adjusted_mode))
goto unlock;
- regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, 0);
+ if (ctx->info->id == ID_IT66121 &&
+ regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_TXCLK, 0)) {
+ goto unlock;
+ }
unlock:
mutex_unlock(&ctx->lock);
@@ -906,9 +880,25 @@ static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
struct edid *edid;
+ int ret;
mutex_lock(&ctx->lock);
+ ret = it66121_preamble_ddc(ctx);
+ if (ret) {
+ edid = ERR_PTR(ret);
+ goto out_unlock;
+ }
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
+ IT66121_DDC_HEADER_EDID);
+ if (ret) {
+ edid = ERR_PTR(ret);
+ goto out_unlock;
+ }
+
edid = drm_do_get_edid(connector, it66121_get_edid_block, ctx);
+
+out_unlock:
mutex_unlock(&ctx->lock);
return edid;
@@ -923,6 +913,7 @@ static const struct drm_bridge_funcs it66121_bridge_funcs = {
.atomic_get_input_bus_fmts = it66121_bridge_atomic_get_input_bus_fmts,
.atomic_enable = it66121_bridge_enable,
.atomic_disable = it66121_bridge_disable,
+ .atomic_check = it66121_bridge_check,
.mode_set = it66121_bridge_mode_set,
.mode_valid = it66121_bridge_mode_valid,
.detect = it66121_bridge_detect,
@@ -952,21 +943,14 @@ static irqreturn_t it66121_irq_threaded_handler(int irq, void *dev_id)
ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
if (ret) {
dev_err(dev, "Cannot read STATUS1_REG %d\n", ret);
- } else {
- if (val & IT66121_INT_STATUS1_DDC_FIFOERR)
- it66121_clear_ddc_fifo(ctx);
- if (val & (IT66121_INT_STATUS1_DDC_BUSHANG |
- IT66121_INT_STATUS1_DDC_NOACK))
- it66121_abort_ddc_ops(ctx);
- if (val & IT66121_INT_STATUS1_HPD_STATUS) {
- regmap_write_bits(ctx->regmap, IT66121_INT_CLR1_REG,
- IT66121_INT_CLR1_HPD, IT66121_INT_CLR1_HPD);
+ } else if (val & IT66121_INT_STATUS1_HPD_STATUS) {
+ regmap_write_bits(ctx->regmap, IT66121_INT_CLR1_REG,
+ IT66121_INT_CLR1_HPD, IT66121_INT_CLR1_HPD);
- status = it66121_is_hpd_detect(ctx) ? connector_status_connected
- : connector_status_disconnected;
+ status = it66121_is_hpd_detect(ctx) ? connector_status_connected
+ : connector_status_disconnected;
- event = true;
- }
+ event = true;
}
regmap_write_bits(ctx->regmap, IT66121_SYS_STATUS_REG,
@@ -1512,9 +1496,13 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
return PTR_ERR_OR_ZERO(ctx->audio.pdev);
}
-static int it66121_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const char * const it66121_supplies[] = {
+ "vcn33", "vcn18", "vrf12"
+};
+
+static int it66121_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
struct device_node *ep;
int ret;
@@ -1536,6 +1524,7 @@ static int it66121_probe(struct i2c_client *client,
ctx->dev = dev;
ctx->client = client;
+ ctx->info = (const struct it66121_chip_info *) id->driver_data;
of_property_read_u32(ep, "bus-width", &ctx->bus_width);
of_node_put(ep);
@@ -1565,26 +1554,18 @@ static int it66121_probe(struct i2c_client *client,
i2c_set_clientdata(client, ctx);
mutex_init(&ctx->lock);
- ctx->supplies[0].supply = "vcn33";
- ctx->supplies[1].supply = "vcn18";
- ctx->supplies[2].supply = "vrf12";
- ret = devm_regulator_bulk_get(ctx->dev, 3, ctx->supplies);
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(it66121_supplies),
+ it66121_supplies);
if (ret) {
- dev_err(ctx->dev, "regulator_bulk failed\n");
+ dev_err(dev, "Failed to enable power supplies\n");
return ret;
}
- ret = ite66121_power_on(ctx);
- if (ret)
- return ret;
-
it66121_hw_reset(ctx);
ctx->regmap = devm_regmap_init_i2c(client, &it66121_regmap_config);
- if (IS_ERR(ctx->regmap)) {
- ite66121_power_off(ctx);
+ if (IS_ERR(ctx->regmap))
return PTR_ERR(ctx->regmap);
- }
regmap_read(ctx->regmap, IT66121_VENDOR_ID0_REG, &vendor_ids[0]);
regmap_read(ctx->regmap, IT66121_VENDOR_ID1_REG, &vendor_ids[1]);
@@ -1595,9 +1576,8 @@ static int it66121_probe(struct i2c_client *client,
revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]);
device_ids[1] &= IT66121_DEVICE_ID1_MASK;
- if (vendor_ids[0] != IT66121_VENDOR_ID0 || vendor_ids[1] != IT66121_VENDOR_ID1 ||
- device_ids[0] != IT66121_DEVICE_ID0 || device_ids[1] != IT66121_DEVICE_ID1) {
- ite66121_power_off(ctx);
+ if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid ||
+ (device_ids[1] << 8 | device_ids[0]) != ctx->info->pid) {
return -ENODEV;
}
@@ -1610,7 +1590,6 @@ static int it66121_probe(struct i2c_client *client,
IRQF_ONESHOT, dev_name(dev), ctx);
if (ret < 0) {
dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
- ite66121_power_off(ctx);
return ret;
}
@@ -1627,19 +1606,32 @@ static void it66121_remove(struct i2c_client *client)
{
struct it66121_ctx *ctx = i2c_get_clientdata(client);
- ite66121_power_off(ctx);
drm_bridge_remove(&ctx->bridge);
mutex_destroy(&ctx->lock);
}
static const struct of_device_id it66121_dt_match[] = {
{ .compatible = "ite,it66121" },
+ { .compatible = "ite,it6610" },
{ }
};
MODULE_DEVICE_TABLE(of, it66121_dt_match);
+static const struct it66121_chip_info it66121_chip_info = {
+ .id = ID_IT66121,
+ .vid = 0x4954,
+ .pid = 0x0612,
+};
+
+static const struct it66121_chip_info it6610_chip_info = {
+ .id = ID_IT6610,
+ .vid = 0xca00,
+ .pid = 0x0611,
+};
+
static const struct i2c_device_id it66121_id[] = {
- { "it66121", 0 },
+ { "it66121", (kernel_ulong_t) &it66121_chip_info },
+ { "it6610", (kernel_ulong_t) &it6610_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, it66121_id);
@@ -1649,7 +1641,7 @@ static struct i2c_driver it66121_driver = {
.name = "it66121",
.of_match_table = it66121_dt_match,
},
- .probe = it66121_probe,
+ .probe_new = it66121_probe,
.remove = it66121_remove,
.id_table = it66121_id,
};
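The it66121 changes key all IT6610-versus-IT66121 behaviour off a chip-info structure carried in i2c_device_id.driver_data. A stripped-down sketch of that lookup, with hypothetical names and placeholder ID values:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>

enum foo_chip { ID_FOO_A, ID_FOO_B };

struct foo_chip_info {
	enum foo_chip id;
	u16 vid, pid;			/* IDs expected when reading back the chip */
};

static const struct foo_chip_info foo_a_info = { .id = ID_FOO_A, .vid = 0x1111, .pid = 0x0001 };
static const struct foo_chip_info foo_b_info = { .id = ID_FOO_B, .vid = 0x2222, .pid = 0x0002 };

static const struct i2c_device_id foo_ids[] = {
	{ "foo-a", (kernel_ulong_t)&foo_a_info },
	{ "foo-b", (kernel_ulong_t)&foo_b_info },
	{ }
};

static int foo_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(client);
	const struct foo_chip_info *info;

	if (!id)
		return -ENODEV;

	info = (const struct foo_chip_info *)id->driver_data;
	/* Variant-specific register writes are then gated on info->id. */
	return 0;
}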
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index a98efef0ba0e..2019a8167d69 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -517,14 +517,27 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
return 0;
}
+static void lt8912_bridge_hpd_cb(void *data, enum drm_connector_status status)
+{
+ struct lt8912 *lt = data;
+
+ if (lt->bridge.dev)
+ drm_helper_hpd_irq_event(lt->bridge.dev);
+}
+
static int lt8912_bridge_connector_init(struct drm_bridge *bridge)
{
int ret;
struct lt8912 *lt = bridge_to_lt8912(bridge);
struct drm_connector *connector = &lt->connector;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_hpd_enable(lt->hdmi_port, lt8912_bridge_hpd_cb, lt);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
ret = drm_connector_init(bridge->dev, connector,
&lt8912_connector_funcs,
@@ -578,6 +591,10 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
if (lt->is_attached) {
lt8912_hard_power_off(lt);
+
+ if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
+ drm_bridge_hpd_disable(lt->hdmi_port);
+
drm_connector_unregister(&lt->connector);
drm_connector_cleanup(&lt->connector);
}
@@ -685,8 +702,7 @@ static int lt8912_put_dt(struct lt8912 *lt)
return 0;
}
-static int lt8912_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt8912_probe(struct i2c_client *client)
{
static struct lt8912 *lt;
int ret = 0;
@@ -758,7 +774,7 @@ static struct i2c_driver lt8912_i2c_driver = {
.name = "lt8912",
.of_match_table = lt8912_dt_match,
},
- .probe = lt8912_probe,
+ .probe_new = lt8912_probe,
.remove = lt8912_remove,
.id_table = lt8912_id,
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 933ca028d612..3e19fff6547a 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -720,8 +720,7 @@ static int lt9211_host_attach(struct lt9211 *ctx)
return 0;
}
-static int lt9211_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt9211_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lt9211 *ctx;
@@ -786,7 +785,7 @@ static const struct of_device_id lt9211_match_table[] = {
MODULE_DEVICE_TABLE(of, lt9211_match_table);
static struct i2c_driver lt9211_driver = {
- .probe = lt9211_probe,
+ .probe_new = lt9211_probe,
.remove = lt9211_remove,
.id_table = lt9211_id,
.driver = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 7c0a99173b39..3ce4e495aee5 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -259,6 +259,7 @@ static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode
{ 0x8126, 0x55 },
{ 0x8127, 0x66 },
{ 0x8128, 0x88 },
+ { 0x812a, 0x20 },
};
regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
@@ -1108,8 +1109,7 @@ static void lt9611_audio_exit(struct lt9611 *lt9611)
}
}
-static int lt9611_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt9611_probe(struct i2c_client *client)
{
struct lt9611 *lt9611;
struct device *dev = &client->dev;
@@ -1248,7 +1248,7 @@ static struct i2c_driver lt9611_driver = {
.name = "lt9611",
.of_match_table = lt9611_match_table,
},
- .probe = lt9611_probe,
+ .probe_new = lt9611_probe,
.remove = lt9611_remove,
.id_table = lt9611_id,
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index fa1ee6264d92..583daacf3705 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -844,8 +844,7 @@ static const struct attribute_group *lt9611uxc_attr_groups[] = {
NULL,
};
-static int lt9611uxc_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt9611uxc_probe(struct i2c_client *client)
{
struct lt9611uxc *lt9611uxc;
struct device *dev = &client->dev;
@@ -1012,7 +1011,7 @@ static struct i2c_driver lt9611uxc_driver = {
.of_match_table = lt9611uxc_match_table,
.dev_groups = lt9611uxc_attr_groups,
},
- .probe = lt9611uxc_probe,
+ .probe_new = lt9611uxc_probe,
.remove = lt9611uxc_remove,
.id_table = lt9611uxc_id,
};
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 97359f807bfc..4fc494d9084b 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -336,8 +336,7 @@ static int ge_b850v3_register(void)
"ge-b850v3-lvds-dp", ge_b850v3_lvds_ptr);
}
-static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
- const struct i2c_device_id *id)
+static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c)
{
struct device *dev = &stdp4028_i2c->dev;
int ret;
@@ -376,7 +375,7 @@ MODULE_DEVICE_TABLE(of, stdp4028_ge_b850v3_fw_match);
static struct i2c_driver stdp4028_ge_b850v3_fw_driver = {
.id_table = stdp4028_ge_b850v3_fw_i2c_table,
- .probe = stdp4028_ge_b850v3_fw_probe,
+ .probe_new = stdp4028_ge_b850v3_fw_probe,
.remove = stdp4028_ge_b850v3_fw_remove,
.driver = {
.name = "stdp4028-ge-b850v3-fw",
@@ -384,8 +383,7 @@ static struct i2c_driver stdp4028_ge_b850v3_fw_driver = {
},
};
-static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
- const struct i2c_device_id *id)
+static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c)
{
struct device *dev = &stdp2690_i2c->dev;
int ret;
@@ -424,7 +422,7 @@ MODULE_DEVICE_TABLE(of, stdp2690_ge_b850v3_fw_match);
static struct i2c_driver stdp2690_ge_b850v3_fw_driver = {
.id_table = stdp2690_ge_b850v3_fw_i2c_table,
- .probe = stdp2690_ge_b850v3_fw_probe,
+ .probe_new = stdp2690_ge_b850v3_fw_probe,
.remove = stdp2690_ge_b850v3_fw_remove,
.driver = {
.name = "stdp2690-ge-b850v3-fw",
@@ -440,7 +438,11 @@ static int __init stdpxxxx_ge_b850v3_init(void)
if (ret)
return ret;
- return i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
+ ret = i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
+ if (ret)
+ i2c_del_driver(&stdp4028_ge_b850v3_fw_driver);
+
+ return ret;
}
module_init(stdpxxxx_ge_b850v3_init);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 0851101a8c72..cd292a2f894c 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -257,8 +257,7 @@ static const struct drm_bridge_funcs ptn3460_bridge_funcs = {
.get_edid = ptn3460_get_edid,
};
-static int ptn3460_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ptn3460_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ptn3460_bridge *ptn_bridge;
@@ -336,7 +335,7 @@ MODULE_DEVICE_TABLE(of, ptn3460_match);
static struct i2c_driver ptn3460_driver = {
.id_table = ptn3460_i2c_table,
- .probe = ptn3460_probe,
+ .probe_new = ptn3460_probe,
.remove = ptn3460_remove,
.driver = {
.name = "nxp,ptn3460",
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 216af76d0042..e8aae3cdc73d 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -357,13 +357,16 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
return ERR_PTR(-ENOMEM);
bridge = drm_panel_bridge_add_typed(panel, connector_type);
- if (!IS_ERR(bridge)) {
- *ptr = bridge;
- devres_add(dev, ptr);
- } else {
+ if (IS_ERR(bridge)) {
devres_free(ptr);
+ return bridge;
}
+ bridge->pre_enable_prev_first = panel->prepare_prev_first;
+
+ *ptr = bridge;
+ devres_add(dev, ptr);
+
return bridge;
}
EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
@@ -402,6 +405,8 @@ struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
if (ret)
return ERR_PTR(ret);
+ bridge->pre_enable_prev_first = panel->prepare_prev_first;
+
return bridge;
}
EXPORT_SYMBOL(drmm_panel_bridge_add);
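With devm_drm_panel_bridge_add_typed() and drmm_panel_bridge_add() now copying the panel's prepare_prev_first flag into the bridge's pre_enable_prev_first, a DSI panel can ask for its host to be brought up first (see the DSI bridge operations documentation added later in this series). A hedged sketch of how a panel driver might opt in; apart from the prepare_prev_first field, the names below are hypothetical:

#include <drm/drm_connector.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <linux/slab.h>

struct foo_panel {
	struct drm_panel panel;
};

static const struct drm_panel_funcs foo_panel_funcs = {
	/* prepare/enable/get_modes omitted in this sketch */
};

static int foo_panel_probe(struct mipi_dsi_device *dsi)
{
	struct foo_panel *ctx;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	drm_panel_init(&ctx->panel, &dsi->dev, &foo_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	/*
	 * Ask for prepare() to run before the upstream DSI host's pre_enable();
	 * the panel-bridge glue above copies this into pre_enable_prev_first.
	 */
	ctx->panel.prepare_prev_first = true;

	drm_panel_add(&ctx->panel);

	return mipi_dsi_attach(dsi);
}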
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 309de802863d..530ee6a19e7e 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -442,9 +442,9 @@ static const struct of_device_id ps8622_devices[] = {
};
MODULE_DEVICE_TABLE(of, ps8622_devices);
-static int ps8622_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ps8622_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct ps8622_bridge *ps8622;
struct drm_bridge *panel_bridge;
@@ -538,7 +538,7 @@ MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table);
static struct i2c_driver ps8622_driver = {
.id_table = ps8622_i2c_table,
- .probe = ps8622_probe,
+ .probe_new = ps8622_probe,
.remove = ps8622_remove,
.driver = {
.name = "ps8622",
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 6a614e54b383..4b361d7d5e44 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -15,6 +15,7 @@
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
@@ -442,7 +443,8 @@ static const struct dev_pm_ops ps8640_pm_ops = {
pm_runtime_force_resume)
};
-static void ps8640_pre_enable(struct drm_bridge *bridge)
+static void ps8640_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
@@ -476,7 +478,8 @@ static void ps8640_pre_enable(struct drm_bridge *bridge)
ps_bridge->pre_enabled = true;
}
-static void ps8640_post_disable(struct drm_bridge *bridge)
+static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
@@ -554,7 +557,7 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
* EDID, for this chip, we need to do a full poweron, otherwise it will
* fail.
*/
- drm_bridge_chain_pre_enable(bridge);
+ drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
edid = drm_get_edid(connector,
ps_bridge->page[PAGE0_DP_CNTL]->adapter);
@@ -564,7 +567,7 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
* before, return the chip to its original power state.
*/
if (poweroff)
- drm_bridge_chain_post_disable(bridge);
+ drm_atomic_bridge_chain_post_disable(bridge, connector->state->state);
return edid;
}
@@ -579,8 +582,11 @@ static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.attach = ps8640_bridge_attach,
.detach = ps8640_bridge_detach,
.get_edid = ps8640_bridge_get_edid,
- .post_disable = ps8640_post_disable,
- .pre_enable = ps8640_pre_enable,
+ .atomic_post_disable = ps8640_atomic_post_disable,
+ .atomic_pre_enable = ps8640_atomic_pre_enable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
};
static int ps8640_bridge_get_dsi_resources(struct device *dev, struct ps8640 *ps_bridge)
@@ -734,13 +740,13 @@ static int ps8640_probe(struct i2c_client *client)
pm_runtime_enable(dev);
/*
* Powering on ps8640 takes ~300ms. To avoid wasting time on power
- * cycling ps8640 too often, set autosuspend_delay to 1000ms to ensure
+ * cycling ps8640 too often, set autosuspend_delay to 2000ms to ensure
* the bridge wouldn't suspend in between each _aux_transfer_msg() call
* during EDID read (~20ms in my experiment) and in between the last
* _aux_transfer_msg() call during EDID read and the _pre_enable() call
* (~100ms in my experiment).
*/
- pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_use_autosuspend(dev);
pm_suspend_ignore_children(dev, true);
ret = devm_add_action_or_reset(dev, ps8640_runtime_disable, dev);
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 878fb7d3732b..d212ff7f7a87 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -171,7 +171,6 @@ struct sii902x {
struct drm_connector connector;
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
- struct regulator_bulk_data supplies[2];
bool sink_is_hdmi;
/*
* Mutex protects audio and video functions from interfering
@@ -1066,12 +1065,12 @@ static int sii902x_init(struct sii902x *sii902x)
return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
}
-static int sii902x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii902x_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *endpoint;
struct sii902x *sii902x;
+ static const char * const supplies[] = {"iovcc", "cvcc12"};
int ret;
ret = i2c_check_functionality(client->adapter,
@@ -1122,27 +1121,11 @@ static int sii902x_probe(struct i2c_client *client,
mutex_init(&sii902x->mutex);
- sii902x->supplies[0].supply = "iovcc";
- sii902x->supplies[1].supply = "cvcc12";
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supplies), supplies);
if (ret < 0)
- return ret;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
- if (ret < 0) {
- dev_err_probe(dev, ret, "Failed to enable supplies");
- return ret;
- }
+ return dev_err_probe(dev, ret, "Failed to enable supplies");
- ret = sii902x_init(sii902x);
- if (ret < 0) {
- regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
- }
-
- return ret;
+ return sii902x_init(sii902x);
}
static void sii902x_remove(struct i2c_client *client)
@@ -1152,8 +1135,6 @@ static void sii902x_remove(struct i2c_client *client)
i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge);
- regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
}
static const struct of_device_id sii902x_dt_ids[] = {
@@ -1169,7 +1150,7 @@ static const struct i2c_device_id sii902x_i2c_ids[] = {
MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
static struct i2c_driver sii902x_driver = {
- .probe = sii902x_probe,
+ .probe_new = sii902x_probe,
.remove = sii902x_remove,
.driver = {
.name = "sii902x",
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 5b3061d4b5c3..099b510ff285 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -886,8 +886,7 @@ static const struct drm_bridge_funcs sii9234_bridge_funcs = {
.mode_valid = sii9234_mode_valid,
};
-static int sii9234_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii9234_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct sii9234 *ctx;
@@ -961,7 +960,7 @@ static struct i2c_driver sii9234_driver = {
.name = "sii9234",
.of_match_table = sii9234_dt_match,
},
- .probe = sii9234_probe,
+ .probe_new = sii9234_probe,
.remove = sii9234_remove,
.id_table = sii9234_id,
};
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 511982a1cedb..b96d03cd878d 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2284,8 +2284,7 @@ static const struct drm_bridge_funcs sii8620_bridge_funcs = {
.mode_valid = sii8620_mode_valid,
};
-static int sii8620_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii8620_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct sii8620 *ctx;
@@ -2379,7 +2378,7 @@ static struct i2c_driver sii8620_driver = {
.name = "sii8620",
.of_match_table = of_match_ptr(sii8620_dt_match),
},
- .probe = sii8620_probe,
+ .probe_new = sii8620_probe,
.remove = sii8620_remove,
.id_table = sii8620_id,
};
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 2a58eb271f70..a4725efe812d 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -2029,7 +2029,7 @@ static void tc_clk_disable(void *data)
clk_disable_unprepare(refclk);
}
-static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tc_data *tc;
@@ -2209,7 +2209,7 @@ static struct i2c_driver tc358767_driver = {
.of_match_table = tc358767_of_ids,
},
.id_table = tc358767_i2c_ids,
- .probe = tc_probe,
+ .probe_new = tc_probe,
.remove = tc_remove,
};
module_i2c_driver(tc358767_driver);
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 4c4b77ce8aba..839b8832b9b5 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -1018,8 +1018,7 @@ static int tc358768_get_regulators(struct tc358768_priv *priv)
return ret;
}
-static int tc358768_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tc358768_i2c_probe(struct i2c_client *client)
{
struct tc358768_priv *priv;
struct device *dev = &client->dev;
@@ -1085,7 +1084,7 @@ static struct i2c_driver tc358768_driver = {
.of_match_table = tc358768_of_ids,
},
.id_table = tc358768_i2c_ids,
- .probe = tc358768_i2c_probe,
+ .probe_new = tc358768_i2c_probe,
.remove = tc358768_i2c_remove,
};
module_i2c_driver(tc358768_driver);
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 3ceb0e9f9bdc..91b5e1207c47 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -637,7 +637,7 @@ static int tc_attach_host(struct tc_data *tc)
return 0;
}
-static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tc_data *tc;
@@ -729,7 +729,7 @@ static struct i2c_driver tc358775_driver = {
.of_match_table = tc358775_of_ids,
},
.id_table = tc358775_i2c_ids,
- .probe = tc_probe,
+ .probe_new = tc_probe,
.remove = tc_remove,
};
module_i2c_driver(tc358775_driver);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 7ba9467fff12..91ecfbe45bf9 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -346,7 +346,7 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
/* Deassert reset */
gpiod_set_value_cansleep(ctx->enable_gpio, 1);
- usleep_range(1000, 1100);
+ usleep_range(10000, 11000);
/* Get the LVDS format from the bridge state. */
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
@@ -653,9 +653,9 @@ static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
return 0;
}
-static int sn65dsi83_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sn65dsi83_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
enum sn65dsi83_model model;
struct sn65dsi83 *ctx;
@@ -730,7 +730,7 @@ static const struct of_device_id sn65dsi83_match_table[] = {
MODULE_DEVICE_TABLE(of, sn65dsi83_match_table);
static struct i2c_driver sn65dsi83_driver = {
- .probe = sn65dsi83_probe,
+ .probe_new = sn65dsi83_probe,
.remove = sn65dsi83_remove,
.id_table = sn65dsi83_id,
.driver = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 05f8756d1aaf..1e26fa63845a 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1852,8 +1852,7 @@ static int ti_sn65dsi86_parse_regulators(struct ti_sn65dsi86 *pdata)
pdata->supplies);
}
-static int ti_sn65dsi86_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ti_sn65dsi86_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ti_sn65dsi86 *pdata;
@@ -1952,7 +1951,7 @@ static struct i2c_driver ti_sn65dsi86_driver = {
.of_match_table = ti_sn65dsi86_match_table,
.pm = &ti_sn65dsi86_pm_ops,
},
- .probe = ti_sn65dsi86_probe,
+ .probe_new = ti_sn65dsi86_probe,
.id_table = ti_sn65dsi86_id,
};
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index b9635abbad16..6db69df0e18b 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -379,8 +379,7 @@ static struct platform_driver tfp410_platform_driver = {
#if IS_ENABLED(CONFIG_I2C)
/* There is currently no i2c functionality. */
-static int tfp410_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tfp410_i2c_probe(struct i2c_client *client)
{
int reg;
@@ -411,7 +410,7 @@ static struct i2c_driver tfp410_i2c_driver = {
.of_match_table = of_match_ptr(tfp410_match),
},
.id_table = tfp410_i2c_ids,
- .probe = tfp410_i2c_probe,
+ .probe_new = tfp410_i2c_probe,
.remove = tfp410_i2c_remove,
};
#endif /* IS_ENABLED(CONFIG_I2C) */
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 51a46689cda7..5861b0a6247b 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -3309,8 +3309,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
int ret;
port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
- if (!port)
+ if (!port) {
+ drm_dbg_kms(mgr->dev,
+ "VCPI %d for port %p not in topology, not creating a payload\n",
+ payload->vcpi, payload->port);
+ payload->vc_start_slot = -1;
return 0;
+ }
if (mgr->payload_count == 0)
mgr->next_start_slot = mst_state->start_slot;
@@ -3641,6 +3646,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
+
+ memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
+ memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
}
out_unlock:
@@ -3853,7 +3861,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
- goto out;
+ goto out_clear_reply;
/* Multi-packet message transmission, don't clear the reply */
if (!msg->have_eomt)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f197f59f6d99..5457c02ca1ab 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -880,7 +880,7 @@ EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
* or NULL if the private_obj is not part of the global atomic state.
*/
struct drm_private_state *
-drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
int i;
@@ -902,7 +902,7 @@ EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state);
* or NULL if the private_obj is not part of the global atomic state.
*/
struct drm_private_state *
-drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
int i;
@@ -934,7 +934,7 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
* not connected.
*/
struct drm_connector *
-drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_connector_state *conn_state;
@@ -968,7 +968,7 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
* not connected.
*/
struct drm_connector *
-drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_connector_state *conn_state;
@@ -1117,7 +1117,7 @@ EXPORT_SYMBOL(drm_atomic_get_bridge_state);
* the bridge is not part of the global atomic state.
*/
struct drm_bridge_state *
-drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge)
{
struct drm_private_state *obj_state;
@@ -1139,7 +1139,7 @@ EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
* the bridge is not part of the global atomic state.
*/
struct drm_bridge_state *
-drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge)
{
struct drm_private_state *obj_state;
@@ -1756,8 +1756,8 @@ EXPORT_SYMBOL(drm_state_dump);
#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
__drm_state_dump(dev, &p, true);
@@ -1766,14 +1766,13 @@ static int drm_state_info(struct seq_file *m, void *data)
}
/* any use in debugfs files to dump individual planes/crtc/etc? */
-static const struct drm_info_list drm_atomic_debugfs_list[] = {
+static const struct drm_debugfs_info drm_atomic_debugfs_list[] = {
{"state", drm_state_info, 0},
};
void drm_atomic_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_atomic_debugfs_list,
- ARRAY_SIZE(drm_atomic_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list));
}
#endif
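drm_atomic.c is converted from the drm_info_list/drm_debugfs_create_files() interface to the device-level drm_debugfs_info/drm_debugfs_add_files() one, where the show callback receives a drm_debugfs_entry whose ->dev is the drm_device. A reduced sketch of the same registration, with hypothetical names:

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

static int foo_state_show(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;	/* filled in by the helper */
	struct drm_device *dev = entry->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	drm_printf(&p, "%s\n", dev->unique);
	return 0;
}

static const struct drm_debugfs_info foo_debugfs_list[] = {
	{ "foo_state", foo_state_show, 0 },
};

/* Call once the drm_device has been allocated, e.g. from driver init. */
static void foo_debugfs_init(struct drm_device *dev)
{
	drm_debugfs_add_files(dev, foo_debugfs_list, ARRAY_SIZE(foo_debugfs_list));
}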
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index dfb57217253b..22251c5f6a8a 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -482,6 +482,130 @@ void drm_atomic_helper_connector_tv_margins_reset(struct drm_connector *connecto
EXPORT_SYMBOL(drm_atomic_helper_connector_tv_margins_reset);
/**
+ * drm_atomic_helper_connector_tv_reset - Resets Analog TV connector properties
+ * @connector: DRM connector
+ *
+ * Resets the analog TV properties attached to a connector
+ */
+void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
+ struct drm_connector_state *state = connector->state;
+ struct drm_property *prop;
+ uint64_t val;
+
+ prop = dev->mode_config.tv_mode_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.mode = val;
+
+ if (cmdline->tv_mode_specified)
+ state->tv.mode = cmdline->tv_mode;
+
+ prop = dev->mode_config.tv_select_subconnector_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.select_subconnector = val;
+
+ prop = dev->mode_config.tv_subconnector_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.subconnector = val;
+
+ prop = dev->mode_config.tv_brightness_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.brightness = val;
+
+ prop = dev->mode_config.tv_contrast_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.contrast = val;
+
+ prop = dev->mode_config.tv_flicker_reduction_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.flicker_reduction = val;
+
+ prop = dev->mode_config.tv_overscan_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.overscan = val;
+
+ prop = dev->mode_config.tv_saturation_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.saturation = val;
+
+ prop = dev->mode_config.tv_hue_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.hue = val;
+
+ drm_atomic_helper_connector_tv_margins_reset(connector);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_tv_reset);
+
+/**
+ * drm_atomic_helper_connector_tv_check - Validate an analog TV connector state
+ * @connector: DRM Connector
+ * @state: the DRM State object
+ *
+ * Checks the state object to see if the requested state is valid for an
+ * analog TV connector.
+ *
+ * Returns:
+ * Zero for success, a negative error code on error.
+ */
+int drm_atomic_helper_connector_tv_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+
+ crtc = new_conn_state->crtc;
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!crtc_state)
+ return -EINVAL;
+
+ if (old_conn_state->tv.mode != new_conn_state->tv.mode)
+ crtc_state->mode_changed = true;
+
+ if (old_conn_state->tv.margins.left != new_conn_state->tv.margins.left ||
+ old_conn_state->tv.margins.right != new_conn_state->tv.margins.right ||
+ old_conn_state->tv.margins.top != new_conn_state->tv.margins.top ||
+ old_conn_state->tv.margins.bottom != new_conn_state->tv.margins.bottom ||
+ old_conn_state->tv.mode != new_conn_state->tv.mode ||
+ old_conn_state->tv.brightness != new_conn_state->tv.brightness ||
+ old_conn_state->tv.contrast != new_conn_state->tv.contrast ||
+ old_conn_state->tv.flicker_reduction != new_conn_state->tv.flicker_reduction ||
+ old_conn_state->tv.overscan != new_conn_state->tv.overscan ||
+ old_conn_state->tv.saturation != new_conn_state->tv.saturation ||
+ old_conn_state->tv.hue != new_conn_state->tv.hue)
+ crtc_state->connectors_changed = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_tv_check);
+
+/**
* __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
* @connector: connector object
* @state: atomic connector state
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c06d0639d552..d867e7f9f2cd 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -698,6 +698,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->tv.margins.top = val;
} else if (property == config->tv_bottom_margin_property) {
state->tv.margins.bottom = val;
+ } else if (property == config->legacy_tv_mode_property) {
+ state->tv.legacy_mode = val;
} else if (property == config->tv_mode_property) {
state->tv.mode = val;
} else if (property == config->tv_brightness_property) {
@@ -808,6 +810,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->tv.margins.top;
} else if (property == config->tv_bottom_margin_property) {
*val = state->tv.margins.bottom;
+ } else if (property == config->legacy_tv_mode_property) {
+ *val = state->tv.legacy_mode;
} else if (property == config->tv_mode_property) {
*val = state->tv.mode;
} else if (property == config->tv_brightness_property) {
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index b4c8cab7158c..6e74de833466 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -450,8 +450,8 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
int i, n = 0;
int ret = 0;
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] calculating normalized zpos values\n",
+ crtc->base.id, crtc->name);
states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
@@ -469,9 +469,8 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
goto done;
}
states[n++] = plane_state;
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] processing zpos value %d\n",
- plane->base.id, plane->name,
- plane_state->zpos);
+ drm_dbg_atomic(dev, "[PLANE:%d:%s] processing zpos value %d\n",
+ plane->base.id, plane->name, plane_state->zpos);
}
sort(states, n, sizeof(*states), drm_atomic_state_zpos_cmp, NULL);
@@ -480,8 +479,8 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
plane = states[i]->plane;
states[i]->normalized_zpos = i;
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] normalized zpos value %d\n",
- plane->base.id, plane->name, i);
+ drm_dbg_atomic(dev, "[PLANE:%d:%s] normalized zpos value %d\n",
+ plane->base.id, plane->name, i);
}
crtc_state->zpos_changed = true;
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 1545c50fd1c8..c3d69af02e79 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -153,6 +153,45 @@
* situation when probing.
*/
+/**
+ * DOC: dsi bridge operations
+ *
+ * DSI host interfaces are expected to be implemented as bridges rather than
+ * encoders, however there are a few aspects of their operation that need to
+ * be defined in order to provide a consistent interface.
+ *
+ * A DSI host should keep the PHY powered down until the pre_enable operation is
+ * called. All lanes are in an undefined idle state up to this point, and it
+ * must not be assumed that it is LP-11.
+ * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
+ * clock lane to either LP-11 or HS depending on the mode_flag
+ * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
+ *
+ * Ordinarily the downstream bridge DSI peripheral pre_enable will have been
+ * called before the DSI host. If the DSI peripheral requires LP-11 and/or
+ * the clock lane to be in HS mode prior to pre_enable, then it can set the
+ * &pre_enable_prev_first flag to request the pre_enable (and
+ * post_disable) order to be altered to enable the DSI host first.
+ *
+ * Either the CRTC being enabled, or the DSI host enable operation should switch
+ * the host to actively transmitting video on the data lanes.
+ *
+ * The reverse also applies. The DSI host disable operation or stopping the CRTC
+ * should stop transmitting video, and the data lanes should return to the LP-11
+ * state. The DSI host &post_disable operation should disable the PHY.
+ * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
+ * bridge &post_disable will be called before the DSI host's post_disable.
+ *
+ * Whilst it is valid to call &host_transfer prior to pre_enable or after
+ * post_disable, the exact state of the lanes is undefined at this point. The
+ * DSI host should initialise the interface, transmit the data, and then disable
+ * the interface again.
+ *
+ * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
+ * implemented, it therefore needs to be handled entirely within the DSI Host
+ * driver.
+ */
+
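For illustration only (not part of this patch), a minimal sketch of how a downstream DSI peripheral bridge driver might opt into the ordering described above; the mydsi_* names are hypothetical and the funcs table is assumed to exist elsewhere:

struct mydsi {
	struct drm_bridge bridge;
};

static int mydsi_probe(struct mipi_dsi_device *dsi)
{
	struct mydsi *ctx;
	int ret;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->bridge.funcs = &mydsi_bridge_funcs;	/* defined elsewhere */
	ctx->bridge.of_node = dsi->dev.of_node;
	/* Ask for the DSI host's pre_enable to run before ours (and our
	 * post_disable to run before the host's), so the lanes are already
	 * in LP-11 when this device is powered up.
	 */
	ctx->bridge.pre_enable_prev_first = true;

	drm_bridge_add(&ctx->bridge);

	ret = mipi_dsi_attach(dsi);
	if (ret)
		drm_bridge_remove(&ctx->bridge);

	return ret;
}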
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
@@ -510,61 +549,6 @@ drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
/**
- * drm_bridge_chain_disable - disables all bridges in the encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.disable op for all the bridges in the encoder
- * chain, starting from the last bridge to the first. These are called before
- * calling the encoder's prepare op.
- *
- * Note: the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_disable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
- struct drm_bridge *iter;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->disable)
- iter->funcs->disable(iter);
-
- if (iter == bridge)
- break;
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_disable);
-
-/**
- * drm_bridge_chain_post_disable - cleans up after disabling all bridges in the
- * encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.post_disable op for all the bridges in the
- * encoder chain, starting from the first bridge to the last. These are called
- * after completing the encoder's prepare op.
- *
- * Note: the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_post_disable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->post_disable)
- bridge->funcs->post_disable(bridge);
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_post_disable);
-
-/**
* drm_bridge_chain_mode_set - set proposed mode for all bridges in the
* encoder chain
* @bridge: bridge control structure
@@ -594,61 +578,6 @@ void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
- * drm_bridge_chain_pre_enable - prepares for enabling all bridges in the
- * encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.pre_enable op for all the bridges in the encoder
- * chain, starting from the last bridge to the first. These are called
- * before calling the encoder's commit op.
- *
- * Note: the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
- struct drm_bridge *iter;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->pre_enable)
- iter->funcs->pre_enable(iter);
-
- if (iter == bridge)
- break;
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
-
-/**
- * drm_bridge_chain_enable - enables all bridges in the encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.enable op for all the bridges in the encoder
- * chain, starting from the first bridge to the last. These are called
- * after completing the encoder's commit op.
- *
- * Note that the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_enable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->enable)
- bridge->funcs->enable(bridge);
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_enable);
-
-/**
* drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
* @old_state: old atomic state
@@ -691,6 +620,25 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
+static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
+{
+ if (old_state && bridge->funcs->atomic_post_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_post_disable(bridge,
+ old_bridge_state);
+ } else if (bridge->funcs->post_disable) {
+ bridge->funcs->post_disable(bridge);
+ }
+}
+
/**
* drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
* in the encoder chain
@@ -702,36 +650,86 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
* starting from the first bridge to the last. These are called after completing
* &drm_encoder_helper_funcs.atomic_disable
*
+ * If a bridge sets @pre_enable_prev_first, then its @post_disable will be
+ * called before that of the previous bridge, reversing the @pre_enable
+ * calling direction.
+ *
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct drm_encoder *encoder;
+ struct drm_bridge *next, *limit;
if (!bridge)
return;
encoder = bridge->encoder;
+
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_post_disable) {
- struct drm_bridge_state *old_bridge_state;
+ limit = NULL;
+
+ if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
+ next = list_next_entry(bridge, chain_node);
+
+ if (next->pre_enable_prev_first) {
+ /* The next bridge requested that the previous one
+ * be enabled first, so it must be disabled last.
+ */
+ limit = next;
+
+ /* Find the next bridge that has NOT requested
+ * prev to be enabled first / disabled last
+ */
+ list_for_each_entry_from(next, &encoder->bridge_chain,
+ chain_node) {
+ if (next->pre_enable_prev_first) {
+ next = list_prev_entry(next, chain_node);
+ limit = next;
+ break;
+ }
+ }
+
+ /* Call these bridges in reverse order */
+ list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
+ chain_node) {
+ if (next == bridge)
+ break;
+
+ drm_atomic_bridge_call_post_disable(next,
+ old_state);
+ }
+ }
+ }
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
+ drm_atomic_bridge_call_post_disable(bridge, old_state);
- bridge->funcs->atomic_post_disable(bridge,
- old_bridge_state);
- } else if (bridge->funcs->post_disable) {
- bridge->funcs->post_disable(bridge);
- }
+ if (limit)
+ /* Jump all bridges that we have already post_disabled */
+ bridge = limit;
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
+static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
+{
+ if (old_state && bridge->funcs->atomic_pre_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
+ } else if (bridge->funcs->pre_enable) {
+ bridge->funcs->pre_enable(bridge);
+ }
+}
+
/**
* drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
* the encoder chain
@@ -743,32 +741,60 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
* starting from the last bridge to the first. These are called before calling
* &drm_encoder_helper_funcs.atomic_enable
*
+ * If a bridge sets @pre_enable_prev_first, then the previous bridge's
+ * pre_enable will be called before this bridge's pre_enable.
+ *
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct drm_encoder *encoder;
- struct drm_bridge *iter;
+ struct drm_bridge *iter, *next, *limit;
if (!bridge)
return;
encoder = bridge->encoder;
+
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_pre_enable) {
- struct drm_bridge_state *old_bridge_state;
+ if (iter->pre_enable_prev_first) {
+ next = iter;
+ limit = bridge;
+ list_for_each_entry_from_reverse(next,
+ &encoder->bridge_chain,
+ chain_node) {
+ if (next == bridge)
+ break;
+
+ if (!next->pre_enable_prev_first) {
+ /* Found first bridge that does NOT
+ * request prev to be enabled first
+ */
+ limit = list_prev_entry(next, chain_node);
+ break;
+ }
+ }
+
+ list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
+ /* Call pre_enable, in chain order, on the bridges
+ * that requested the previous one to go first.
+ */
+ if (next == iter)
+ /* Reached the first bridge that requested
+ * previous bridges to be called first.
+ */
+ break;
+
+ drm_atomic_bridge_call_pre_enable(next, old_state);
+ }
+ }
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- iter);
- if (WARN_ON(!old_bridge_state))
- return;
+ drm_atomic_bridge_call_pre_enable(iter, old_state);
- iter->funcs->atomic_pre_enable(iter, old_bridge_state);
- } else if (iter->funcs->pre_enable) {
- iter->funcs->pre_enable(iter);
- }
+ if (iter->pre_enable_prev_first)
+ /* Jump all bridges that we have already pre_enabled */
+ iter = limit;
if (iter == bridge)
break;
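As a worked illustration of the ordering intended by the two chain helpers above (an editorial example, not part of the patch): for an encoder chain ENC -> A -> B -> C, where A is closest to the encoder and only B sets pre_enable_prev_first, the documented call order is:

/*
 * without the flag:   pre_enable C, B, A;   post_disable A, B, C
 * with B's flag set:  pre_enable C, A, B;   post_disable B, A, C
 *
 * i.e. A (the bridge previous to B) is pre-enabled before B and
 * post-disabled after B, so it is powered up before B and torn down
 * after B.
 */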
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 1c7d936523df..19ae4a177ac3 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -128,14 +128,7 @@ static void drm_bridge_connector_hpd_cb(void *cb_data,
drm_kms_helper_hotplug_event(dev);
}
-/**
- * drm_bridge_connector_enable_hpd - Enable hot-plug detection for the connector
- * @connector: The DRM bridge connector
- *
- * This function enables hot-plug detection for the given bridge connector.
- * This is typically used by display drivers in their resume handler.
- */
-void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
+static void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
@@ -145,17 +138,8 @@ void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
drm_bridge_hpd_enable(hpd, drm_bridge_connector_hpd_cb,
bridge_connector);
}
-EXPORT_SYMBOL_GPL(drm_bridge_connector_enable_hpd);
-/**
- * drm_bridge_connector_disable_hpd - Disable hot-plug detection for the
- * connector
- * @connector: The DRM bridge connector
- *
- * This function disables hot-plug detection for the given bridge connector.
- * This is typically used by display drivers in their suspend handler.
- */
-void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
+static void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
@@ -164,7 +148,6 @@ void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
if (hpd)
drm_bridge_hpd_disable(hpd);
}
-EXPORT_SYMBOL_GPL(drm_bridge_connector_disable_hpd);
/* -----------------------------------------------------------------------------
* Bridge Connector Functions
@@ -305,6 +288,8 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
.get_modes = drm_bridge_connector_get_modes,
/* No need for .mode_valid(), the bridges are checked by the core. */
+ .enable_hpd = drm_bridge_connector_enable_hpd,
+ .disable_hpd = drm_bridge_connector_disable_hpd,
};
/* -----------------------------------------------------------------------------
@@ -387,10 +372,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc);
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
- if (bridge_connector->bridge_hpd) {
+ if (bridge_connector->bridge_hpd)
connector->polled = DRM_CONNECTOR_POLL_HPD;
- drm_bridge_connector_enable_hpd(connector);
- }
else if (bridge_connector->bridge_detect)
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fd67efe37c63..262ec64d4397 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -480,8 +480,8 @@ EXPORT_SYMBOL(drm_client_framebuffer_flush);
#ifdef CONFIG_DEBUG_FS
static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_client_dev *client;
@@ -493,14 +493,13 @@ static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_client_debugfs_list[] = {
+static const struct drm_debugfs_info drm_client_debugfs_list[] = {
{ "internal_clients", drm_client_debugfs_internal_clients, 0 },
};
void drm_client_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_client_debugfs_list,
- ARRAY_SIZE(drm_client_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_client_debugfs_list,
+ ARRAY_SIZE(drm_client_debugfs_list));
}
#endif
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index d553e793e673..1b12a3c201a3 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -188,10 +188,6 @@ static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_conne
prefer_non_interlace = !cmdline_mode->interlace;
again:
list_for_each_entry(mode, &connector->modes, head) {
- /* Check (optional) mode name first */
- if (!strcmp(mode->name, cmdline_mode->name))
- return mode;
-
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
mode->vdisplay != cmdline_mode->yres)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 547356e00341..8d92777e57dd 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -984,6 +984,41 @@ static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
drm_dvi_i_subconnector_enum_list)
+static const struct drm_prop_enum_list drm_tv_mode_enum_list[] = {
+ { DRM_MODE_TV_MODE_NTSC, "NTSC" },
+ { DRM_MODE_TV_MODE_NTSC_443, "NTSC-443" },
+ { DRM_MODE_TV_MODE_NTSC_J, "NTSC-J" },
+ { DRM_MODE_TV_MODE_PAL, "PAL" },
+ { DRM_MODE_TV_MODE_PAL_M, "PAL-M" },
+ { DRM_MODE_TV_MODE_PAL_N, "PAL-N" },
+ { DRM_MODE_TV_MODE_SECAM, "SECAM" },
+};
+DRM_ENUM_NAME_FN(drm_get_tv_mode_name, drm_tv_mode_enum_list)
+
+/**
+ * drm_get_tv_mode_from_name - Translates a TV mode name into its enum value
+ * @name: TV Mode name we want to convert
+ * @len: Length of @name
+ *
+ * Translates @name into an enum drm_connector_tv_mode.
+ *
+ * Returns: the enum value on success, a negative errno otherwise.
+ */
+int drm_get_tv_mode_from_name(const char *name, size_t len)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_tv_mode_enum_list); i++) {
+ const struct drm_prop_enum_list *item = &drm_tv_mode_enum_list[i];
+
+ if (strlen(item->name) == len && !strncmp(item->name, name, len))
+ return item->type;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(drm_get_tv_mode_from_name);
+
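A minimal usage sketch for the helper above (illustration only; tv_mode_str is a hypothetical string, e.g. taken from a module parameter):

	const char *tv_mode_str = "PAL";
	int tv_mode;

	tv_mode = drm_get_tv_mode_from_name(tv_mode_str, strlen(tv_mode_str));
	if (tv_mode < 0)
		return tv_mode;		/* -EINVAL: unknown mode name */
	/* tv_mode now holds DRM_MODE_TV_MODE_PAL */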
static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -1552,6 +1587,71 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
* infoframe values is done through drm_hdmi_avi_infoframe_content_type().
*/
+/*
+ * TODO: Document the properties:
+ * - left margin
+ * - right margin
+ * - top margin
+ * - bottom margin
+ * - brightness
+ * - contrast
+ * - flicker reduction
+ * - hue
+ * - mode
+ * - overscan
+ * - saturation
+ * - select subconnector
+ * - subconnector
+ */
+/**
+ * DOC: Analog TV Connector Properties
+ *
+ * TV Mode:
+ * Indicates the TV Mode used on an analog TV connector. The value
+ * of this property can be one of the following:
+ *
+ * NTSC:
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the NTSC Color Encoding.
+ *
+ * NTSC-443:
+ *
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the NTSC Color Encoding, but with a color subcarrier
+ * frequency of 4.43MHz.
+ *
+ * NTSC-J:
+ *
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the NTSC Color Encoding, but with a black level equal to
+ * the blanking level.
+ *
+ * PAL:
+ *
+ * TV Mode is CCIR System B (aka 625-lines) together with
+ * the PAL Color Encoding.
+ *
+ * PAL-M:
+ *
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the PAL Color Encoding.
+ *
+ * PAL-N:
+ *
+ * TV Mode is CCIR System N together with the PAL Color
+ * Encoding, a color subcarrier frequency of 3.58MHz, the
+ * SECAM color space, and narrower channels than other PAL
+ * variants.
+ *
+ * SECAM:
+ *
+ * TV Mode is CCIR System B (aka 625-lines) together with
+ * the SECAM Color Encoding.
+ *
+ * Drivers can set up this property by calling
+ * drm_mode_create_tv_properties().
+ */
+
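To illustrate the last paragraph above (sketch only, not from this patch; connector is a hypothetical &drm_connector the driver has already initialised):

	int ret;

	ret = drm_mode_create_tv_properties(dev,
					    BIT(DRM_MODE_TV_MODE_NTSC) |
					    BIT(DRM_MODE_TV_MODE_PAL));
	if (ret)
		return ret;

	drm_object_attach_property(&connector->base,
				   dev->mode_config.tv_mode_property,
				   DRM_MODE_TV_MODE_NTSC);	/* initial value */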
/**
* drm_connector_attach_content_type_property - attach content-type property
* @connector: connector to attach content type property on.
@@ -1604,7 +1704,7 @@ EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties);
* Called by a driver's HDMI connector initialization routine, this function
* creates the TV margin properties for a given device. No need to call this
* function for an SDTV connector, it's already called from
- * drm_mode_create_tv_properties().
+ * drm_mode_create_tv_properties_legacy().
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -1639,7 +1739,7 @@ int drm_mode_create_tv_margin_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
/**
- * drm_mode_create_tv_properties - create TV specific connector properties
+ * drm_mode_create_tv_properties_legacy - create TV specific connector properties
* @dev: DRM device
* @num_modes: number of different TV formats (modes) supported
* @modes: array of pointers to strings containing name of each format
@@ -1649,12 +1749,16 @@ EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
* responsible for allocating a list of format names and passing them to
* this routine.
*
+ * NOTE: This function registers the deprecated "mode" connector
+ * property to select the analog TV mode (i.e., NTSC, PAL, etc.). New
+ * drivers must use drm_mode_create_tv_properties() instead.
+ *
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_mode_create_tv_properties(struct drm_device *dev,
- unsigned int num_modes,
- const char * const modes[])
+int drm_mode_create_tv_properties_legacy(struct drm_device *dev,
+ unsigned int num_modes,
+ const char * const modes[])
{
struct drm_property *tv_selector;
struct drm_property *tv_subconnector;
@@ -1690,15 +1794,17 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
if (drm_mode_create_tv_margin_properties(dev))
goto nomem;
- dev->mode_config.tv_mode_property =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "mode", num_modes);
- if (!dev->mode_config.tv_mode_property)
- goto nomem;
+ if (num_modes) {
+ dev->mode_config.legacy_tv_mode_property =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", num_modes);
+ if (!dev->mode_config.legacy_tv_mode_property)
+ goto nomem;
- for (i = 0; i < num_modes; i++)
- drm_property_add_enum(dev->mode_config.tv_mode_property,
- i, modes[i]);
+ for (i = 0; i < num_modes; i++)
+ drm_property_add_enum(dev->mode_config.legacy_tv_mode_property,
+ i, modes[i]);
+ }
dev->mode_config.tv_brightness_property =
drm_property_create_range(dev, 0, "brightness", 0, 100);
@@ -1734,6 +1840,47 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
nomem:
return -ENOMEM;
}
+EXPORT_SYMBOL(drm_mode_create_tv_properties_legacy);
+
+/**
+ * drm_mode_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @supported_tv_modes: Bitmask of TV modes supported (See DRM_MODE_TV_MODE_*)
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int supported_tv_modes)
+{
+ struct drm_prop_enum_list tv_mode_list[DRM_MODE_TV_MODE_MAX];
+ struct drm_property *tv_mode;
+ unsigned int i, len = 0;
+
+ if (dev->mode_config.tv_mode_property)
+ return 0;
+
+ for (i = 0; i < DRM_MODE_TV_MODE_MAX; i++) {
+ if (!(supported_tv_modes & BIT(i)))
+ continue;
+
+ tv_mode_list[len].type = i;
+ tv_mode_list[len].name = drm_get_tv_mode_name(i);
+ len++;
+ }
+
+ tv_mode = drm_property_create_enum(dev, 0, "TV mode",
+ tv_mode_list, len);
+ if (!tv_mode)
+ return -ENOMEM;
+
+ dev->mode_config.tv_mode_property = tv_mode;
+
+ return drm_mode_create_tv_properties_legacy(dev, 0, NULL);
+}
EXPORT_SYMBOL(drm_mode_create_tv_properties);
/**
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index ee445f4605ba..4f643a490dc3 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -38,6 +38,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -50,9 +51,8 @@
static int drm_name_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_master *master;
mutex_lock(&dev->master_mutex);
@@ -72,8 +72,8 @@ static int drm_name_info(struct seq_file *m, void *data)
static int drm_clients_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_file *priv;
kuid_t uid;
@@ -124,8 +124,8 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
static int drm_gem_name_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
seq_printf(m, " name size handles refcount\n");
@@ -136,7 +136,7 @@ static int drm_gem_name_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_debugfs_list[] = {
+static const struct drm_debugfs_info drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"clients", drm_clients_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
@@ -151,6 +151,21 @@ static int drm_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, node->info_ent->show, node);
}
+static int drm_debugfs_entry_open(struct inode *inode, struct file *file)
+{
+ struct drm_debugfs_entry *entry = inode->i_private;
+ struct drm_debugfs_info *node = &entry->file;
+
+ return single_open(file, node->show, entry);
+}
+
+static const struct file_operations drm_debugfs_entry_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_debugfs_entry_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
static const struct file_operations drm_debugfs_fops = {
.owner = THIS_MODULE,
@@ -192,7 +207,7 @@ void drm_debugfs_create_files(const struct drm_info_list *files, int count,
tmp->minor = minor;
tmp->dent = debugfs_create_file(files[i].name,
- S_IFREG | S_IRUGO, root, tmp,
+ 0444, root, tmp,
&drm_debugfs_fops);
tmp->info_ent = &files[i];
@@ -207,6 +222,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
{
struct drm_device *dev = minor->dev;
+ struct drm_debugfs_entry *entry, *tmp;
char name[64];
INIT_LIST_HEAD(&minor->debugfs_list);
@@ -214,8 +230,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
- drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_debugfs_list, DRM_DEBUGFS_ENTRIES);
if (drm_drv_uses_atomic_modeset(dev)) {
drm_atomic_debugfs_init(minor);
@@ -230,9 +245,29 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
if (dev->driver->debugfs_init)
dev->driver->debugfs_init(minor);
+ list_for_each_entry_safe(entry, tmp, &dev->debugfs_list, list) {
+ debugfs_create_file(entry->file.name, 0444,
+ minor->debugfs_root, entry, &drm_debugfs_entry_fops);
+ list_del(&entry->list);
+ }
+
return 0;
}
+void drm_debugfs_late_register(struct drm_device *dev)
+{
+ struct drm_minor *minor = dev->primary;
+ struct drm_debugfs_entry *entry, *tmp;
+
+ if (!minor)
+ return;
+
+ list_for_each_entry_safe(entry, tmp, &dev->debugfs_list, list) {
+ debugfs_create_file(entry->file.name, 0444,
+ minor->debugfs_root, entry, &drm_debugfs_entry_fops);
+ list_del(&entry->list);
+ }
+}
int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
struct drm_minor *minor)
@@ -281,6 +316,53 @@ void drm_debugfs_cleanup(struct drm_minor *minor)
minor->debugfs_root = NULL;
}
+/**
+ * drm_debugfs_add_file - Add a given file to the DRM device debugfs file list
+ * @dev: drm device for the ioctl
+ * @name: debugfs file name
+ * @show: show callback
+ * @data: driver-private data, should not be device-specific
+ *
+ * Add a given file entry to the DRM device debugfs file list to be created on
+ * drm_debugfs_init.
+ */
+void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*), void *data)
+{
+ struct drm_debugfs_entry *entry = drmm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return;
+
+ entry->file.name = name;
+ entry->file.show = show;
+ entry->file.data = data;
+ entry->dev = dev;
+
+ mutex_lock(&dev->debugfs_mutex);
+ list_add(&entry->list, &dev->debugfs_list);
+ mutex_unlock(&dev->debugfs_mutex);
+}
+EXPORT_SYMBOL(drm_debugfs_add_file);
+
+/**
+ * drm_debugfs_add_files - Add an array of files to the DRM device debugfs file list
+ * @dev: drm device for the ioctl
+ * @files: The array of files to create
+ * @count: The number of files given
+ *
+ * Add a given set of debugfs files represented by an array of
+ * &struct drm_debugfs_info in the DRM device debugfs file list.
+ */
+void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ drm_debugfs_add_file(dev, files[i].name, files[i].show, files[i].data);
+}
+EXPORT_SYMBOL(drm_debugfs_add_files);
+
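A usage sketch for the two helpers above (hypothetical driver code, not part of the patch): the show callback retrieves the &drm_device from the entry stored in the seq_file, and the file itself is created when the DRM debugfs setup runs.

static int mydrv_status_show(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;

	seq_printf(m, "%s status: ok\n", dev->driver->name);
	return 0;
}

	/* e.g. from the driver's init path */
	drm_debugfs_add_file(dev, "mydrv_status", mydrv_status_show, NULL);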
static int connector_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
@@ -426,15 +508,15 @@ void drm_debugfs_connector_add(struct drm_connector *connector)
connector->debugfs_entry = root;
/* force */
- debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector,
+ debugfs_create_file("force", 0644, root, connector,
&drm_connector_fops);
/* edid */
- debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root, connector,
+ debugfs_create_file("edid_override", 0644, root, connector,
&drm_edid_fops);
/* vrr range */
- debugfs_create_file("vrr_range", S_IRUGO, root, connector,
+ debugfs_create_file("vrr_range", 0444, root, connector,
&vrr_range_fops);
/* max bpc */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 73b845a75d52..c6eb8972451a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -598,6 +598,7 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
+ mutex_destroy(&dev->debugfs_mutex);
drm_legacy_destroy_members(dev);
}
@@ -638,12 +639,14 @@ static int drm_dev_init(struct drm_device *dev,
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
INIT_LIST_HEAD(&dev->vblank_event_list);
+ INIT_LIST_HEAD(&dev->debugfs_list);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
+ mutex_init(&dev->debugfs_mutex);
ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
if (ret)
@@ -929,8 +932,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
dev->registered = true;
- if (dev->driver->load) {
- ret = dev->driver->load(dev, flags);
+ if (driver->load) {
+ ret = driver->load(dev, flags);
if (ret)
goto err_minors;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b3a731b9170a..427631706128 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1726,117 +1726,132 @@ unlock:
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-/*
- * Allocates the backing storage and sets up the fbdev info structure through
- * the ->fb_probe callback.
- */
-static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
- int preferred_bpp)
+static uint32_t drm_fb_helper_find_format(struct drm_fb_helper *fb_helper, const uint32_t *formats,
+ size_t format_count, uint32_t bpp, uint32_t depth)
{
- struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
- struct drm_mode_config *config = &dev->mode_config;
- int ret = 0;
- int crtc_count = 0;
- struct drm_connector_list_iter conn_iter;
- struct drm_fb_helper_surface_size sizes;
- struct drm_connector *connector;
- struct drm_mode_set *mode_set;
- int best_depth = 0;
-
- memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
- sizes.surface_depth = 24;
- sizes.surface_bpp = 32;
- sizes.fb_width = (u32)-1;
- sizes.fb_height = (u32)-1;
+ uint32_t format;
+ size_t i;
/*
- * If driver picks 8 or 16 by default use that for both depth/bpp
- * to begin with
+ * Do not consider YUV or other complicated formats
+ * for framebuffers. This means only legacy formats
+ * are supported (fmt->depth is a legacy field), but
+ * the framebuffer emulation can only deal with such
+ * formats, specifically RGB/BGR formats.
*/
- if (preferred_bpp != sizes.surface_bpp)
- sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
+ format = drm_mode_legacy_fb_format(bpp, depth);
+ if (!format)
+ goto err;
- drm_connector_list_iter_begin(fb_helper->dev, &conn_iter);
- drm_client_for_each_connector_iter(connector, &conn_iter) {
- struct drm_cmdline_mode *cmdline_mode;
+ for (i = 0; i < format_count; ++i) {
+ if (formats[i] == format)
+ return format;
+ }
- cmdline_mode = &connector->cmdline_mode;
+err:
+ /* We found nothing. */
+ drm_warn(dev, "bpp/depth value of %u/%u not supported\n", bpp, depth);
- if (cmdline_mode->bpp_specified) {
- switch (cmdline_mode->bpp) {
- case 8:
- sizes.surface_depth = sizes.surface_bpp = 8;
- break;
- case 15:
- sizes.surface_depth = 15;
- sizes.surface_bpp = 16;
- break;
- case 16:
- sizes.surface_depth = sizes.surface_bpp = 16;
- break;
- case 24:
- sizes.surface_depth = sizes.surface_bpp = 24;
- break;
- case 32:
- sizes.surface_depth = 24;
- sizes.surface_bpp = 32;
- break;
- }
- break;
- }
+ return DRM_FORMAT_INVALID;
+}
+
+static uint32_t drm_fb_helper_find_color_mode_format(struct drm_fb_helper *fb_helper,
+ const uint32_t *formats, size_t format_count,
+ unsigned int color_mode)
+{
+ struct drm_device *dev = fb_helper->dev;
+ uint32_t bpp, depth;
+
+ switch (color_mode) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ case 24:
+ bpp = depth = color_mode;
+ break;
+ case 15:
+ bpp = 16;
+ depth = 15;
+ break;
+ case 32:
+ bpp = 32;
+ depth = 24;
+ break;
+ default:
+ drm_info(dev, "unsupported color mode of %d\n", color_mode);
+ return DRM_FORMAT_INVALID;
}
- drm_connector_list_iter_end(&conn_iter);
- /*
- * If we run into a situation where, for example, the primary plane
- * supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth
- * 16) we need to scale down the depth of the sizes we request.
- */
- mutex_lock(&client->modeset_mutex);
+ return drm_fb_helper_find_format(fb_helper, formats, format_count, bpp, depth);
+}
+
+static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, int preferred_bpp,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ int crtc_count = 0;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+ struct drm_mode_set *mode_set;
+ uint32_t surface_format = DRM_FORMAT_INVALID;
+ const struct drm_format_info *info;
+
+ memset(sizes, 0, sizeof(*sizes));
+ sizes->fb_width = (u32)-1;
+ sizes->fb_height = (u32)-1;
+
drm_client_for_each_modeset(mode_set, client) {
struct drm_crtc *crtc = mode_set->crtc;
struct drm_plane *plane = crtc->primary;
- int j;
drm_dbg_kms(dev, "test CRTC %u primary plane\n", drm_crtc_index(crtc));
- for (j = 0; j < plane->format_count; j++) {
- const struct drm_format_info *fmt;
-
- fmt = drm_format_info(plane->format_types[j]);
-
- /*
- * Do not consider YUV or other complicated formats
- * for framebuffers. This means only legacy formats
- * are supported (fmt->depth is a legacy field) but
- * the framebuffer emulation can only deal with such
- * formats, specifically RGB/BGA formats.
- */
- if (fmt->depth == 0)
- continue;
-
- /* We found a perfect fit, great */
- if (fmt->depth == sizes.surface_depth) {
- best_depth = fmt->depth;
- break;
- }
+ drm_connector_list_iter_begin(fb_helper->dev, &conn_iter);
+ drm_client_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- /* Skip depths above what we're looking for */
- if (fmt->depth > sizes.surface_depth)
+ if (!cmdline_mode->bpp_specified)
continue;
- /* Best depth found so far */
- if (fmt->depth > best_depth)
- best_depth = fmt->depth;
+ surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
+ plane->format_types,
+ plane->format_count,
+ cmdline_mode->bpp);
+ if (surface_format != DRM_FORMAT_INVALID)
+ break; /* found supported format */
}
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (surface_format != DRM_FORMAT_INVALID)
+ break; /* found supported format */
+
+ /* try preferred color mode */
+ surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
+ plane->format_types,
+ plane->format_count,
+ preferred_bpp);
+ if (surface_format != DRM_FORMAT_INVALID)
+ break; /* found supported format */
}
- if (sizes.surface_depth != best_depth && best_depth) {
- drm_info(dev, "requested bpp %d, scaled depth down to %d",
- sizes.surface_bpp, best_depth);
- sizes.surface_depth = best_depth;
+
+ if (surface_format == DRM_FORMAT_INVALID) {
+ /*
+ * If none of the given color modes works, fall back
+ * to XRGB8888. Drivers are expected to provide this
+ * format for compatibility with legacy applications.
+ */
+ drm_warn(dev, "No compatible format found\n");
+ surface_format = drm_driver_legacy_fb_format(dev, 32, 24);
}
+ info = drm_format_info(surface_format);
+ sizes->surface_bpp = drm_format_info_bpp(info, 0);
+ sizes->surface_depth = info->depth;
+
/* first up get a count of crtcs now in use and new min/maxes width/heights */
crtc_count = 0;
drm_client_for_each_modeset(mode_set, client) {
@@ -1858,8 +1873,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
x = mode_set->x;
y = mode_set->y;
- sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
- sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
+ sizes->surface_width =
+ max_t(u32, desired_mode->hdisplay + x, sizes->surface_width);
+ sizes->surface_height =
+ max_t(u32, desired_mode->vdisplay + y, sizes->surface_height);
for (j = 0; j < mode_set->num_connectors; j++) {
struct drm_connector *connector = mode_set->connectors[j];
@@ -1875,28 +1892,63 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
if (lasth)
- sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width);
+ sizes->fb_width = min_t(u32, desired_mode->hdisplay + x, sizes->fb_width);
if (lastv)
- sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height);
+ sizes->fb_height = min_t(u32, desired_mode->vdisplay + y, sizes->fb_height);
}
- mutex_unlock(&client->modeset_mutex);
- if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
+ if (crtc_count == 0 || sizes->fb_width == -1 || sizes->fb_height == -1) {
drm_info(dev, "Cannot find any crtc or sizes\n");
-
- /* First time: disable all crtc's.. */
- if (!fb_helper->deferred_setup)
- drm_client_modeset_commit(client);
return -EAGAIN;
}
+ return 0;
+}
+
+static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper, int preferred_bpp,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret;
+
+ mutex_lock(&client->modeset_mutex);
+ ret = __drm_fb_helper_find_sizes(fb_helper, preferred_bpp, sizes);
+ mutex_unlock(&client->modeset_mutex);
+
+ if (ret)
+ return ret;
+
/* Handle our overallocation */
- sizes.surface_height *= drm_fbdev_overalloc;
- sizes.surface_height /= 100;
- if (sizes.surface_height > config->max_height) {
+ sizes->surface_height *= drm_fbdev_overalloc;
+ sizes->surface_height /= 100;
+ if (sizes->surface_height > config->max_height) {
drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n",
config->max_height);
- sizes.surface_height = config->max_height;
+ sizes->surface_height = config->max_height;
+ }
+
+ return 0;
+}
+
+/*
+ * Allocates the backing storage and sets up the fbdev info structure through
+ * the ->fb_probe callback.
+ */
+static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
+ int preferred_bpp)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_fb_helper_surface_size sizes;
+ int ret;
+
+ ret = drm_fb_helper_find_sizes(fb_helper, preferred_bpp, &sizes);
+ if (ret) {
+ /* First time: disable all crtc's.. */
+ if (!fb_helper->deferred_setup)
+ drm_client_modeset_commit(client);
+ return ret;
}
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index ab8695669279..0a4c160e0e58 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -431,7 +431,6 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
* drm_fbdev_generic_setup() - Setup generic fbdev emulation
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device.
- * @dev->mode_config.preferred_depth is used if this is zero.
*
* This function sets up generic fbdev emulation for drivers that supports
* dumb buffers with a virtual address and that can be mmap'ed.
@@ -475,13 +474,17 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
}
/*
- * FIXME: This mixes up depth with bpp, which results in a glorious
- * mess, resulting in some drivers picking wrong fbdev defaults and
- * others wrong preferred_depth defaults.
+ * Pick a preferred bpp of 32 if no value has been given. This
+ * will select XRGB8888 for the framebuffer formats. All drivers
+ * have to support XRGB8888 for backwards compatibility with legacy
+ * userspace, so it's the safe choice here.
+ *
+ * TODO: Replace struct drm_mode_config.preferred_depth and this
+ * bpp value with a preferred format that is given as struct
+ * drm_format_info. Then derive all other values from the
+ * format.
*/
if (!preferred_bpp)
- preferred_bpp = dev->mode_config.preferred_depth;
- if (!preferred_bpp)
preferred_bpp = 32;
fb_helper->preferred_bpp = preferred_bpp;
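To illustrate the behaviour documented in the comment above (the call sites are hypothetical; the two calls are alternatives, not a sequence):

	/* use the default: 32 bpp, i.e. an XRGB8888 framebuffer */
	drm_fbdev_generic_setup(dev, 0);

	/* or request a specific color mode, e.g. 16 for RGB565, if supported */
	drm_fbdev_generic_setup(dev, 16);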
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 64b4a3a87fbb..a51ff8cee049 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -245,10 +245,10 @@ void drm_file_free(struct drm_file *file)
dev = file->minor->dev;
- DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file->minor->kdev->devt),
- atomic_read(&dev->open_count));
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file->minor->kdev->devt),
+ atomic_read(&dev->open_count));
#ifdef CONFIG_DRM_LEGACY
if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
@@ -340,8 +340,8 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
return -EINVAL;
- DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
- task_pid_nr(current), minor->index);
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, minor=%d\n",
+ current->comm, task_pid_nr(current), minor->index);
priv = drm_file_alloc(minor);
if (IS_ERR(priv))
@@ -450,11 +450,11 @@ EXPORT_SYMBOL(drm_open);
void drm_lastclose(struct drm_device * dev)
{
- DRM_DEBUG("\n");
+ drm_dbg_core(dev, "\n");
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
- DRM_DEBUG("driver lastclose completed\n");
+ drm_dbg_core(dev, "driver lastclose completed\n");
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_dev_reinit(dev);
@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
- DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
+ drm_dbg_core(dev, "open_count = %d\n", atomic_read(&dev->open_count));
drm_close_helper(filp);
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 74ff33c2ddaa..994f8fb71f45 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -322,7 +322,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u16 *dbuf16 = dbuf;
+ __le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
@@ -333,14 +333,15 @@ static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigne
val16 = ((pix & 0x00F80000) >> 8) |
((pix & 0x0000FC00) >> 5) |
((pix & 0x000000F8) >> 3);
- dbuf16[x] = val16;
+ dbuf16[x] = cpu_to_le16(val16);
}
}
+/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
unsigned int pixels)
{
- u16 *dbuf16 = dbuf;
+ __le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
@@ -351,7 +352,7 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
val16 = ((pix & 0x00F80000) >> 8) |
((pix & 0x0000FC00) >> 5) |
((pix & 0x000000F8) >> 3);
- dbuf16[x] = swab16(val16);
+ dbuf16[x] = cpu_to_le16(swab16(val16));
}
}
@@ -395,6 +396,161 @@ void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pi
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
+static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le16 *dbuf16 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u16 val16;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+ dbuf16[x] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_xrgb1555 - Convert XRGB8888 to XRGB1555 clip buffer
+ * @dst: Array of XRGB1555 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for XRGB1555 devices that don't support
+ * XRGB8888 natively.
+ */
+void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_xrgb1555_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
+
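A usage sketch for this conversion helper (the argb1555, rgba5551, argb8888 and argb2101010 variants added below take the same parameters); vaddr and shadow_map are assumed to come from the driver's plane-update path:

	struct iosys_map dst = IOSYS_MAP_INIT_VADDR(vaddr);	/* scanout memory */
	struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height);

	/* shadow_map holds the XRGB8888 source for fb; a NULL dst_pitch means
	 * the destination scanlines are stored next to each other
	 */
	drm_fb_xrgb8888_to_xrgb1555(&dst, NULL, &shadow_map, fb, &clip);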
+static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le16 *dbuf16 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u16 val16;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = BIT(15) | /* set alpha bit */
+ ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+ dbuf16[x] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_argb1555 - Convert XRGB8888 to ARGB1555 clip buffer
+ * @dst: Array of ARGB1555 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB1555 devices that don't support
+ * XRGB8888 natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_argb1555_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
+
+static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le16 *dbuf16 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u16 val16;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2) |
+ BIT(0); /* set alpha bit */
+ dbuf16[x] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgba5551 - Convert XRGB8888 to RGBA5551 clip buffer
+ * @dst: Array of RGBA5551 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGBA5551 devices that don't support
+ * XRGB8888 natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_rgba5551_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
+
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
@@ -404,6 +560,7 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
+ /* write blue-green-red to output in little-endian byte order */
*dbuf8++ = (pix & 0x000000FF) >> 0;
*dbuf8++ = (pix & 0x0000FF00) >> 8;
*dbuf8++ = (pix & 0x00FF0000) >> 16;
@@ -444,64 +601,53 @@ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pi
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
-static void drm_fb_rgb565_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
- const __le16 *sbuf16 = sbuf;
- unsigned int x;
-
- for (x = 0; x < pixels; x++) {
- u16 val16 = le16_to_cpu(sbuf16[x]);
- u32 val32 = ((val16 & 0xf800) << 8) |
- ((val16 & 0x07e0) << 5) |
- ((val16 & 0x001f) << 3);
- val32 = 0xff000000 | val32 |
- ((val32 >> 3) & 0x00070007) |
- ((val32 >> 2) & 0x00000300);
- dbuf32[x] = cpu_to_le32(val32);
- }
-}
-
-static void drm_fb_rgb565_to_xrgb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
-{
- static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
- 4,
- };
-
- drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
- drm_fb_rgb565_to_xrgb8888_line);
-}
-
-static void drm_fb_rgb888_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
-{
- __le32 *dbuf32 = dbuf;
- const u8 *sbuf8 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- u8 r = *sbuf8++;
- u8 g = *sbuf8++;
- u8 b = *sbuf8++;
- u32 pix = 0xff000000 | (r << 16) | (g << 8) | b;
+ pix = le32_to_cpu(sbuf32[x]);
+ pix |= GENMASK(31, 24); /* fill alpha bits */
dbuf32[x] = cpu_to_le32(pix);
}
}
-static void drm_fb_rgb888_to_xrgb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+/**
+ * drm_fb_xrgb8888_to_argb8888 - Convert XRGB8888 to ARGB8888 clip buffer
+ * @dst: Array of ARGB8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB8888 devices that don't support XRGB8888
+ * natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
- drm_fb_rgb888_to_xrgb8888_line);
+ drm_fb_xrgb8888_to_argb8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -555,6 +701,59 @@ void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *d
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
+static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u32 val32;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val32 = ((pix & 0x000000ff) << 2) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x00ff0000) << 6);
+ pix = GENMASK(31, 30) | /* set alpha bits */
+ val32 | ((val32 >> 8) & 0x00300c03);
+ *dbuf32++ = cpu_to_le32(pix);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_argb2101010 - Convert XRGB8888 to ARGB2101010 clip buffer
+ * @dst: Array of ARGB2101010 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB2101010 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_argb2101010_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
+
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
@@ -641,50 +840,41 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
{
uint32_t fb_format = fb->format->format;
- /* treat alpha channel like filler bits */
- if (fb_format == DRM_FORMAT_ARGB8888)
- fb_format = DRM_FORMAT_XRGB8888;
- if (dst_format == DRM_FORMAT_ARGB8888)
- dst_format = DRM_FORMAT_XRGB8888;
- if (fb_format == DRM_FORMAT_ARGB2101010)
- fb_format = DRM_FORMAT_XRGB2101010;
- if (dst_format == DRM_FORMAT_ARGB2101010)
- dst_format = DRM_FORMAT_XRGB2101010;
-
- if (dst_format == fb_format) {
+ if (fb_format == dst_format) {
drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
return 0;
-
- } else if (dst_format == DRM_FORMAT_RGB565) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) {
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ return 0;
+ } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) {
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ return 0;
+ } else if (fb_format == DRM_FORMAT_XRGB8888) {
+ if (dst_format == DRM_FORMAT_RGB565) {
drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false);
return 0;
- }
- } else if (dst_format == (DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN)) {
- if (fb_format == DRM_FORMAT_RGB565) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ } else if (dst_format == DRM_FORMAT_XRGB1555) {
+ drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_RGB888) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
- drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
+ } else if (dst_format == DRM_FORMAT_ARGB1555) {
+ drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_XRGB8888) {
- if (fb_format == DRM_FORMAT_RGB888) {
- drm_fb_rgb888_to_xrgb8888(dst, dst_pitch, src, fb, clip);
+ } else if (dst_format == DRM_FORMAT_RGBA5551) {
+ drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip);
+ return 0;
+ } else if (dst_format == DRM_FORMAT_RGB888) {
+ drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
return 0;
- } else if (fb_format == DRM_FORMAT_RGB565) {
- drm_fb_rgb565_to_xrgb8888(dst, dst_pitch, src, fb, clip);
+ } else if (dst_format == DRM_FORMAT_ARGB8888) {
+ drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_XRGB2101010) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (dst_format == DRM_FORMAT_XRGB2101010) {
drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_BGRX8888) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (dst_format == DRM_FORMAT_ARGB2101010) {
+ drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip);
+ return 0;
+ } else if (dst_format == DRM_FORMAT_BGRX8888) {
drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
return 0;
}
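For reference, the dispatch above is what a driver reaches when it calls drm_fb_blit() instead of picking a conversion helper by hand. A minimal sketch, assuming an RGB565-only output (names hypothetical):

	#include <drm/drm_format_helper.h>
	#include <drm/drm_fourcc.h>
	#include <drm/drm_framebuffer.h>
	#include <linux/iosys-map.h>

	/* Sketch only: drm_fb_blit() selects memcpy, byte swapping or a format
	 * conversion based on fb->format and the requested destination format.
	 */
	static int example_blit_to_rgb565(struct iosys_map *dst,
					  const struct iosys_map *src,
					  const struct drm_framebuffer *fb,
					  const struct drm_rect *damage)
	{
		return drm_fb_blit(dst, NULL, DRM_FORMAT_RGB565, src, fb, damage);
	}
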
@@ -805,6 +995,39 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
+static uint32_t drm_fb_nonalpha_fourcc(uint32_t fourcc)
+{
+ /* only handle formats with depth != 0 and alpha channel */
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ return DRM_FORMAT_XRGB1555;
+ case DRM_FORMAT_ABGR1555:
+ return DRM_FORMAT_XBGR1555;
+ case DRM_FORMAT_RGBA5551:
+ return DRM_FORMAT_RGBX5551;
+ case DRM_FORMAT_BGRA5551:
+ return DRM_FORMAT_BGRX5551;
+ case DRM_FORMAT_ARGB8888:
+ return DRM_FORMAT_XRGB8888;
+ case DRM_FORMAT_ABGR8888:
+ return DRM_FORMAT_XBGR8888;
+ case DRM_FORMAT_RGBA8888:
+ return DRM_FORMAT_RGBX8888;
+ case DRM_FORMAT_BGRA8888:
+ return DRM_FORMAT_BGRX8888;
+ case DRM_FORMAT_ARGB2101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DRM_FORMAT_ABGR2101010:
+ return DRM_FORMAT_XBGR2101010;
+ case DRM_FORMAT_RGBA1010102:
+ return DRM_FORMAT_RGBX1010102;
+ case DRM_FORMAT_BGRA1010102:
+ return DRM_FORMAT_BGRX1010102;
+ }
+
+ return fourcc;
+}
+
static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
{
const uint32_t *fourccs_end = fourccs + nfourccs;
@@ -817,73 +1040,48 @@ static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t
return false;
}
-static const uint32_t conv_from_xrgb8888[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
-};
-
-static const uint32_t conv_from_rgb565_888[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
-};
-
-static bool is_conversion_supported(uint32_t from, uint32_t to)
-{
- switch (from) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- return is_listed_fourcc(conv_from_xrgb8888, ARRAY_SIZE(conv_from_xrgb8888), to);
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_RGB888:
- return is_listed_fourcc(conv_from_rgb565_888, ARRAY_SIZE(conv_from_rgb565_888), to);
- case DRM_FORMAT_XRGB2101010:
- return to == DRM_FORMAT_ARGB2101010;
- case DRM_FORMAT_ARGB2101010:
- return to == DRM_FORMAT_XRGB2101010;
- default:
- return false;
- }
-}
-
/**
* drm_fb_build_fourcc_list - Filters a list of supported color formats against
* the device's native formats
* @dev: DRM device
* @native_fourccs: 4CC codes of natively supported color formats
* @native_nfourccs: The number of entries in @native_fourccs
- * @driver_fourccs: 4CC codes of all driver-supported color formats
- * @driver_nfourccs: The number of entries in @driver_fourccs
* @fourccs_out: Returns 4CC codes of supported color formats
* @nfourccs_out: The number of available entries in @fourccs_out
*
 * This function creates a list of supported color formats from natively
- * supported formats and the emulated formats.
+ * supported formats and additional emulated formats.
* At a minimum, most userspace programs expect at least support for
* XRGB8888 on the primary plane. Devices that have to emulate the
* format, and possibly others, can use drm_fb_build_fourcc_list() to
* create a list of supported color formats. The returned list can
* be handed over to drm_universal_plane_init() et al. Native formats
- * will go before emulated formats. Other heuristics might be applied
+ * will go before emulated formats. Native formats with alpha channel
+ * will be replaced by their non-alpha equivalents, as primary
+ * planes usually don't support alpha. Other heuristics might be applied
* to optimize the order. Formats near the beginning of the list are
- * usually preferred over formats near the end of the list. Formats
- * without conversion helpers will be skipped. New drivers should only
- * pass in XRGB8888 and avoid exposing additional emulated formats.
+ * usually preferred over formats near the end of the list.
*
* Returns:
* The number of color-formats 4CC codes returned in @fourccs_out.
*/
size_t drm_fb_build_fourcc_list(struct drm_device *dev,
const u32 *native_fourccs, size_t native_nfourccs,
- const u32 *driver_fourccs, size_t driver_nfourccs,
u32 *fourccs_out, size_t nfourccs_out)
{
+ /*
+ * XRGB8888 is the default fallback format for most of userspace
+ * and it's currently the only format that should be emulated for
+ * the primary plane. If another default fallback format is ever
+ * needed, it should be added here.
+ */
+ static const uint32_t extra_fourccs[] = {
+ DRM_FORMAT_XRGB8888,
+ };
+ static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
+
u32 *fourccs = fourccs_out;
const u32 *fourccs_end = fourccs_out + nfourccs_out;
- uint32_t native_format = 0;
size_t i;
/*
@@ -891,7 +1089,12 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev,
*/
for (i = 0; i < native_nfourccs; ++i) {
- u32 fourcc = native_fourccs[i];
+ /*
+ * Several DTs, boot loaders and firmware report alpha formats for
+ * what are really non-alpha formats, so replace the alpha formats
+ * with their non-alpha equivalents.
+ */
+ u32 fourcc = drm_fb_nonalpha_fourcc(native_fourccs[i]);
if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
continue; /* skip duplicate entries */
@@ -902,14 +1105,6 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev,
drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
- /*
- * There should only be one native format with the current API.
- * This API needs to be refactored to correctly support arbitrary
- * sets of native formats, since it needs to report which native
- * format to use for each emulated format.
- */
- if (!native_format)
- native_format = fourcc;
*fourccs = fourcc;
++fourccs;
}
@@ -918,17 +1113,14 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev,
* The extra formats, emulated by the driver, go second.
*/
- for (i = 0; (i < driver_nfourccs) && (fourccs < fourccs_end); ++i) {
- u32 fourcc = driver_fourccs[i];
+ for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
+ u32 fourcc = extra_fourccs[i];
if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
continue; /* skip duplicate and native entries */
} else if (fourccs == fourccs_end) {
drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
continue; /* end of available output buffer */
- } else if (!is_conversion_supported(fourcc, native_format)) {
- drm_dbg_kms(dev, "Unsupported emulated format %p4cc\n", &fourcc);
- continue; /* format is not supported for conversion */
}
drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
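With the simplified interface, the intended call site looks roughly like the sketch below: one native format goes in, the helper appends the emulated XRGB8888 fallback, and the result feeds drm_universal_plane_init(), as the kernel-doc above describes. Names and the choice of RGB565 are assumptions for illustration.

	#include <drm/drm_format_helper.h>
	#include <drm/drm_fourcc.h>
	#include <drm/drm_plane.h>

	/* Hypothetical primary-plane setup: the hardware scans out RGB565 only;
	 * XRGB8888 is added as an emulated fallback by the helper.
	 */
	static const u32 example_native_formats[] = {
		DRM_FORMAT_RGB565,
	};

	static int example_primary_plane_init(struct drm_device *dev,
					      struct drm_plane *plane,
					      const struct drm_plane_funcs *funcs)
	{
		u32 formats[8];
		size_t nformats;

		nformats = drm_fb_build_fourcc_list(dev, example_native_formats,
						    ARRAY_SIZE(example_native_formats),
						    formats, ARRAY_SIZE(formats));

		/* possible_crtcs mask and format modifiers omitted for brevity */
		return drm_universal_plane_init(dev, plane, 0, funcs,
						formats, nformats, NULL,
						DRM_PLANE_TYPE_PRIMARY, NULL);
	}
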
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 6242dfbe9240..0f17dfa8702b 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -190,6 +190,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#ifdef __BIG_ENDIAN
+ { .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#endif
{ .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 2dd97473ca10..aff3746dedfb 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -1203,8 +1203,8 @@ void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
#ifdef CONFIG_DEBUG_FS
static int drm_framebuffer_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_framebuffer *fb;
@@ -1218,14 +1218,13 @@ static int drm_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_framebuffer_debugfs_list[] = {
+static const struct drm_debugfs_info drm_framebuffer_debugfs_list[] = {
{ "framebuffer", drm_framebuffer_info, 0 },
};
void drm_framebuffer_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_framebuffer_debugfs_list,
- ARRAY_SIZE(drm_framebuffer_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_framebuffer_debugfs_list,
+ ARRAY_SIZE(drm_framebuffer_debugfs_list));
}
#endif
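The same drm_debugfs_info/drm_debugfs_add_files() pattern applies to driver code converted away from drm_info_list. A hedged sketch of the new form, with the show callback body elided:

	#include <drm/drm_debugfs.h>
	#include <drm/drm_file.h>
	#include <linux/seq_file.h>

	static int example_debugfs_show(struct seq_file *m, void *data)
	{
		struct drm_debugfs_entry *entry = m->private;
		struct drm_device *dev = entry->dev;	/* device now comes from the entry */

		/* ... print device state, e.g. via drm_seq_file_printer(m) ... */
		return 0;
	}

	static const struct drm_debugfs_info example_debugfs_list[] = {
		{ "example", example_debugfs_show, 0 },
	};

	static void example_debugfs_init(struct drm_minor *minor)
	{
		drm_debugfs_add_files(minor->dev, example_debugfs_list,
				      ARRAY_SIZE(example_debugfs_list));
	}
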
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b8db675e7fb5..59a0bb5ebd85 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -170,6 +170,20 @@ void drm_gem_private_object_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_private_object_init);
/**
+ * drm_gem_private_object_fini - Finalize a failed drm_gem_object
+ * @obj: drm_gem_object
+ *
+ * Uninitialize an already allocated GEM object when its initialization failed
+ */
+void drm_gem_private_object_fini(struct drm_gem_object *obj)
+{
+ WARN_ON(obj->dma_buf);
+
+ dma_resv_fini(&obj->_resv);
+}
+EXPORT_SYMBOL(drm_gem_private_object_fini);
+
+/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
*
@@ -930,12 +944,11 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
- WARN_ON(obj->dma_buf);
-
if (obj->filp)
fput(obj->filp);
- dma_resv_fini(&obj->_resv);
+ drm_gem_private_object_fini(obj);
+
drm_gem_free_mmap_offset(obj);
drm_gem_lru_remove(obj);
}
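The shmem-helper hunk further down shows the in-tree user of the new function; in a driver's own GEM-object constructor the error path would look roughly like this sketch, where example_backend_alloc() is a hypothetical stand-in for whatever backing-store setup can fail:

	#include <drm/drm_device.h>
	#include <drm/drm_gem.h>

	static int example_gem_create(struct drm_device *dev, size_t size,
				      struct drm_gem_object *obj)
	{
		int ret;

		drm_gem_private_object_init(dev, obj, size);

		ret = example_backend_alloc(obj, size);	/* hypothetical */
		if (ret) {
			/* undo drm_gem_private_object_init() after a failed init */
			drm_gem_private_object_fini(obj);
			return ret;
		}

		return 0;
	}
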
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index e42800718f51..5d4b9cd077f7 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -26,11 +26,8 @@
* call drm_gem_plane_helper_prepare_fb() from their implementation of
* struct &drm_plane_helper.prepare_fb . It sets the plane's fence from
* the framebuffer so that the DRM core can synchronize access automatically.
- *
* drm_gem_plane_helper_prepare_fb() can also be used directly as
- * implementation of prepare_fb. For drivers based on
- * struct drm_simple_display_pipe, drm_gem_simple_display_pipe_prepare_fb()
- * provides equivalent functionality.
+ * implementation of prepare_fb.
*
* .. code-block:: c
*
@@ -41,11 +38,6 @@
* . prepare_fb = drm_gem_plane_helper_prepare_fb,
* };
*
- * struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
- * ...,
- * . prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
- * };
- *
* A driver using a shadow buffer copies the content of the shadow buffers
* into the HW's framebuffer memory during an atomic update. This requires
* a mapping of the shadow buffer into kernel address space. The mappings
@@ -205,27 +197,6 @@ error:
}
EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
-/**
- * drm_gem_simple_display_pipe_prepare_fb - prepare_fb helper for &drm_simple_display_pipe
- * @pipe: Simple display pipe
- * @plane_state: Plane state
- *
- * This function uses drm_gem_plane_helper_prepare_fb() to extract the fences
- * from &drm_gem_object.resv and attaches them to the plane state for the atomic
- * helper to wait on. This is necessary to correctly implement implicit
- * synchronization for any buffers shared as a struct &dma_buf. Drivers can use
- * this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
- *
- * See drm_gem_plane_helper_prepare_fb() for a discussion of implicit and
- * explicit fencing in atomic modeset updates.
- */
-int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state)
-{
- return drm_gem_plane_helper_prepare_fb(&pipe->plane, plane_state);
-}
-EXPORT_SYMBOL(drm_gem_simple_display_pipe_prepare_fb);
-
/*
* Shadow-buffered Planes
*/
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 1e658c448366..df89fbd2d35c 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -477,8 +477,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
dma_obj->dma_addr = sg_dma_address(sgt->sgl);
dma_obj->sgt = sgt;
- DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
- attach->dmabuf->size);
+ drm_dbg_prime(dev, "dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
+ attach->dmabuf->size);
return &dma_obj->base;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index b602cd72a120..235832ad7a79 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -79,8 +79,10 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
} else {
ret = drm_gem_object_init(dev, obj, size);
}
- if (ret)
+ if (ret) {
+ drm_gem_private_object_fini(obj);
goto err_free;
+ }
ret = drm_gem_create_mmap_offset(obj);
if (ret)
@@ -764,7 +766,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
shmem->sgt = sgt;
- DRM_DEBUG_PRIME("size = %zu\n", size);
+ drm_dbg_prime(dev, "size = %zu\n", size);
return &shmem->base;
}
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index d5962a34c01d..3734aa2d1c5b 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -3,6 +3,8 @@
#include <linux/module.h>
#include <drm/drm_gem_ttm_helper.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
/**
* DOC: overview
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index b6c7e3803bb3..d40b3edb52d0 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -19,6 +19,7 @@
#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
@@ -956,8 +957,8 @@ static struct ttm_device_funcs bo_driver = {
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_vram_mm *vmm = entry->dev->vram_mm;
struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
struct drm_printer p = drm_seq_file_printer(m);
@@ -965,7 +966,7 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
+static const struct drm_debugfs_info drm_vram_mm_debugfs_list[] = {
{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
@@ -977,9 +978,8 @@ static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
*/
void drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_vram_mm_debugfs_list,
- ARRAY_SIZE(drm_vram_mm_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_vram_mm_debugfs_list,
+ ARRAY_SIZE(drm_vram_mm_debugfs_list));
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 5ea5e260118c..ed2103ee272c 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -186,6 +186,7 @@ int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root);
void drm_debugfs_cleanup(struct drm_minor *minor);
+void drm_debugfs_late_register(struct drm_device *dev);
void drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
void drm_debugfs_crtc_add(struct drm_crtc *crtc);
@@ -202,6 +203,10 @@ static inline void drm_debugfs_cleanup(struct drm_minor *minor)
{
}
+static inline void drm_debugfs_late_register(struct drm_device *dev)
+{
+}
+
static inline void drm_debugfs_connector_add(struct drm_connector *connector)
{
}
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 5d82891c3222..49a743f62b4a 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -972,6 +972,7 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
drm_ioctl_compat_t *fn;
int ret;
@@ -986,14 +987,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (!fn)
return drm_ioctl(filp, cmd, arg);
- DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, %s\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated,
- drm_compat_ioctls[nr].name);
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, %s\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated,
+ drm_compat_ioctls[nr].name);
ret = (*fn)(filp, cmd, arg);
if (ret)
- DRM_DEBUG("ret = %d\n", ret);
+ drm_dbg_core(dev, "ret = %d\n", ret);
return ret;
}
EXPORT_SYMBOL(drm_compat_ioctl);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ca2a6e6101dc..7c9d66ee917d 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -440,7 +440,7 @@ done:
int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- DRM_DEBUG("\n");
+ drm_dbg_core(dev, "\n");
return 0;
}
EXPORT_SYMBOL(drm_noop);
@@ -856,16 +856,16 @@ long drm_ioctl(struct file *filp,
out_size = 0;
ksize = max(max(in_size, out_size), drv_size);
- DRM_DEBUG("comm=\"%s\" pid=%d, dev=0x%lx, auth=%d, %s\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, ioctl->name);
+ drm_dbg_core(dev, "comm=\"%s\" pid=%d, dev=0x%lx, auth=%d, %s\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, ioctl->name);
/* Do not trust userspace, use our own definition */
func = ioctl->func;
if (unlikely(!func)) {
- DRM_DEBUG("no function\n");
+ drm_dbg_core(dev, "no function\n");
retcode = -EINVAL;
goto err_i1;
}
@@ -894,16 +894,17 @@ long drm_ioctl(struct file *filp,
err_i1:
if (!ioctl)
- DRM_DEBUG("invalid ioctl: comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, cmd, nr);
+ drm_dbg_core(dev,
+ "invalid ioctl: comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
if (retcode)
- DRM_DEBUG("comm=\"%s\", pid=%d, ret=%d\n", current->comm,
- task_pid_nr(current), retcode);
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, ret=%d\n",
+ current->comm, task_pid_nr(current), retcode);
return retcode;
}
EXPORT_SYMBOL(drm_ioctl);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d72c2fac0ff1..08ab75303a00 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -213,11 +213,11 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
int id;
void *entry;
- DRM_DEBUG_LEASE("lessor %d\n", lessor->lessee_id);
+ drm_dbg_lease(dev, "lessor %d\n", lessor->lessee_id);
lessee = drm_master_create(lessor->dev);
if (!lessee) {
- DRM_DEBUG_LEASE("drm_master_create failed\n");
+ drm_dbg_lease(dev, "drm_master_create failed\n");
return ERR_PTR(-ENOMEM);
}
@@ -231,7 +231,7 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
error = -EBUSY;
if (error != 0) {
- DRM_DEBUG_LEASE("object %d failed %d\n", object, error);
+ drm_dbg_lease(dev, "object %d failed %d\n", object, error);
goto out_lessee;
}
}
@@ -249,7 +249,8 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
/* Move the leases over */
lessee->leases = *leases;
- DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
+ drm_dbg_lease(dev, "new lessee %d %p, lessor %d %p\n",
+ lessee->lessee_id, lessee, lessor->lessee_id, lessor);
mutex_unlock(&dev->mode_config.idr_mutex);
return lessee;
@@ -268,7 +269,7 @@ void drm_lease_destroy(struct drm_master *master)
mutex_lock(&dev->mode_config.idr_mutex);
- DRM_DEBUG_LEASE("drm_lease_destroy %d\n", master->lessee_id);
+ drm_dbg_lease(dev, "drm_lease_destroy %d\n", master->lessee_id);
/* This master is referenced by all lessees, hence it cannot be destroyed
* until all of them have been
@@ -277,7 +278,8 @@ void drm_lease_destroy(struct drm_master *master)
/* Remove this master from the lessee idr in the owner */
if (master->lessee_id != 0) {
- DRM_DEBUG_LEASE("remove master %d from device list of lessees\n", master->lessee_id);
+ drm_dbg_lease(dev, "remove master %d from device list of lessees\n",
+ master->lessee_id);
idr_remove(&(drm_lease_owner(master)->lessee_idr), master->lessee_id);
}
@@ -292,7 +294,7 @@ void drm_lease_destroy(struct drm_master *master)
drm_master_put(&master->lessor);
}
- DRM_DEBUG_LEASE("drm_lease_destroy done %d\n", master->lessee_id);
+ drm_dbg_lease(dev, "drm_lease_destroy done %d\n", master->lessee_id);
}
static void _drm_lease_revoke(struct drm_master *top)
@@ -308,7 +310,8 @@ static void _drm_lease_revoke(struct drm_master *top)
* the tree is fully connected, we can do this without recursing
*/
for (;;) {
- DRM_DEBUG_LEASE("revoke leases for %p %d\n", master, master->lessee_id);
+ drm_dbg_lease(master->dev, "revoke leases for %p %d\n",
+ master, master->lessee_id);
/* Evacuate the lease */
idr_for_each_entry(&master->leases, entry, object)
@@ -408,7 +411,7 @@ static int fill_object_idr(struct drm_device *dev,
ret = validate_lease(dev, object_count, objects, universal_planes);
if (ret) {
- DRM_DEBUG_LEASE("lease validation failed\n");
+ drm_dbg_lease(dev, "lease validation failed\n");
goto out_free_objects;
}
@@ -418,7 +421,7 @@ static int fill_object_idr(struct drm_device *dev,
struct drm_mode_object *obj = objects[o];
u32 object_id = objects[o]->id;
- DRM_DEBUG_LEASE("Adding object %d to lease\n", object_id);
+ drm_dbg_lease(dev, "Adding object %d to lease\n", object_id);
/*
* We're using an IDR to hold the set of leased
@@ -430,8 +433,8 @@ static int fill_object_idr(struct drm_device *dev,
*/
ret = idr_alloc(leases, &drm_lease_idr_object , object_id, object_id + 1, GFP_KERNEL);
if (ret < 0) {
- DRM_DEBUG_LEASE("Object %d cannot be inserted into leases (%d)\n",
- object_id, ret);
+ drm_dbg_lease(dev, "Object %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
goto out_free_objects;
}
if (obj->type == DRM_MODE_OBJECT_CRTC && !universal_planes) {
@@ -439,15 +442,15 @@ static int fill_object_idr(struct drm_device *dev,
ret = idr_alloc(leases, &drm_lease_idr_object, crtc->primary->base.id, crtc->primary->base.id + 1, GFP_KERNEL);
if (ret < 0) {
- DRM_DEBUG_LEASE("Object primary plane %d cannot be inserted into leases (%d)\n",
- object_id, ret);
+ drm_dbg_lease(dev, "Object primary plane %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
goto out_free_objects;
}
if (crtc->cursor) {
ret = idr_alloc(leases, &drm_lease_idr_object, crtc->cursor->base.id, crtc->cursor->base.id + 1, GFP_KERNEL);
if (ret < 0) {
- DRM_DEBUG_LEASE("Object cursor plane %d cannot be inserted into leases (%d)\n",
- object_id, ret);
+ drm_dbg_lease(dev, "Object cursor plane %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
goto out_free_objects;
}
}
@@ -490,14 +493,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
- DRM_DEBUG_LEASE("invalid flags\n");
+ drm_dbg_lease(dev, "invalid flags\n");
return -EINVAL;
}
lessor = drm_file_get_master(lessor_priv);
/* Do not allow sub-leases */
if (lessor->lessor) {
- DRM_DEBUG_LEASE("recursive leasing not allowed\n");
+ drm_dbg_lease(dev, "recursive leasing not allowed\n");
ret = -EINVAL;
goto out_lessor;
}
@@ -520,7 +523,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
object_count, object_ids);
kfree(object_ids);
if (ret) {
- DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
+ drm_dbg_lease(dev, "lease object lookup failed: %i\n", ret);
idr_destroy(&leases);
goto out_lessor;
}
@@ -534,7 +537,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
goto out_lessor;
}
- DRM_DEBUG_LEASE("Creating lease\n");
+ drm_dbg_lease(dev, "Creating lease\n");
/* lessee will take the ownership of leases */
lessee = drm_lease_create(lessor, &leases);
@@ -545,7 +548,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
}
/* Clone the lessor file to create a new file for us */
- DRM_DEBUG_LEASE("Allocating lease file\n");
+ drm_dbg_lease(dev, "Allocating lease file\n");
lessee_file = file_clone_open(lessor_file);
if (IS_ERR(lessee_file)) {
ret = PTR_ERR(lessee_file);
@@ -560,7 +563,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
lessee_priv->authenticated = 1;
/* Pass fd back to userspace */
- DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
+ drm_dbg_lease(dev, "Returning fd %d id %d\n", fd, lessee->lessee_id);
cl->fd = fd;
cl->lessee_id = lessee->lessee_id;
@@ -568,7 +571,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
fd_install(fd, lessee_file);
drm_master_put(&lessor);
- DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
+ drm_dbg_lease(dev, "drm_mode_create_lease_ioctl succeeded\n");
return 0;
out_lessee:
@@ -579,7 +582,7 @@ out_leases:
out_lessor:
drm_master_put(&lessor);
- DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
+ drm_dbg_lease(dev, "drm_mode_create_lease_ioctl failed: %d\n", ret);
return ret;
}
@@ -601,7 +604,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
lessor = drm_file_get_master(lessor_priv);
- DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id);
+ drm_dbg_lease(dev, "List lessees for %d\n", lessor->lessee_id);
mutex_lock(&dev->mode_config.idr_mutex);
@@ -610,7 +613,8 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
/* Only list un-revoked leases */
if (!idr_is_empty(&lessee->leases)) {
if (count_lessees > count) {
- DRM_DEBUG_LEASE("Add lessee %d\n", lessee->lessee_id);
+ drm_dbg_lease(dev, "Add lessee %d\n",
+ lessee->lessee_id);
ret = put_user(lessee->lessee_id, lessee_ids + count);
if (ret)
break;
@@ -619,7 +623,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
}
}
- DRM_DEBUG_LEASE("Lessor leases to %d\n", count);
+ drm_dbg_lease(dev, "Lessor leases to %d\n", count);
if (ret == 0)
arg->count_lessees = count;
@@ -651,7 +655,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
lessee = drm_file_get_master(lessee_priv);
- DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id);
+ drm_dbg_lease(dev, "get lease for %d\n", lessee->lessee_id);
mutex_lock(&dev->mode_config.idr_mutex);
@@ -665,7 +669,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
count = 0;
idr_for_each_entry(object_idr, entry, object) {
if (count_objects > count) {
- DRM_DEBUG_LEASE("adding object %d\n", object);
+ drm_dbg_lease(dev, "adding object %d\n", object);
ret = put_user(object, object_ids + count);
if (ret)
break;
@@ -696,7 +700,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
struct drm_master *lessee;
int ret = 0;
- DRM_DEBUG_LEASE("revoke lease for %d\n", arg->lessee_id);
+ drm_dbg_lease(dev, "revoke lease for %d\n", arg->lessee_id);
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index a6ac56580876..c871d9f096b8 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -21,6 +21,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
@@ -192,6 +193,7 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
/**
* mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
* @dst: The destination buffer
+ * @src: The source buffer
* @fb: The source framebuffer
* @clip: Clipping rectangle of the area to be copied
* @swap: When true, swap MSB/LSB of 16-bit values
@@ -199,12 +201,10 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map data[DRM_FORMAT_MAX_PLANES];
struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst);
int ret;
@@ -212,19 +212,15 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
if (ret)
return ret;
- ret = drm_gem_fb_vmap(fb, map, data);
- if (ret)
- goto out_drm_gem_fb_end_cpu_access;
-
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(&dst_map, NULL, data, fb, clip, !gem->import_attach);
+ drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach);
else
- drm_fb_memcpy(&dst_map, NULL, data, fb, clip);
+ drm_fb_memcpy(&dst_map, NULL, src, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, data, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, swap);
break;
default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
@@ -232,8 +228,6 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
ret = -EINVAL;
}
- drm_gem_fb_vunmap(fb, map);
-out_drm_gem_fb_end_cpu_access:
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return ret;
@@ -257,29 +251,18 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
ys & 0xff, (ye >> 8) & 0xff, ye & 0xff);
}
-static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
+static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
+ struct drm_rect *rect)
{
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map data[DRM_FORMAT_MAX_PLANES];
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
struct mipi_dbi *dbi = &dbidev->dbi;
bool swap = dbi->swap_bytes;
- int idx, ret = 0;
+ int ret = 0;
bool full;
void *tr;
- if (WARN_ON(!fb))
- return;
-
- if (!drm_dev_enter(fb->dev, &idx))
- return;
-
- ret = drm_gem_fb_vmap(fb, map, data);
- if (ret)
- goto err_drm_dev_exit;
-
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -287,11 +270,11 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
- ret = mipi_dbi_buf_copy(dbidev->tx_buf, fb, rect, swap);
+ ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
if (ret)
goto err_msg;
} else {
- tr = data[0].vaddr; /* TODO: Use mapping abstraction properly */
+ tr = src->vaddr; /* TODO: Use mapping abstraction properly */
}
mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
@@ -302,11 +285,6 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
drm_err_once(fb->dev, "Failed to update display %d\n", ret);
-
- drm_gem_fb_vunmap(fb, map);
-
-err_drm_dev_exit:
- drm_dev_exit(idx);
}
/**
@@ -339,13 +317,24 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
+ int idx;
if (!pipe->crtc.state->active)
return;
+ if (WARN_ON(!fb))
+ return;
+
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- mipi_dbi_fb_dirty(state->fb, &rect);
+ mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+
+ drm_dev_exit(idx);
}
EXPORT_SYMBOL(mipi_dbi_pipe_update);
@@ -366,6 +355,7 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_rect rect = {
.x1 = 0,
@@ -378,7 +368,7 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
if (!drm_dev_enter(&dbidev->drm, &idx))
return;
- mipi_dbi_fb_dirty(fb, &rect);
+ mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
backlight_enable(dbidev->backlight);
drm_dev_exit(idx);
@@ -427,9 +417,95 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
if (dbidev->regulator)
regulator_disable(dbidev->regulator);
+ if (dbidev->io_regulator)
+ regulator_disable(dbidev->io_regulator);
}
EXPORT_SYMBOL(mipi_dbi_pipe_disable);
+/**
+ * mipi_dbi_pipe_begin_fb_access - MIPI DBI pipe begin-access helper
+ * @pipe: Display pipe
+ * @plane_state: Plane state
+ *
+ * This function implements struct &drm_simple_display_pipe_funcs.begin_fb_access.
+ *
+ * See drm_gem_begin_shadow_fb_access() for details and mipi_dbi_pipe_end_fb_access()
+ * for cleanup.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int mipi_dbi_pipe_begin_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_begin_fb_access);
+
+/**
+ * mipi_dbi_pipe_end_fb_access - MIPI DBI pipe end-access helper
+ * @pipe: Display pipe
+ * @plane_state: Plane state
+ *
+ * This function implements struct &drm_simple_display_pipe_funcs.end_fb_access.
+ *
+ * See mipi_dbi_pipe_begin_fb_access().
+ */
+void mipi_dbi_pipe_end_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_end_fb_access);
+
+/**
+ * mipi_dbi_pipe_reset_plane - MIPI DBI plane-reset helper
+ * @pipe: Display pipe
+ *
+ * This function implements struct &drm_simple_display_pipe_funcs.reset_plane
+ * for MIPI DBI planes.
+ */
+void mipi_dbi_pipe_reset_plane(struct drm_simple_display_pipe *pipe)
+{
+ drm_gem_reset_shadow_plane(&pipe->plane);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_reset_plane);
+
+/**
+ * mipi_dbi_pipe_duplicate_plane_state - duplicates MIPI DBI plane state
+ * @pipe: Display pipe
+ *
+ * This function implements struct &drm_simple_display_pipe_funcs.duplicate_plane_state
+ * for MIPI DBI planes.
+ *
+ * See drm_gem_duplicate_shadow_plane_state() for additional details.
+ *
+ * Returns:
+ * A pointer to a new plane state on success, or NULL otherwise.
+ */
+struct drm_plane_state *mipi_dbi_pipe_duplicate_plane_state(struct drm_simple_display_pipe *pipe)
+{
+ return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_duplicate_plane_state);
+
+/**
+ * mipi_dbi_pipe_destroy_plane_state - cleans up MIPI DBI plane state
+ * @pipe: Display pipe
+ * @plane_state: Plane state
+ *
+ * This function implements struct &drm_simple_display_pipe_funcs.destroy_plane_state
+ * for MIPI DBI planes.
+ *
+ * See drm_gem_destroy_shadow_plane_state() for additional details.
+ */
+void mipi_dbi_pipe_destroy_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_destroy_plane_state);
+
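Taken together, a driver built on struct drm_simple_display_pipe would wire the new shadow-plane helpers up roughly as below; this is a sketch, not part of the patch, and the enable callback is the driver's own:

	#include <drm/drm_mipi_dbi.h>
	#include <drm/drm_simple_kms_helper.h>

	static void example_pipe_enable(struct drm_simple_display_pipe *pipe,
					struct drm_crtc_state *crtc_state,
					struct drm_plane_state *plane_state)
	{
		/* driver-specific power-up, then mipi_dbi_enable_flush(...) */
	}

	static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
		.enable			= example_pipe_enable,
		.disable		= mipi_dbi_pipe_disable,
		.update			= mipi_dbi_pipe_update,
		.begin_fb_access	= mipi_dbi_pipe_begin_fb_access,
		.end_fb_access		= mipi_dbi_pipe_end_fb_access,
		.reset_plane		= mipi_dbi_pipe_reset_plane,
		.duplicate_plane_state	= mipi_dbi_pipe_duplicate_plane_state,
		.destroy_plane_state	= mipi_dbi_pipe_destroy_plane_state,
	};
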
static int mipi_dbi_connector_get_modes(struct drm_connector *connector)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(connector->dev);
@@ -652,6 +728,16 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi_dev *dbidev, bool
}
}
+ if (dbidev->io_regulator) {
+ ret = regulator_enable(dbidev->io_regulator);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to enable I/O regulator (%d)\n", ret);
+ if (dbidev->regulator)
+ regulator_disable(dbidev->regulator);
+ return ret;
+ }
+ }
+
if (cond && mipi_dbi_display_is_on(dbi))
return 1;
@@ -661,6 +747,8 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi_dev *dbidev, bool
DRM_DEV_ERROR(dev, "Failed to send reset command (%d)\n", ret);
if (dbidev->regulator)
regulator_disable(dbidev->regulator);
+ if (dbidev->io_regulator)
+ regulator_disable(dbidev->io_regulator);
return ret;
}
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 688c8afe0bf1..87eb591fe9b5 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -54,6 +54,8 @@ int drm_modeset_register_all(struct drm_device *dev)
if (ret)
goto err_connector;
+ drm_debugfs_late_register(dev);
+
return 0;
err_connector:
@@ -399,6 +401,8 @@ static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
*/
int drmm_mode_config_init(struct drm_device *dev)
{
+ int ret;
+
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
@@ -420,7 +424,11 @@ int drmm_mode_config_init(struct drm_device *dev)
init_llist_head(&dev->mode_config.connector_free_list);
INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
- drm_mode_create_standard_properties(dev);
+ ret = drm_mode_create_standard_properties(dev);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
+ }
/* Just to be sure */
dev->mode_config.num_fb = 0;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 3c8034a8c27b..be030f4a5311 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -116,6 +116,482 @@ void drm_mode_probed_add(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_probed_add);
+enum drm_mode_analog {
+ DRM_MODE_ANALOG_NTSC, /* 525 lines, 60Hz */
+ DRM_MODE_ANALOG_PAL, /* 625 lines, 50Hz */
+};
+
+/*
+ * The timings come from:
+ * - https://web.archive.org/web/20220406232708/http://www.kolumbus.fi/pami1/video/pal_ntsc.html
+ * - https://web.archive.org/web/20220406124914/http://martin.hinner.info/vga/pal.html
+ * - https://web.archive.org/web/20220609202433/http://www.batsocks.co.uk/readme/video_timing.htm
+ */
+#define NTSC_LINE_DURATION_NS 63556U
+#define NTSC_LINES_NUMBER 525
+
+#define NTSC_HBLK_DURATION_TYP_NS 10900U
+#define NTSC_HBLK_DURATION_MIN_NS (NTSC_HBLK_DURATION_TYP_NS - 200)
+#define NTSC_HBLK_DURATION_MAX_NS (NTSC_HBLK_DURATION_TYP_NS + 200)
+
+#define NTSC_HACT_DURATION_TYP_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_TYP_NS)
+#define NTSC_HACT_DURATION_MIN_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_MAX_NS)
+#define NTSC_HACT_DURATION_MAX_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_MIN_NS)
+
+#define NTSC_HFP_DURATION_TYP_NS 1500
+#define NTSC_HFP_DURATION_MIN_NS 1270
+#define NTSC_HFP_DURATION_MAX_NS 2220
+
+#define NTSC_HSLEN_DURATION_TYP_NS 4700
+#define NTSC_HSLEN_DURATION_MIN_NS (NTSC_HSLEN_DURATION_TYP_NS - 100)
+#define NTSC_HSLEN_DURATION_MAX_NS (NTSC_HSLEN_DURATION_TYP_NS + 100)
+
+#define NTSC_HBP_DURATION_TYP_NS 4700
+
+/*
+ * I couldn't find the actual tolerance for the back porch, so let's
+ * just reuse the sync length ones.
+ */
+#define NTSC_HBP_DURATION_MIN_NS (NTSC_HBP_DURATION_TYP_NS - 100)
+#define NTSC_HBP_DURATION_MAX_NS (NTSC_HBP_DURATION_TYP_NS + 100)
+
+#define PAL_LINE_DURATION_NS 64000U
+#define PAL_LINES_NUMBER 625
+
+#define PAL_HACT_DURATION_TYP_NS 51950U
+#define PAL_HACT_DURATION_MIN_NS (PAL_HACT_DURATION_TYP_NS - 100)
+#define PAL_HACT_DURATION_MAX_NS (PAL_HACT_DURATION_TYP_NS + 400)
+
+#define PAL_HBLK_DURATION_TYP_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_TYP_NS)
+#define PAL_HBLK_DURATION_MIN_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_MAX_NS)
+#define PAL_HBLK_DURATION_MAX_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_MIN_NS)
+
+#define PAL_HFP_DURATION_TYP_NS 1650
+#define PAL_HFP_DURATION_MIN_NS (PAL_HFP_DURATION_TYP_NS - 100)
+#define PAL_HFP_DURATION_MAX_NS (PAL_HFP_DURATION_TYP_NS + 400)
+
+#define PAL_HSLEN_DURATION_TYP_NS 4700
+#define PAL_HSLEN_DURATION_MIN_NS (PAL_HSLEN_DURATION_TYP_NS - 200)
+#define PAL_HSLEN_DURATION_MAX_NS (PAL_HSLEN_DURATION_TYP_NS + 200)
+
+#define PAL_HBP_DURATION_TYP_NS 5700
+#define PAL_HBP_DURATION_MIN_NS (PAL_HBP_DURATION_TYP_NS - 200)
+#define PAL_HBP_DURATION_MAX_NS (PAL_HBP_DURATION_TYP_NS + 200)
+
+struct analog_param_field {
+ unsigned int even, odd;
+};
+
+#define PARAM_FIELD(_odd, _even) \
+ { .even = _even, .odd = _odd }
+
+struct analog_param_range {
+ unsigned int min, typ, max;
+};
+
+#define PARAM_RANGE(_min, _typ, _max) \
+ { .min = _min, .typ = _typ, .max = _max }
+
+struct analog_parameters {
+ unsigned int num_lines;
+ unsigned int line_duration_ns;
+
+ struct analog_param_range hact_ns;
+ struct analog_param_range hfp_ns;
+ struct analog_param_range hslen_ns;
+ struct analog_param_range hbp_ns;
+ struct analog_param_range hblk_ns;
+
+ unsigned int bt601_hfp;
+
+ struct analog_param_field vfp_lines;
+ struct analog_param_field vslen_lines;
+ struct analog_param_field vbp_lines;
+};
+
+#define TV_MODE_PARAMETER(_mode, _lines, _line_dur, _hact, _hfp, \
+ _hslen, _hbp, _hblk, _bt601_hfp, _vfp, \
+ _vslen, _vbp) \
+ [_mode] = { \
+ .num_lines = _lines, \
+ .line_duration_ns = _line_dur, \
+ .hact_ns = _hact, \
+ .hfp_ns = _hfp, \
+ .hslen_ns = _hslen, \
+ .hbp_ns = _hbp, \
+ .hblk_ns = _hblk, \
+ .bt601_hfp = _bt601_hfp, \
+ .vfp_lines = _vfp, \
+ .vslen_lines = _vslen, \
+ .vbp_lines = _vbp, \
+ }
+
+static const struct analog_parameters tv_modes_parameters[] = {
+ TV_MODE_PARAMETER(DRM_MODE_ANALOG_NTSC,
+ NTSC_LINES_NUMBER,
+ NTSC_LINE_DURATION_NS,
+ PARAM_RANGE(NTSC_HACT_DURATION_MIN_NS,
+ NTSC_HACT_DURATION_TYP_NS,
+ NTSC_HACT_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HFP_DURATION_MIN_NS,
+ NTSC_HFP_DURATION_TYP_NS,
+ NTSC_HFP_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HSLEN_DURATION_MIN_NS,
+ NTSC_HSLEN_DURATION_TYP_NS,
+ NTSC_HSLEN_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HBP_DURATION_MIN_NS,
+ NTSC_HBP_DURATION_TYP_NS,
+ NTSC_HBP_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HBLK_DURATION_MIN_NS,
+ NTSC_HBLK_DURATION_TYP_NS,
+ NTSC_HBLK_DURATION_MAX_NS),
+ 16,
+ PARAM_FIELD(3, 3),
+ PARAM_FIELD(3, 3),
+ PARAM_FIELD(16, 17)),
+ TV_MODE_PARAMETER(DRM_MODE_ANALOG_PAL,
+ PAL_LINES_NUMBER,
+ PAL_LINE_DURATION_NS,
+ PARAM_RANGE(PAL_HACT_DURATION_MIN_NS,
+ PAL_HACT_DURATION_TYP_NS,
+ PAL_HACT_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HFP_DURATION_MIN_NS,
+ PAL_HFP_DURATION_TYP_NS,
+ PAL_HFP_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HSLEN_DURATION_MIN_NS,
+ PAL_HSLEN_DURATION_TYP_NS,
+ PAL_HSLEN_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HBP_DURATION_MIN_NS,
+ PAL_HBP_DURATION_TYP_NS,
+ PAL_HBP_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HBLK_DURATION_MIN_NS,
+ PAL_HBLK_DURATION_TYP_NS,
+ PAL_HBLK_DURATION_MAX_NS),
+ 12,
+
+ /*
+ * The front porch is actually 6 short sync
+ * pulses for the even field, and 5 for the
+ * odd field. Each sync takes half a life so
+ * the odd field front porch is shorter by
+ * half a line.
+ *
+ * In progressive, we're supposed to use 6
+ * pulses, so we're fine there
+ */
+ PARAM_FIELD(3, 2),
+
+ /*
+ * The vsync length is 5 long sync pulses,
+ * each field taking half a line. We're
+ * shorter for both fields by half a line.
+ *
+ * In progressive, we're supposed to use 5
+ * pulses, so we're off by half
+ * a line.
+ *
+ * In interlace, we're now off by half a line
+ * for the even field and one line for the odd
+ * field.
+ */
+ PARAM_FIELD(3, 3),
+
+ /*
+ * The back porch starts with post-equalizing
+ * pulses, consisting in 5 short sync pulses
+ * for the even field, 4 for the odd field. In
+ * progressive, it's 5 short syncs.
+ *
+ * In progressive, we thus have 2.5 lines,
+ * plus the 0.5 line we were missing
+ * previously, so we should use 3 lines.
+ *
+ * In interlace, the even field is in the
+ * exact same case as progressive. For the
+ * odd field, we should be using 2 lines but
+ * we're one line short, so we'll make up for
+ * it here by using 3.
+ *
+ * The entire blanking area is supposed to
+ * take 25 lines, so we also need to account
+ * for the rest of the blanking area that
+ * can't be in either the front porch or sync
+ * period.
+ */
+ PARAM_FIELD(19, 20)),
+};
+
+static int fill_analog_mode(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ const struct analog_parameters *params,
+ unsigned long pixel_clock_hz,
+ unsigned int hactive,
+ unsigned int vactive,
+ bool interlace)
+{
+ unsigned long pixel_duration_ns = NSEC_PER_SEC / pixel_clock_hz;
+ unsigned int htotal, vtotal;
+ unsigned int max_hact, hact_duration_ns;
+ unsigned int hblk, hblk_duration_ns;
+ unsigned int hfp, hfp_duration_ns;
+ unsigned int hslen, hslen_duration_ns;
+ unsigned int hbp, hbp_duration_ns;
+ unsigned int porches, porches_duration_ns;
+ unsigned int vfp, vfp_min;
+ unsigned int vbp, vbp_min;
+ unsigned int vslen;
+ bool bt601 = false;
+ int porches_rem;
+ u64 result;
+
+ drm_dbg_kms(dev,
+ "Generating a %ux%u%c, %u-line mode with a %lu kHz clock\n",
+ hactive, vactive,
+ interlace ? 'i' : 'p',
+ params->num_lines,
+ pixel_clock_hz / 1000);
+
+ max_hact = params->hact_ns.max / pixel_duration_ns;
+ if (pixel_clock_hz == 13500000 && hactive > max_hact && hactive <= 720) {
+ drm_dbg_kms(dev, "Trying to generate a BT.601 mode. Disabling checks.\n");
+ bt601 = true;
+ }
+
+ /*
+ * Our pixel duration is going to be round down by the division,
+ * so rounding up is probably going to introduce even more
+ * deviation.
+ */
+ result = (u64)params->line_duration_ns * pixel_clock_hz;
+ do_div(result, NSEC_PER_SEC);
+ htotal = result;
+
+ drm_dbg_kms(dev, "Total Horizontal Number of Pixels: %u\n", htotal);
+
+ hact_duration_ns = hactive * pixel_duration_ns;
+ if (!bt601 &&
+ (hact_duration_ns < params->hact_ns.min ||
+ hact_duration_ns > params->hact_ns.max)) {
+ DRM_ERROR("Invalid horizontal active area duration: %uns (min: %u, max %u)\n",
+ hact_duration_ns, params->hact_ns.min, params->hact_ns.max);
+ return -EINVAL;
+ }
+
+ hblk = htotal - hactive;
+ drm_dbg_kms(dev, "Horizontal Blanking Period: %u\n", hblk);
+
+ hblk_duration_ns = hblk * pixel_duration_ns;
+ if (!bt601 &&
+ (hblk_duration_ns < params->hblk_ns.min ||
+ hblk_duration_ns > params->hblk_ns.max)) {
+ DRM_ERROR("Invalid horizontal blanking duration: %uns (min: %u, max %u)\n",
+ hblk_duration_ns, params->hblk_ns.min, params->hblk_ns.max);
+ return -EINVAL;
+ }
+
+ hslen = DIV_ROUND_UP(params->hslen_ns.typ, pixel_duration_ns);
+ drm_dbg_kms(dev, "Horizontal Sync Period: %u\n", hslen);
+
+ hslen_duration_ns = hslen * pixel_duration_ns;
+ if (!bt601 &&
+ (hslen_duration_ns < params->hslen_ns.min ||
+ hslen_duration_ns > params->hslen_ns.max)) {
+ DRM_ERROR("Invalid horizontal sync duration: %uns (min: %u, max %u)\n",
+ hslen_duration_ns, params->hslen_ns.min, params->hslen_ns.max);
+ return -EINVAL;
+ }
+
+ porches = hblk - hslen;
+ drm_dbg_kms(dev, "Remaining horizontal pixels for both porches: %u\n", porches);
+
+ porches_duration_ns = porches * pixel_duration_ns;
+ if (!bt601 &&
+ (porches_duration_ns > (params->hfp_ns.max + params->hbp_ns.max) ||
+ porches_duration_ns < (params->hfp_ns.min + params->hbp_ns.min))) {
+ DRM_ERROR("Invalid horizontal porches duration: %uns\n", porches_duration_ns);
+ return -EINVAL;
+ }
+
+ if (bt601) {
+ hfp = params->bt601_hfp;
+ } else {
+ unsigned int hfp_min = DIV_ROUND_UP(params->hfp_ns.min,
+ pixel_duration_ns);
+ unsigned int hbp_min = DIV_ROUND_UP(params->hbp_ns.min,
+ pixel_duration_ns);
+ int porches_rem = porches - hfp_min - hbp_min;
+
+ hfp = hfp_min + DIV_ROUND_UP(porches_rem, 2);
+ }
+
+ drm_dbg_kms(dev, "Horizontal Front Porch: %u\n", hfp);
+
+ hfp_duration_ns = hfp * pixel_duration_ns;
+ if (!bt601 &&
+ (hfp_duration_ns < params->hfp_ns.min ||
+ hfp_duration_ns > params->hfp_ns.max)) {
+ DRM_ERROR("Invalid horizontal front porch duration: %uns (min: %u, max %u)\n",
+ hfp_duration_ns, params->hfp_ns.min, params->hfp_ns.max);
+ return -EINVAL;
+ }
+
+ hbp = porches - hfp;
+ drm_dbg_kms(dev, "Horizontal Back Porch: %u\n", hbp);
+
+ hbp_duration_ns = hbp * pixel_duration_ns;
+ if (!bt601 &&
+ (hbp_duration_ns < params->hbp_ns.min ||
+ hbp_duration_ns > params->hbp_ns.max)) {
+ DRM_ERROR("Invalid horizontal back porch duration: %uns (min: %u, max %u)\n",
+ hbp_duration_ns, params->hbp_ns.min, params->hbp_ns.max);
+ return -EINVAL;
+ }
+
+ if (htotal != (hactive + hfp + hslen + hbp))
+ return -EINVAL;
+
+ mode->clock = pixel_clock_hz / 1000;
+ mode->hdisplay = hactive;
+ mode->hsync_start = mode->hdisplay + hfp;
+ mode->hsync_end = mode->hsync_start + hslen;
+ mode->htotal = mode->hsync_end + hbp;
+
+ if (interlace) {
+ vfp_min = params->vfp_lines.even + params->vfp_lines.odd;
+ vbp_min = params->vbp_lines.even + params->vbp_lines.odd;
+ vslen = params->vslen_lines.even + params->vslen_lines.odd;
+ } else {
+ /*
+ * By convention, NTSC (aka 525/60) systems start with
+ * the even field, but PAL (aka 625/50) systems start
+ * with the odd one.
+ *
+ * PAL systems also have asymmetric timings between the
+ * even and odd field, while NTSC is symmetric.
+ *
+ * Moreover, if we want to create a progressive mode for
+ * PAL, we need to use the odd field timings.
+ *
+ * Since odd == even for NTSC, we can just use the odd
+ * one all the time to simplify the code a bit.
+ */
+ vfp_min = params->vfp_lines.odd;
+ vbp_min = params->vbp_lines.odd;
+ vslen = params->vslen_lines.odd;
+ }
+
+ drm_dbg_kms(dev, "Vertical Sync Period: %u\n", vslen);
+
+ porches = params->num_lines - vactive - vslen;
+ drm_dbg_kms(dev, "Remaining vertical pixels for both porches: %u\n", porches);
+
+ porches_rem = porches - vfp_min - vbp_min;
+ vfp = vfp_min + (porches_rem / 2);
+ drm_dbg_kms(dev, "Vertical Front Porch: %u\n", vfp);
+
+ vbp = porches - vfp;
+ drm_dbg_kms(dev, "Vertical Back Porch: %u\n", vbp);
+
+ vtotal = vactive + vfp + vslen + vbp;
+ if (params->num_lines != vtotal) {
+ DRM_ERROR("Invalid vertical total: %upx (expected %upx)\n",
+ vtotal, params->num_lines);
+ return -EINVAL;
+ }
+
+ mode->vdisplay = vactive;
+ mode->vsync_start = mode->vdisplay + vfp;
+ mode->vsync_end = mode->vsync_start + vslen;
+ mode->vtotal = mode->vsync_end + vbp;
+
+ if (mode->vtotal != params->num_lines)
+ return -EINVAL;
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ mode->flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC;
+ if (interlace)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ drm_mode_set_name(mode);
+
+ drm_dbg_kms(dev, "Generated mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+
+ return 0;
+}
+
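As a quick sanity check of the htotal math above: with the usual 13.5 MHz BT.601 pixel clock, the pixel duration rounds down to 1000000000 / 13500000 = 74 ns, and the line-duration conversion gives 64000 * 13500000 / 10^9 = 864 total pixels per line for PAL and 63556 * 13500000 / 10^9 = 858 for NTSC, matching the familiar 864x625 and 858x525 totals for 576i and 480i.
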
+/**
+ * drm_analog_tv_mode - create a display mode for an analog TV
+ * @dev: drm device
+ * @tv_mode: TV Mode standard to create a mode for. See DRM_MODE_TV_MODE_*.
+ * @pixel_clock_hz: Pixel Clock Frequency, in Hertz
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @interlace: whether to compute an interlaced mode
+ *
+ * This function creates a struct drm_display_mode instance suited for
+ * an analog TV output, for one of the usual analog TV mode.
+ *
+ * Note that if @hdisplay is larger than the usual constraints for the
+ * PAL and NTSC timings, we'll choose to ignore most timing constraints
+ * to reach those resolutions.
+ *
+ * Returns:
+ *
+ * A pointer to the mode, allocated with drm_mode_create(). Returns NULL
+ * on error.
+ */
+struct drm_display_mode *drm_analog_tv_mode(struct drm_device *dev,
+ enum drm_connector_tv_mode tv_mode,
+ unsigned long pixel_clock_hz,
+ unsigned int hdisplay,
+ unsigned int vdisplay,
+ bool interlace)
+{
+ struct drm_display_mode *mode;
+ enum drm_mode_analog analog;
+ int ret;
+
+ switch (tv_mode) {
+ case DRM_MODE_TV_MODE_NTSC:
+ fallthrough;
+ case DRM_MODE_TV_MODE_NTSC_443:
+ fallthrough;
+ case DRM_MODE_TV_MODE_NTSC_J:
+ fallthrough;
+ case DRM_MODE_TV_MODE_PAL_M:
+ analog = DRM_MODE_ANALOG_NTSC;
+ break;
+
+ case DRM_MODE_TV_MODE_PAL:
+ fallthrough;
+ case DRM_MODE_TV_MODE_PAL_N:
+ fallthrough;
+ case DRM_MODE_TV_MODE_SECAM:
+ analog = DRM_MODE_ANALOG_PAL;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ ret = fill_analog_mode(dev, mode,
+ &tv_modes_parameters[analog],
+ pixel_clock_hz, hdisplay, vdisplay, interlace);
+ if (ret)
+ goto err_free_mode;
+
+ return mode;
+
+err_free_mode:
+ drm_mode_destroy(dev, mode);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_analog_tv_mode);
+
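A hedged sketch of a composite-output connector using the new helper from its .get_modes callback; the connector plumbing and the choice of PAL at 13.5 MHz are assumptions for illustration, not part of this patch:

	#include <drm/drm_connector.h>
	#include <drm/drm_modes.h>

	static int example_tv_get_modes(struct drm_connector *connector)
	{
		struct drm_device *dev = connector->dev;
		struct drm_display_mode *mode;

		/* 720x576i PAL at the BT.601 13.5 MHz pixel clock */
		mode = drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_PAL,
					  13500000, 720, 576, true);
		if (!mode)
			return 0;

		drm_mode_probed_add(connector, mode);
		return 1;
	}
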
/**
* drm_cvt_mode -create a modeline based on the CVT algorithm
* @dev: drm device
@@ -1659,6 +2135,30 @@ static int drm_mode_parse_panel_orientation(const char *delim,
return 0;
}
+static int drm_mode_parse_tv_mode(const char *delim,
+ struct drm_cmdline_mode *mode)
+{
+ const char *value;
+ int ret;
+
+ if (*delim != '=')
+ return -EINVAL;
+
+ value = delim + 1;
+ delim = strchr(value, ',');
+ if (!delim)
+ delim = value + strlen(value);
+
+ ret = drm_get_tv_mode_from_name(value, delim - value);
+ if (ret < 0)
+ return ret;
+
+ mode->tv_mode_specified = true;
+ mode->tv_mode = ret;
+
+ return 0;
+}
+
static int drm_mode_parse_cmdline_options(const char *str,
bool freestanding,
const struct drm_connector *connector,
@@ -1728,6 +2228,9 @@ static int drm_mode_parse_cmdline_options(const char *str,
} else if (!strncmp(option, "panel_orientation", delim - option)) {
if (drm_mode_parse_panel_orientation(delim, mode))
return -EINVAL;
+ } else if (!strncmp(option, "tv_mode", delim - option)) {
+ if (drm_mode_parse_tv_mode(delim, mode))
+ return -EINVAL;
} else {
return -EINVAL;
}
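With the option wired up, the TV norm can be selected on the kernel command line like any other mode option; a hypothetical example (the connector name depends on the driver, the norm names follow the tv_mode property):

	video=Composite-1:720x576i,tv_mode=PAL
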
@@ -1756,20 +2259,24 @@ struct drm_named_mode {
unsigned int xres;
unsigned int yres;
unsigned int flags;
+ unsigned int tv_mode;
};
-#define NAMED_MODE(_name, _pclk, _x, _y, _flags) \
+#define NAMED_MODE(_name, _pclk, _x, _y, _flags, _mode) \
{ \
.name = _name, \
.pixel_clock_khz = _pclk, \
.xres = _x, \
.yres = _y, \
.flags = _flags, \
+ .tv_mode = _mode, \
}
static const struct drm_named_mode drm_named_modes[] = {
- NAMED_MODE("NTSC", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE),
- NAMED_MODE("PAL", 13500, 720, 576, DRM_MODE_FLAG_INTERLACE),
+ NAMED_MODE("NTSC", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_NTSC),
+ NAMED_MODE("NTSC-J", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_NTSC_J),
+ NAMED_MODE("PAL", 13500, 720, 576, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_PAL),
+ NAMED_MODE("PAL-M", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_PAL_M),
};
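The named entries above keep the historical shortcuts working and now carry the TV norm with them, so a hypothetical connector can also be given just the mode name, e.g.:

	video=Composite-1:NTSC-J
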
static int drm_mode_parse_cmdline_named_mode(const char *name,
@@ -1809,11 +2316,13 @@ static int drm_mode_parse_cmdline_named_mode(const char *name,
if (ret != name_end)
continue;
- strcpy(cmdline_mode->name, mode->name);
+ strscpy(cmdline_mode->name, mode->name, sizeof(cmdline_mode->name));
cmdline_mode->pixel_clock = mode->pixel_clock_khz;
cmdline_mode->xres = mode->xres;
cmdline_mode->yres = mode->yres;
cmdline_mode->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ cmdline_mode->tv_mode = mode->tv_mode;
+ cmdline_mode->tv_mode_specified = true;
cmdline_mode->specified = true;
return 1;
@@ -1992,6 +2501,31 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
}
EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
+static struct drm_display_mode *drm_named_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_named_modes); i++) {
+ const struct drm_named_mode *named_mode = &drm_named_modes[i];
+
+ if (strcmp(cmd->name, named_mode->name))
+ continue;
+
+ if (!cmd->tv_mode_specified)
+ continue;
+
+ return drm_analog_tv_mode(dev,
+ named_mode->tv_mode,
+ named_mode->pixel_clock_khz * 1000,
+ named_mode->xres,
+ named_mode->yres,
+ named_mode->flags & DRM_MODE_FLAG_INTERLACE);
+ }
+
+ return NULL;
+}
+
/**
* drm_mode_create_from_cmdline_mode - convert a command line modeline into a DRM display mode
* @dev: DRM device to create the new mode for
@@ -2009,7 +2543,9 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
if (cmd->xres == 0 || cmd->yres == 0)
return NULL;
- if (cmd->cvt)
+ if (strlen(cmd->name))
+ mode = drm_named_mode(dev, cmd);
+ else if (cmd->cvt)
mode = drm_cvt_mode(dev,
cmd->xres, cmd->yres,
cmd->refresh_specified ? cmd->refresh : 60,
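For reference, the new drm_analog_tv_mode() helper used by drm_named_mode() above can also be called directly by a driver that wants a fixed analog mode. Below is a minimal sketch reusing the PAL parameters from the named-mode table (720x576 interlaced at a 13.5 MHz pixel clock); the function name is illustrative and not part of this series.

#include <drm/drm_modes.h>

/*
 * Sketch: build a 576i PAL display mode, mirroring the parameters of the
 * "PAL" entry in drm_named_modes[]. Returns NULL on allocation failure.
 */
static struct drm_display_mode *example_pal_576i(struct drm_device *dev)
{
	return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_PAL,
				  13500 * 1000, /* pixel clock in Hz */
				  720, 576, true /* interlaced */);
}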
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 52d8800a8ab8..ca531dbb749d 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
int orientation;
};
-static const struct drm_dmi_panel_orientation_data asus_t100ha = {
- .width = 800,
- .height = 1280,
- .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
-};
-
static const struct drm_dmi_panel_orientation_data gpd_micropc = {
.width = 720,
.height = 1280,
@@ -97,6 +91,12 @@ static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd800x1280_leftside_up = {
+ .width = 800,
+ .height = 1280,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
.width = 800,
.height = 1280,
@@ -127,6 +127,12 @@ static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1600x2560_rightside_up = {
+ .width = 1600,
+ .height = 2560,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -151,7 +157,7 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
- .driver_data = (void *)&asus_t100ha,
+ .driver_data = (void *)&lcd800x1280_leftside_up,
}, { /* Asus T101HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -196,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Dynabook K50 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dynabook Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "dynabook K50/FR"),
+ },
+ .driver_data = (void *)&lcd800x1280_leftside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -325,6 +337,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Tab 3 X90F */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ },
+ .driver_data = (void *)&lcd1600x2560_rightside_up,
}, { /* Nanote UMPC-01 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 33357629a7f5..24e7998d1731 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -46,6 +46,11 @@
* properties that specify how the pixels are positioned and blended, like
* rotation or Z-position. All these properties are stored in &drm_plane_state.
*
+ * Unless explicitly specified (via CRTC property or otherwise), the active area
+ * of a CRTC will be black by default. This means portions of the active area
+ * which are not covered by a plane will be black, and alpha blending of any
+ * planes with the CRTC background will blend with black at the lowest zpos.
+ *
 * To create a plane, a KMS driver allocates and zeroes an instance of
* &struct drm_plane (possibly as part of a larger structure) and registers it
* with a call to drm_universal_plane_init().
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index bcd9611dabfd..7973f2589ced 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -250,6 +250,12 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_helper_funcs *funcs =
+ connector->helper_private;
+
+ if (funcs && funcs->enable_hpd)
+ funcs->enable_hpd(connector);
+
if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT))
poll = true;
@@ -802,6 +808,30 @@ bool drm_kms_helper_is_poll_worker(void)
}
EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
+static void drm_kms_helper_poll_disable_fini(struct drm_device *dev, bool fini)
+{
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ if (fini)
+ dev->mode_config.poll_enabled = false;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_helper_funcs *funcs =
+ connector->helper_private;
+
+ if (funcs && funcs->disable_hpd)
+ funcs->disable_hpd(connector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+}
+
/**
* drm_kms_helper_poll_disable - disable output polling
* @dev: drm_device
@@ -818,9 +848,7 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
*/
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
- if (!dev->mode_config.poll_enabled)
- return;
- cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+ drm_kms_helper_poll_disable_fini(dev, false);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
@@ -858,11 +886,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
*/
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
- if (!dev->mode_config.poll_enabled)
- return;
-
- dev->mode_config.poll_enabled = false;
- cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+ drm_kms_helper_poll_disable_fini(dev, true);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
@@ -1146,3 +1170,85 @@ int drm_connector_helper_get_modes(struct drm_connector *connector)
return count;
}
EXPORT_SYMBOL(drm_connector_helper_get_modes);
+
+/**
+ * drm_connector_helper_tv_get_modes - Fills the modes available to a TV connector
+ * @connector: The connector
+ *
+ * Fills the available modes for a TV connector based on the supported
+ * TV modes, and the default mode expressed by the kernel command line.
+ *
+ * This can be used as the default TV connector helper .get_modes() hook
+ * if the driver does not need any special processing.
+ *
+ * Returns:
+ * The number of modes added to the connector.
+ */
+int drm_connector_helper_tv_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *tv_mode_property =
+ dev->mode_config.tv_mode_property;
+ struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
+ unsigned int ntsc_modes = BIT(DRM_MODE_TV_MODE_NTSC) |
+ BIT(DRM_MODE_TV_MODE_NTSC_443) |
+ BIT(DRM_MODE_TV_MODE_NTSC_J) |
+ BIT(DRM_MODE_TV_MODE_PAL_M);
+ unsigned int pal_modes = BIT(DRM_MODE_TV_MODE_PAL) |
+ BIT(DRM_MODE_TV_MODE_PAL_N) |
+ BIT(DRM_MODE_TV_MODE_SECAM);
+ unsigned int tv_modes[2] = { UINT_MAX, UINT_MAX };
+ unsigned int i, supported_tv_modes = 0;
+
+ if (!tv_mode_property)
+ return 0;
+
+ for (i = 0; i < tv_mode_property->num_values; i++)
+ supported_tv_modes |= BIT(tv_mode_property->values[i]);
+
+ if ((supported_tv_modes & ntsc_modes) &&
+ (supported_tv_modes & pal_modes)) {
+ uint64_t default_mode;
+
+ if (drm_object_property_get_default_value(&connector->base,
+ tv_mode_property,
+ &default_mode))
+ return 0;
+
+ if (cmdline->tv_mode_specified)
+ default_mode = cmdline->tv_mode;
+
+ if (BIT(default_mode) & ntsc_modes) {
+ tv_modes[0] = DRM_MODE_TV_MODE_NTSC;
+ tv_modes[1] = DRM_MODE_TV_MODE_PAL;
+ } else {
+ tv_modes[0] = DRM_MODE_TV_MODE_PAL;
+ tv_modes[1] = DRM_MODE_TV_MODE_NTSC;
+ }
+ } else if (supported_tv_modes & ntsc_modes) {
+ tv_modes[0] = DRM_MODE_TV_MODE_NTSC;
+ } else if (supported_tv_modes & pal_modes) {
+ tv_modes[0] = DRM_MODE_TV_MODE_PAL;
+ } else {
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ struct drm_display_mode *mode;
+
+ if (tv_modes[i] == DRM_MODE_TV_MODE_NTSC)
+ mode = drm_mode_analog_ntsc_480i(dev);
+ else if (tv_modes[i] == DRM_MODE_TV_MODE_PAL)
+ mode = drm_mode_analog_pal_576i(dev);
+ else
+ break;
+ if (!mode)
+ return i;
+ if (!i)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+EXPORT_SYMBOL(drm_connector_helper_tv_get_modes);
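A driver whose TV connector needs no special handling can wire this helper up directly. A minimal sketch with an illustrative struct name, assuming the connector registers the usual tv_mode property (the helper returns 0 when that property was never created):

#include <drm/drm_probe_helper.h>

/* Sketch: let the generic helper populate 480i/576i modes for a TV connector. */
static const struct drm_connector_helper_funcs example_tv_connector_helper_funcs = {
	.get_modes = drm_connector_helper_tv_get_modes,
};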
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 3ef420ec4534..270523ae36d4 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -267,7 +267,7 @@ static int drm_simple_kms_plane_prepare_fb(struct drm_plane *plane,
WARN_ON_ONCE(pipe->funcs && pipe->funcs->cleanup_fb);
- return drm_gem_simple_display_pipe_prepare_fb(pipe, state);
+ return drm_gem_plane_helper_prepare_fb(plane, state);
}
return pipe->funcs->prepare_fb(pipe, state);
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8155d7e650f1..2867b39fa35e 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -710,7 +710,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM
static int exynos5433_decon_suspend(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
@@ -741,14 +740,10 @@ err:
return ret;
}
-#endif
-static const struct dev_pm_ops exynos5433_decon_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos5433_decon_pm_ops,
+ exynos5433_decon_suspend,
+ exynos5433_decon_resume, NULL);
static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
{
@@ -881,7 +876,7 @@ struct platform_driver exynos5433_decon_driver = {
.remove = exynos5433_decon_remove,
.driver = {
.name = "exynos5433-decon",
- .pm = &exynos5433_decon_pm_ops,
+ .pm = pm_ptr(&exynos5433_decon_pm_ops),
.of_match_table = exynos5433_decon_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 7080cf7952ec..3126f735dedc 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -779,7 +779,6 @@ static int decon_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int exynos7_decon_suspend(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
@@ -836,21 +835,16 @@ err_aclk_enable:
err_pclk_enable:
return ret;
}
-#endif
-static const struct dev_pm_ops exynos7_decon_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend,
+ exynos7_decon_resume, NULL);
struct platform_driver decon_driver = {
.probe = decon_probe,
.remove = decon_remove,
.driver = {
.name = "exynos-decon",
- .pm = &exynos7_decon_pm_ops,
+ .pm = pm_ptr(&exynos7_decon_pm_ops),
.of_match_table = decon_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4e3d3d5f6866..3404ec1367fb 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -260,7 +260,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int exynos_dp_suspend(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
@@ -274,13 +273,9 @@ static int exynos_dp_resume(struct device *dev)
return analogix_dp_resume(dp->adp);
}
-#endif
-static const struct dev_pm_ops exynos_dp_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos_dp_pm_ops, exynos_dp_suspend,
+ exynos_dp_resume, NULL);
static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
@@ -294,7 +289,7 @@ struct platform_driver dp_driver = {
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
- .pm = &exynos_dp_pm_ops,
+ .pm = pm_ptr(&exynos_dp_pm_ops),
.of_match_table = exynos_dp_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 0ee32e4b1e43..8de2714599fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1381,7 +1381,6 @@ static int fimc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
@@ -1398,13 +1397,9 @@ static int fimc_runtime_resume(struct device *dev)
DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
}
-#endif
-static const struct dev_pm_ops fimc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(fimc_pm_ops, fimc_runtime_suspend,
+ fimc_runtime_resume, NULL);
static const struct of_device_id fimc_of_match[] = {
{ .compatible = "samsung,exynos4210-fimc" },
@@ -1420,6 +1415,6 @@ struct platform_driver fimc_driver = {
.of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
.owner = THIS_MODULE,
- .pm = &fimc_pm_ops,
+ .pm = pm_ptr(&fimc_pm_ops),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index ae6636e6658e..7f4a0be03dd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1287,7 +1287,6 @@ static int fimd_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int exynos_fimd_suspend(struct device *dev)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
@@ -1321,13 +1320,9 @@ static int exynos_fimd_resume(struct device *dev)
return 0;
}
-#endif
-static const struct dev_pm_ops exynos_fimd_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos_fimd_pm_ops, exynos_fimd_suspend,
+ exynos_fimd_resume, NULL);
struct platform_driver fimd_driver = {
.probe = fimd_probe,
@@ -1335,7 +1330,7 @@ struct platform_driver fimd_driver = {
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
- .pm = &exynos_fimd_pm_ops,
+ .pm = pm_ptr(&exynos_fimd_pm_ops),
.of_match_table = fimd_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index e19c2ceb3759..ec784e58da5c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1549,7 +1549,6 @@ static int g2d_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int g2d_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
@@ -1574,9 +1573,7 @@ static int g2d_resume(struct device *dev)
return 0;
}
-#endif
-#ifdef CONFIG_PM
static int g2d_runtime_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
@@ -1597,11 +1594,10 @@ static int g2d_runtime_resume(struct device *dev)
return ret;
}
-#endif
static const struct dev_pm_ops g2d_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
- SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+ RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};
static const struct of_device_id exynos_g2d_match[] = {
@@ -1617,7 +1613,7 @@ struct platform_driver g2d_driver = {
.driver = {
.name = "exynos-drm-g2d",
.owner = THIS_MODULE,
- .pm = &g2d_pm_ops,
+ .pm = pm_ptr(&g2d_pm_ops),
.of_match_table = exynos_g2d_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 09ce28ee08d9..17bab5b1663f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -340,7 +340,6 @@ static const struct component_ops exynos_mic_component_ops = {
.unbind = exynos_mic_unbind,
};
-#ifdef CONFIG_PM
static int exynos_mic_suspend(struct device *dev)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
@@ -369,13 +368,9 @@ static int exynos_mic_resume(struct device *dev)
}
return 0;
}
-#endif
-static const struct dev_pm_ops exynos_mic_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_mic_suspend, exynos_mic_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos_mic_pm_ops, exynos_mic_suspend,
+ exynos_mic_resume, NULL);
static int exynos_mic_probe(struct platform_device *pdev)
{
@@ -470,7 +465,7 @@ struct platform_driver mic_driver = {
.remove = exynos_mic_remove,
.driver = {
.name = "exynos-mic",
- .pm = &exynos_mic_pm_ops,
+ .pm = pm_ptr(&exynos_mic_pm_ops),
.owner = THIS_MODULE,
.of_match_table = exynos_mic_of_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index dec7df35baa9..8706f377c349 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -340,7 +340,6 @@ static int rotator_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
@@ -355,7 +354,6 @@ static int rotator_runtime_resume(struct device *dev)
return clk_prepare_enable(rot->clock);
}
-#endif
static const struct drm_exynos_ipp_limit rotator_s5pv210_rbg888_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) },
@@ -450,12 +448,8 @@ static const struct of_device_id exynos_rotator_match[] = {
};
MODULE_DEVICE_TABLE(of, exynos_rotator_match);
-static const struct dev_pm_ops rotator_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
- NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(rotator_pm_ops, rotator_runtime_suspend,
+ rotator_runtime_resume, NULL);
struct platform_driver rotator_driver = {
.probe = rotator_probe,
@@ -463,7 +457,7 @@ struct platform_driver rotator_driver = {
.driver = {
.name = "exynos-rotator",
.owner = THIS_MODULE,
- .pm = &rotator_pm_ops,
+ .pm = pm_ptr(&rotator_pm_ops),
.of_match_table = exynos_rotator_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 3c049fb658a3..20608e9780ce 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -550,8 +550,6 @@ static int scaler_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-
static int clk_disable_unprepare_wrapper(struct clk *clk)
{
clk_disable_unprepare(clk);
@@ -584,13 +582,9 @@ static int scaler_runtime_resume(struct device *dev)
return scaler_clk_ctrl(scaler, true);
}
-#endif
-static const struct dev_pm_ops scaler_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(scaler_runtime_suspend, scaler_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(scaler_pm_ops, scaler_runtime_suspend,
+ scaler_runtime_resume, NULL);
static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_hv_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
@@ -731,7 +725,7 @@ struct platform_driver scaler_driver = {
.driver = {
.name = "exynos-scaler",
.owner = THIS_MODULE,
- .pm = &scaler_pm_ops,
+ .pm = pm_ptr(&scaler_pm_ops),
.of_match_table = exynos_scaler_match,
},
};
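All of the Exynos conversions above follow the same recipe: the #ifdef CONFIG_PM guards go away, DEFINE_RUNTIME_DEV_PM_OPS() declares the runtime callbacks (and routes system sleep through pm_runtime_force_suspend/resume), and pm_ptr() turns the reference into NULL when CONFIG_PM is disabled so the ops structure can be discarded. A condensed sketch of the pattern with placeholder names:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/* Gate clocks, drop regulators, etc. */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* Re-enable clocks and restore hardware state. */
	return 0;
}

static DEFINE_RUNTIME_DEV_PM_OPS(example_pm_ops, example_runtime_suspend,
				 example_runtime_resume, NULL);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		/* Evaluates to NULL when CONFIG_PM=n, so example_pm_ops can be dropped. */
		.pm = pm_ptr(&example_pm_ops),
	},
};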
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 807b989e3c77..2efc0eb41c64 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -3,6 +3,8 @@ config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
depends on DRM && PCI && X86 && MMU
select DRM_KMS_HELPER
+ select I2C
+ select I2C_ALGOBIT
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index fa636206f232..034e78360d4f 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -303,7 +303,7 @@ static int gud_connector_atomic_check(struct drm_connector *connector,
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
old_state->tv.margins.bottom != new_state->tv.margins.bottom ||
- old_state->tv.mode != new_state->tv.mode ||
+ old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
old_state->tv.brightness != new_state->tv.brightness ||
old_state->tv.contrast != new_state->tv.contrast ||
old_state->tv.flicker_reduction != new_state->tv.flicker_reduction ||
@@ -400,7 +400,7 @@ static int gud_connector_add_tv_mode(struct gud_device *gdrm, struct drm_connect
for (i = 0; i < num_modes; i++)
modes[i] = &buf[i * GUD_CONNECTOR_TV_MODE_NAME_LEN];
- ret = drm_mode_create_tv_properties(connector->dev, num_modes, modes);
+ ret = drm_mode_create_tv_properties_legacy(connector->dev, num_modes, modes);
free:
kfree(buf);
if (ret < 0)
@@ -424,7 +424,7 @@ gud_connector_property_lookup(struct drm_connector *connector, u16 prop)
case GUD_PROPERTY_TV_BOTTOM_MARGIN:
return config->tv_bottom_margin_property;
case GUD_PROPERTY_TV_MODE:
- return config->tv_mode_property;
+ return config->legacy_tv_mode_property;
case GUD_PROPERTY_TV_BRIGHTNESS:
return config->tv_brightness_property;
case GUD_PROPERTY_TV_CONTRAST:
@@ -454,7 +454,7 @@ static unsigned int *gud_connector_tv_state_val(u16 prop, struct drm_tv_connecto
case GUD_PROPERTY_TV_BOTTOM_MARGIN:
return &state->margins.bottom;
case GUD_PROPERTY_TV_MODE:
- return &state->mode;
+ return &state->legacy_mode;
case GUD_PROPERTY_TV_BRIGHTNESS:
return &state->brightness;
case GUD_PROPERTY_TV_CONTRAST:
@@ -539,7 +539,7 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn
fallthrough;
case GUD_PROPERTY_TV_HUE:
/* This is a no-op if already added. */
- ret = drm_mode_create_tv_properties(drm, 0, NULL);
+ ret = drm_mode_create_tv_properties_legacy(drm, 0, NULL);
if (ret)
goto out;
break;
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index d57dab104358..9d7bf8ee45f1 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -325,8 +325,8 @@ static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struc
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct gud_device *gdrm = to_gud_device(node->minor->dev);
+ struct drm_debugfs_entry *entry = m->private;
+ struct gud_device *gdrm = to_gud_device(entry->dev);
char buf[10];
string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf));
@@ -352,19 +352,10 @@ static int gud_stats_debugfs(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list gud_debugfs_list[] = {
- { "stats", gud_stats_debugfs, 0, NULL },
-};
-
-static void gud_debugfs_init(struct drm_minor *minor)
-{
- drm_debugfs_create_files(gud_debugfs_list, ARRAY_SIZE(gud_debugfs_list),
- minor->debugfs_root, minor);
-}
-
static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
.check = gud_pipe_check,
.update = gud_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
};
static const struct drm_mode_config_funcs gud_mode_config_funcs = {
@@ -385,7 +376,6 @@ static const struct drm_driver gud_drm_driver = {
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_prime_import = gud_gem_prime_import,
- .debugfs_init = gud_debugfs_init,
.name = "gud",
.desc = "Generic USB Display",
@@ -622,6 +612,8 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (!gdrm->dmadev)
dev_warn(dev, "buffer sharing not supported");
+ drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
+
ret = drm_dev_register(drm, 0);
if (ret) {
put_device(gdrm->dmadev);
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index e351a1f1420d..0d148a6f27aa 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -43,6 +43,7 @@ struct gud_device {
struct drm_framebuffer *fb;
struct drm_rect damage;
bool prev_flush_failed;
+ void *shadow_buf;
};
static inline struct gud_device *to_gud_device(struct drm_device *drm)
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 7c6dc2bcd14a..dc16a92625d4 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -5,6 +5,7 @@
#include <linux/lz4.h>
#include <linux/usb.h>
+#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <drm/drm_atomic.h>
@@ -15,6 +16,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
@@ -24,17 +26,13 @@
#include "gud_internal.h"
/*
- * Some userspace rendering loops runs all displays in the same loop.
+ * Some userspace rendering loops run all displays in the same loop.
* This means that a fast display will have to wait for a slow one.
- * For this reason gud does flushing asynchronous by default.
- * The down side is that in e.g. a single display setup userspace thinks
- * the display is insanely fast since the driver reports back immediately
- * that the flush/pageflip is done. This wastes CPU and power.
- * Such users might want to set this module parameter to false.
+ * Such users might want to enable this module parameter.
*/
-static bool gud_async_flush = true;
+static bool gud_async_flush;
module_param_named(async_flush, gud_async_flush, bool, 0644);
-MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=true]");
+MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=0]");
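With the default now synchronous, asynchronous flushing remains available as an opt-in, for example by booting with gud.async_flush=1 or, since the parameter is 0644, by writing 1 to /sys/module/gud/parameters/async_flush at runtime.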
/*
* FIXME: The driver is probably broken on Big Endian machines.
@@ -152,32 +150,21 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
}
static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, bool cached_reads,
const struct drm_format_info *format, struct drm_rect *rect,
struct gud_set_buffer_req *req)
{
- struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
u8 compression = gdrm->compression;
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map map_data[DRM_FORMAT_MAX_PLANES];
struct iosys_map dst;
void *vaddr, *buf;
size_t pitch, len;
- int ret = 0;
pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
len = pitch * drm_rect_height(rect);
if (len > gdrm->bulk_len)
return -E2BIG;
- ret = drm_gem_fb_vmap(fb, map, map_data);
- if (ret)
- return ret;
-
- vaddr = map_data[0].vaddr;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- goto vunmap;
+ vaddr = src[0].vaddr;
retry:
if (compression)
buf = gdrm->compress_buf;
@@ -192,29 +179,27 @@ retry:
if (format != fb->format) {
if (format->format == GUD_DRM_FORMAT_R1) {
len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
- if (!len) {
- ret = -ENOMEM;
- goto end_cpu_access;
- }
+ if (!len)
+ return -ENOMEM;
} else if (format->format == DRM_FORMAT_R8) {
- drm_fb_xrgb8888_to_gray8(&dst, NULL, map_data, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect);
} else if (format->format == DRM_FORMAT_RGB332) {
- drm_fb_xrgb8888_to_rgb332(&dst, NULL, map_data, fb, rect);
+ drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(&dst, NULL, map_data, fb, rect,
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
gud_is_big_endian());
} else if (format->format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(&dst, NULL, map_data, fb, rect);
+ drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
} else if (gud_is_big_endian() && format->cpp[0] > 1) {
- drm_fb_swab(&dst, NULL, map_data, fb, rect, !import_attach);
- } else if (compression && !import_attach && pitch == fb->pitches[0]) {
+ drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads);
+ } else if (compression && cached_reads && pitch == fb->pitches[0]) {
/* can compress directly from the framebuffer */
buf = vaddr + rect->y1 * pitch;
} else {
- drm_fb_memcpy(&dst, NULL, map_data, fb, rect);
+ drm_fb_memcpy(&dst, NULL, src, fb, rect);
}
memset(req, 0, sizeof(*req));
@@ -237,12 +222,7 @@ retry:
req->compressed_length = cpu_to_le32(complen);
}
-end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-vunmap:
- drm_gem_fb_vunmap(fb, map);
-
- return ret;
+ return 0;
}
struct gud_usb_bulk_context {
@@ -285,6 +265,7 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
}
static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, bool cached_reads,
const struct drm_format_info *format, struct drm_rect *rect)
{
struct gud_set_buffer_req req;
@@ -293,7 +274,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = gud_prep_flush(gdrm, fb, format, rect, &req);
+ ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req);
if (ret)
return ret;
@@ -333,46 +314,51 @@ void gud_clear_damage(struct gud_device *gdrm)
gdrm->damage.y2 = 0;
}
-static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
+static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, bool cached_reads,
+ struct drm_rect *damage)
{
- gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
- gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
- gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
- gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
-}
+ const struct drm_format_info *format;
+ unsigned int i, lines;
+ size_t pitch;
+ int ret;
-static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
- struct drm_rect *damage)
-{
- /*
- * pipe_update waits for the worker when the display mode is going to change.
- * This ensures that the width and height is still the same making it safe to
- * add back the damage.
- */
+ format = fb->format;
+ if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
+ format = gdrm->xrgb8888_emulation_format;
- mutex_lock(&gdrm->damage_lock);
- if (!gdrm->fb) {
- drm_framebuffer_get(fb);
- gdrm->fb = fb;
- }
- gud_add_damage(gdrm, damage);
- mutex_unlock(&gdrm->damage_lock);
+ /* Split update if it's too big */
+ pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(damage));
+ lines = drm_rect_height(damage);
+
+ if (gdrm->bulk_len < lines * pitch)
+ lines = gdrm->bulk_len / pitch;
+
+ for (i = 0; i < DIV_ROUND_UP(drm_rect_height(damage), lines); i++) {
+ struct drm_rect rect = *damage;
- /* Retry only once to avoid a possible storm in case of continues errors. */
- if (!gdrm->prev_flush_failed)
- queue_work(system_long_wq, &gdrm->work);
- gdrm->prev_flush_failed = true;
+ rect.y1 += i * lines;
+ rect.y2 = min_t(u32, rect.y1 + lines, damage->y2);
+
+ ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect);
+ if (ret) {
+ if (ret != -ENODEV && ret != -ECONNRESET &&
+ ret != -ESHUTDOWN && ret != -EPROTO)
+ dev_err_ratelimited(fb->dev->dev,
+ "Failed to flush framebuffer: error=%d\n", ret);
+ gdrm->prev_flush_failed = true;
+ break;
+ }
+ }
}
void gud_flush_work(struct work_struct *work)
{
struct gud_device *gdrm = container_of(work, struct gud_device, work);
- const struct drm_format_info *format;
+ struct iosys_map shadow_map;
struct drm_framebuffer *fb;
struct drm_rect damage;
- unsigned int i, lines;
- int idx, ret = 0;
- size_t pitch;
+ int idx;
if (!drm_dev_enter(&gdrm->drm, &idx))
return;
@@ -380,6 +366,7 @@ void gud_flush_work(struct work_struct *work)
mutex_lock(&gdrm->damage_lock);
fb = gdrm->fb;
gdrm->fb = NULL;
+ iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
damage = gdrm->damage;
gud_clear_damage(gdrm);
mutex_unlock(&gdrm->damage_lock);
@@ -387,59 +374,43 @@ void gud_flush_work(struct work_struct *work)
if (!fb)
goto out;
- format = fb->format;
- if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
- format = gdrm->xrgb8888_emulation_format;
-
- /* Split update if it's too big */
- pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(&damage));
- lines = drm_rect_height(&damage);
-
- if (gdrm->bulk_len < lines * pitch)
- lines = gdrm->bulk_len / pitch;
-
- for (i = 0; i < DIV_ROUND_UP(drm_rect_height(&damage), lines); i++) {
- struct drm_rect rect = damage;
-
- rect.y1 += i * lines;
- rect.y2 = min_t(u32, rect.y1 + lines, damage.y2);
-
- ret = gud_flush_rect(gdrm, fb, format, &rect);
- if (ret) {
- if (ret != -ENODEV && ret != -ECONNRESET &&
- ret != -ESHUTDOWN && ret != -EPROTO) {
- bool prev_flush_failed = gdrm->prev_flush_failed;
-
- gud_retry_failed_flush(gdrm, fb, &damage);
- if (!prev_flush_failed)
- dev_err_ratelimited(fb->dev->dev,
- "Failed to flush framebuffer: error=%d\n", ret);
- }
- break;
- }
-
- gdrm->prev_flush_failed = false;
- }
+ gud_flush_damage(gdrm, fb, &shadow_map, true, &damage);
drm_framebuffer_put(fb);
out:
drm_dev_exit(idx);
}
-static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
- struct drm_rect *damage)
+static int gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, struct drm_rect *damage)
{
struct drm_framebuffer *old_fb = NULL;
+ struct iosys_map shadow_map;
mutex_lock(&gdrm->damage_lock);
+ if (!gdrm->shadow_buf) {
+ gdrm->shadow_buf = vzalloc(fb->pitches[0] * fb->height);
+ if (!gdrm->shadow_buf) {
+ mutex_unlock(&gdrm->damage_lock);
+ return -ENOMEM;
+ }
+ }
+
+ iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
+ iosys_map_incr(&shadow_map, drm_fb_clip_offset(fb->pitches[0], fb->format, damage));
+ drm_fb_memcpy(&shadow_map, fb->pitches, src, fb, damage);
+
if (fb != gdrm->fb) {
old_fb = gdrm->fb;
drm_framebuffer_get(fb);
gdrm->fb = fb;
}
- gud_add_damage(gdrm, damage);
+ gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
+ gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
+ gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
+ gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
mutex_unlock(&gdrm->damage_lock);
@@ -447,6 +418,26 @@ static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer
if (old_fb)
drm_framebuffer_put(old_fb);
+
+ return 0;
+}
+
+static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, struct drm_rect *damage)
+{
+ int ret;
+
+ if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
+ drm_rect_init(damage, 0, 0, fb->width, fb->height);
+
+ if (gud_async_flush) {
+ ret = gud_fb_queue_damage(gdrm, fb, src, damage);
+ if (ret != -ENOMEM)
+ return;
+ }
+
+ /* Imported buffers are assumed to be WriteCombined with uncached reads */
+ gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
}
int gud_pipe_check(struct drm_simple_display_pipe *pipe,
@@ -571,10 +562,11 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_device *drm = pipe->crtc.dev;
struct gud_device *gdrm = to_gud_device(drm);
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect damage;
- int idx;
+ int ret, idx;
if (crtc->state->mode_changed || !crtc->state->enable) {
cancel_work_sync(&gdrm->work);
@@ -584,6 +576,8 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
gdrm->fb = NULL;
}
gud_clear_damage(gdrm);
+ vfree(gdrm->shadow_buf);
+ gdrm->shadow_buf = NULL;
mutex_unlock(&gdrm->damage_lock);
}
@@ -599,14 +593,19 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
if (crtc->state->active_changed)
gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);
- if (drm_atomic_helper_damage_merged(old_state, state, &damage)) {
- if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
- drm_rect_init(&damage, 0, 0, fb->width, fb->height);
- gud_fb_queue_damage(gdrm, fb, &damage);
- if (!gud_async_flush)
- flush_work(&gdrm->work);
- }
+ if (!fb)
+ goto ctrl_disable;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ goto ctrl_disable;
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &damage))
+ gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage);
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+ctrl_disable:
if (!crtc->state->enable)
gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 4e41c144a290..126504318a4f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -7,6 +7,8 @@ config DRM_HISI_HIBMC
select DRM_VRAM_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select I2C
+ select I2C_ALGOBIT
help
Choose this option if you have a Hisilicon Hibmc soc chipset.
If M is selected the module will be called hibmc-drm.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 22053c613644..0c4aa4d9b0a7 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -106,7 +106,7 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 1200;
- dev->mode_config.preferred_depth = 32;
+ dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -340,7 +340,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
goto err_unload;
}
- drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+ drm_fbdev_generic_setup(dev, 32);
return 0;
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 578b738859b9..923389f22020 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -250,7 +250,7 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_mode_config *conf = &dev->mode_config;
- drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
+ drm_mode_create_tv_properties_legacy(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
if (!priv->scale_property)
@@ -264,8 +264,8 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
priv->hmargin);
drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
priv->vmargin);
- drm_object_attach_property(&connector->base, conf->tv_mode_property,
- priv->norm);
+ drm_object_attach_property(&connector->base, conf->legacy_tv_mode_property,
+ priv->norm);
drm_object_attach_property(&connector->base, conf->tv_brightness_property,
priv->brightness);
drm_object_attach_property(&connector->base, conf->tv_contrast_property,
@@ -315,7 +315,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
ch7006_load_reg(client, state, CH7006_POV);
ch7006_load_reg(client, state, CH7006_VPOS);
- } else if (property == conf->tv_mode_property) {
+ } else if (property == conf->legacy_tv_mode_property) {
if (connector->dpms != DRM_MODE_DPMS_OFF)
return -EINVAL;
@@ -386,7 +386,7 @@ static const struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
/* I2C driver functions */
-static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int ch7006_probe(struct i2c_client *client)
{
uint8_t addr = CH7006_VERSION_ID;
uint8_t val;
@@ -495,7 +495,7 @@ static const struct dev_pm_ops ch7006_pm_ops = {
static struct drm_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
- .probe = ch7006_probe,
+ .probe_new = ch7006_probe,
.remove = ch7006_remove,
.driver = {
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 1bc0b5de4499..f57f9a807542 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -350,7 +350,7 @@ static const struct drm_encoder_slave_funcs sil164_encoder_funcs = {
/* I2C driver functions */
static int
-sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
+sil164_probe(struct i2c_client *client)
{
int vendor = sil164_read(client, SIL164_VENDOR_HI) << 8 |
sil164_read(client, SIL164_VENDOR_LO);
@@ -420,7 +420,7 @@ MODULE_DEVICE_TABLE(i2c, sil164_ids);
static struct drm_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
- .probe = sil164_probe,
+ .probe_new = sil164_probe,
.driver = {
.name = "sil164",
},
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 9ed54e7ccff2..b8c143e573e0 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -375,8 +375,7 @@ static void tda9950_cec_del(void *data)
cec_delete_adapter(priv->adap);
}
-static int tda9950_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tda9950_probe(struct i2c_client *client)
{
struct tda9950_glue *glue = client->dev.platform_data;
struct device *dev = &client->dev;
@@ -493,7 +492,7 @@ static struct i2c_device_id tda9950_ids[] = {
MODULE_DEVICE_TABLE(i2c, tda9950_ids);
static struct i2c_driver tda9950_driver = {
- .probe = tda9950_probe,
+ .probe_new = tda9950_probe,
.remove = tda9950_remove,
.driver = {
.name = "tda9950",
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a14d2896aebb..db5c9343a3d2 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -2059,7 +2059,7 @@ static const struct component_ops tda998x_ops = {
};
static int
-tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+tda998x_probe(struct i2c_client *client)
{
int ret;
@@ -2099,7 +2099,7 @@ static const struct i2c_device_id tda998x_ids[] = {
MODULE_DEVICE_TABLE(i2c, tda998x_ids);
static struct i2c_driver tda998x_driver = {
- .probe = tda998x_probe,
+ .probe_new = tda998x_probe,
.remove = tda998x_remove,
.driver = {
.name = "tda998x",
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 8eb3e60aeec9..9c0990c0ec87 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -18,6 +18,8 @@ config DRM_I915
select DRM_PANEL
select DRM_MIPI_DSI
select RELAY
+ select I2C
+ select I2C_ALGOBIT
select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 4d2101ca1692..b986bf075889 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1905,10 +1905,10 @@ static void intel_tv_add_properties(struct drm_connector *connector)
tv_format_names[i] = tv_modes[i].name;
}
- drm_mode_create_tv_properties(&i915->drm, i, tv_format_names);
+ drm_mode_create_tv_properties_legacy(&i915->drm, i, tv_format_names);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_mode_property,
+ i915->drm.mode_config.legacy_tv_mode_property,
conn_state->tv.mode);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.tv_left_margin_property,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index da09767fda07..f266b68cf012 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -730,32 +730,69 @@ static int eb_reserve(struct i915_execbuffer *eb)
bool unpinned;
/*
- * Attempt to pin all of the buffers into the GTT.
- * This is done in 2 phases:
+ * We have one or more buffers that we couldn't bind, which could be due to
+ * various reasons. To resolve this we have 4 passes, with every next
+ * level turning the screws tighter:
*
- * 1. Unbind all objects that do not match the GTT constraints for
- * the execbuffer (fenceable, mappable, alignment etc).
- * 2. Bind new objects.
+ * 0. Unbind all objects that do not match the GTT constraints for the
+ * execbuffer (fenceable, mappable, alignment etc). Bind all new
+ * objects. This avoids unnecessary unbinding of later objects in order
+ * to make room for the earlier objects *unless* we need to defragment.
*
- * This avoid unnecessary unbinding of later objects in order to make
- * room for the earlier objects *unless* we need to defragment.
+ * 1. Reorder the buffers, where objects with the most restrictive
+ * placement requirements go first (ignoring fixed location buffers for
+ * now). For example, objects needing the mappable aperture (the first
+ * 256M of GTT) should go first vs. objects that can be placed just
+ * about anywhere. Repeat the previous pass.
*
- * Defragmenting is skipped if all objects are pinned at a fixed location.
+ * 2. Consider buffers that are pinned at a fixed location. Also try to
+ * evict the entire VM this time, leaving only objects that we were
+ * unable to lock. Try again to bind the buffers (still using the new
+ * buffer order).
+ *
+ * 3. We likely have object lock contention for one or more stubborn
+ * objects in the VM, for which we need to evict to make forward
+ * progress (perhaps we are fighting the shrinker?). When evicting the
+ * VM this time around, anything that we can't lock we now track using
+ * the busy_bo, taking the full lock (after dropping the vm->mutex to
+ * prevent deadlocks) instead of trylock. We then continue to evict the
+ * VM, this time with the stubborn object locked, which we can now
+ * hopefully unbind (if still bound in the VM). Repeat until the VM is
+ * evicted. Finally we should be able to bind everything.
*/
- for (pass = 0; pass <= 2; pass++) {
+ for (pass = 0; pass <= 3; pass++) {
int pin_flags = PIN_USER | PIN_VALIDATE;
if (pass == 0)
pin_flags |= PIN_NONBLOCK;
if (pass >= 1)
- unpinned = eb_unbind(eb, pass == 2);
+ unpinned = eb_unbind(eb, pass >= 2);
if (pass == 2) {
err = mutex_lock_interruptible(&eb->context->vm->mutex);
if (!err) {
- err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
+ mutex_unlock(&eb->context->vm->mutex);
+ }
+ if (err)
+ return err;
+ }
+
+ if (pass == 3) {
+retry:
+ err = mutex_lock_interruptible(&eb->context->vm->mutex);
+ if (!err) {
+ struct drm_i915_gem_object *busy_bo = NULL;
+
+ err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
mutex_unlock(&eb->context->vm->mutex);
+ if (err && busy_bo) {
+ err = i915_gem_object_lock(busy_bo, &eb->ww);
+ i915_gem_object_put(busy_bo);
+ if (!err)
+ goto retry;
+ }
}
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c29efdef8313..0ad44f3868de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -369,7 +369,7 @@ retry:
if (vma == ERR_PTR(-ENOSPC)) {
ret = mutex_lock_interruptible(&ggtt->vm.mutex);
if (!ret) {
- ret = i915_gem_evict_vm(&ggtt->vm, &ww);
+ ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
mutex_unlock(&ggtt->vm.mutex);
}
if (ret)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index ab4c2f90a564..19c9bdd8f905 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -10,7 +10,7 @@
#include <linux/mmu_notifier.h>
#include <drm/drm_gem.h>
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include <uapi/drm/i915_drm.h>
#include "i915_active.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 1e50fb0d6bfc..d409a77449a3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -5,8 +5,8 @@
#include <linux/shmem_fs.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
#include <drm/drm_buddy.h>
#include "i915_drv.h"
@@ -599,13 +599,16 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- int err;
+ long err;
WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
- err = ttm_bo_wait(bo, true, false);
- if (err)
+ err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, 15 * HZ);
+ if (err < 0)
return err;
+ if (err == 0)
+ return -EBUSY;
err = i915_ttm_move_notify(bo);
if (err)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index f59f812dc6d2..2ebaaf4d663c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -3,7 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_tt.h>
#include "i915_deps.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 767e329e1cc5..9c18b5f2e789 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -1109,9 +1109,15 @@ static void mmio_invalidate_full(struct intel_gt *gt)
continue;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ u32 val = BIT(engine->instance);
+
+ if (engine->class == VIDEO_DECODE_CLASS ||
+ engine->class == VIDEO_ENHANCEMENT_CLASS ||
+ engine->class == COMPUTE_CLASS)
+ val = _MASKED_BIT_ENABLE(val);
intel_gt_mcr_multicast_write_fw(gt,
xehp_regs[engine->class],
- BIT(engine->instance));
+ val);
} else {
rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
if (!i915_mmio_reg_offset(rb.reg))
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index cf71305ad586..09b9365ededd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -164,7 +164,6 @@ sysfs_gt_attribute_r_func(struct kobject *kobj, struct attribute *attr,
NULL); \
INTEL_GT_ATTR_RO(_name)
-#ifdef CONFIG_PM
static u32 get_residency(struct intel_gt *gt, enum intel_rc6_res_type id)
{
intel_wakeref_t wakeref;
@@ -300,7 +299,7 @@ static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
{
int ret;
- if (!HAS_RC6(gt->i915))
+ if (!IS_ENABLED(CONFIG_PM) || !HAS_RC6(gt->i915))
return;
ret = __intel_gt_sysfs_create_group(kobj, rc6_attr_group);
@@ -329,11 +328,6 @@ static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
gt->info.id, ERR_PTR(ret));
}
}
-#else
-static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
-{
-}
-#endif /* CONFIG_PM */
static u32 __act_freq_mhz_show(struct intel_gt *gt)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 0c80ba51a4bd..2bcdd192f814 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -545,6 +545,32 @@ static int check_ccs_header(struct intel_gt *gt,
return 0;
}
+static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct device *dev = gt->i915->drm.dev;
+ int err;
+
+ err = firmware_request_nowarn(fw, uc_fw->file_selected.path, dev);
+
+ if (err)
+ return err;
+
+ if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
+ drm_err(&gt->i915->drm,
+ "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
+
+ /* try to find another blob to load */
+ release_firmware(*fw);
+ *fw = NULL;
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
/**
* intel_uc_fw_fetch - fetch uC firmware
* @uc_fw: uC firmware
@@ -558,7 +584,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
struct drm_i915_private *i915 = gt->i915;
struct intel_uc_fw_file file_ideal;
- struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
const struct firmware *fw = NULL;
bool old_ver = false;
@@ -574,20 +599,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
- err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ err = try_firmware_load(uc_fw, &fw);
memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
- if (!err && fw->size > INTEL_UC_RSVD_GGTT_PER_FW) {
- drm_err(&i915->drm,
- "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
-
- /* try to find another blob to load */
- release_firmware(fw);
- err = -ENOENT;
- }
-
/* Any error is terminal if overriding. Don't bother searching for older versions */
if (err && intel_uc_fw_is_overridden(uc_fw))
goto fail;
@@ -608,7 +622,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
break;
}
- err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+ err = try_firmware_load(uc_fw, &fw);
}
if (err)
diff --git a/drivers/gpu/drm/i915/i915_deps.c b/drivers/gpu/drm/i915/i915_deps.c
index 297b8e4e42ee..91c61864285a 100644
--- a/drivers/gpu/drm/i915/i915_deps.c
+++ b/drivers/gpu/drm/i915/i915_deps.c
@@ -6,7 +6,7 @@
#include <linux/dma-fence.h>
#include <linux/slab.h>
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include "i915_deps.h"
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8468ca9885fd..c38306f156d6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1099,7 +1099,7 @@ void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
while (atomic_read(&i915->mm.free_count)) {
flush_work(&i915->mm.free_work);
- flush_delayed_work(&i915->bdev.wq);
+ drain_workqueue(i915->bdev.wq);
rcu_barrier();
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index f025ee4fa526..a4b4d9b7d26c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -416,6 +416,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* @vm: Address space to cleanse
* @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
* will be able to evict vma's locked by the ww as well.
+ * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL and
+ * i915_gem_evict_vm() is unable to trylock an object for eviction, @busy_bo
+ * will point to it and -EBUSY is returned. The caller must drop the vm->mutex
+ * before trying again to acquire the contended lock. The caller also owns a
+ * reference to the object.
*
* This function evicts all vmas from a vm.
*
@@ -425,7 +430,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
-int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
+int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object **busy_bo)
{
int ret = 0;
@@ -457,15 +463,22 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
* the resv is shared among multiple objects, we still
* need the object ref.
*/
- if (dying_vma(vma) ||
+ if (!i915_gem_object_get_rcu(vma->obj) ||
(ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
__i915_vma_pin(vma);
list_add(&vma->evict_link, &locked_eviction_list);
continue;
}
- if (!i915_gem_object_trylock(vma->obj, ww))
+ if (!i915_gem_object_trylock(vma->obj, ww)) {
+ if (busy_bo) {
+ *busy_bo = vma->obj; /* holds ref */
+ ret = -EBUSY;
+ break;
+ }
+ i915_gem_object_put(vma->obj);
continue;
+ }
__i915_vma_pin(vma);
list_add(&vma->evict_link, &eviction_list);
@@ -473,25 +486,29 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
break;
- ret = 0;
/* Unbind locked objects first, before unlocking the eviction_list */
list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
__i915_vma_unpin(vma);
- if (ret == 0)
+ if (ret == 0) {
ret = __i915_vma_unbind(vma);
- if (ret != -EINTR) /* "Get me out of here!" */
- ret = 0;
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
+ if (!dying_vma(vma))
+ i915_gem_object_put(vma->obj);
}
list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
__i915_vma_unpin(vma);
- if (ret == 0)
+ if (ret == 0) {
ret = __i915_vma_unbind(vma);
- if (ret != -EINTR) /* "Get me out of here!" */
- ret = 0;
+ if (ret != -EINTR) /* "Get me out of here!" */
+ ret = 0;
+ }
i915_gem_object_unlock(vma->obj);
+ i915_gem_object_put(vma->obj);
}
} while (ret == 0);
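
/*
 * Editorial note: a minimal caller sketch (hypothetical fragment; the actual
 * user of the new out-parameter lives elsewhere in this series) illustrating
 * the contract documented above: on -EBUSY the caller receives a referenced,
 * contended object and must drop vm->mutex before waiting on its lock and
 * retrying the eviction. Assumes an enclosing ww transaction providing @ww.
 */
	struct drm_i915_gem_object *busy_bo = NULL;
	int err;

	mutex_lock(&vm->mutex);
	err = i915_gem_evict_vm(vm, &ww, &busy_bo);
	mutex_unlock(&vm->mutex);

	if (err == -EBUSY && busy_bo) {
		/* Wait for the contended lock outside vm->mutex... */
		if (!i915_gem_object_lock(busy_bo, &ww))
			i915_gem_object_unlock(busy_bo);
		/* ...drop the reference handed to us, then retry the eviction. */
		i915_gem_object_put(busy_bo);
	}
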
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h
index e593c530f9bd..bf0ee0e4fe60 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.h
+++ b/drivers/gpu/drm/i915/i915_gem_evict.h
@@ -11,6 +11,7 @@
struct drm_mm_node;
struct i915_address_space;
struct i915_gem_ww_ctx;
+struct drm_i915_gem_object;
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
struct i915_gem_ww_ctx *ww,
@@ -23,6 +24,7 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
struct drm_mm_node *node,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm,
- struct i915_gem_ww_ctx *ww);
+ struct i915_gem_ww_ctx *ww,
+ struct drm_i915_gem_object **busy_bo);
#endif /* __I915_GEM_EVICT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 7e611476c7a4..a72698a2dbc8 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -5,8 +5,8 @@
#include <linux/slab.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/drm_buddy.h>
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 703fee6b5f75..3a33be5401ed 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1566,7 +1566,7 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
* locked objects when called from execbuf when pinning
* is removed. This would probably regress badly.
*/
- i915_gem_evict_vm(vm, NULL);
+ i915_gem_evict_vm(vm, NULL, NULL);
mutex_unlock(&vm->mutex);
}
} while (1);
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index cf89d0c2a2d9..4dc0702081b8 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -2,7 +2,6 @@
/*
* Copyright © 2021 Intel Corporation
*/
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>
@@ -132,7 +131,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)
break;
msleep(20);
- flush_delayed_work(&mem->i915->bdev.wq);
+ drain_workqueue(mem->i915->bdev.wq);
}
/* If we leaked objects, don't free the region, as that would cause a use-after-free */
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 8c6517d29b8e..37068542aafe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -344,7 +344,7 @@ static int igt_evict_vm(void *arg)
/* Everything is pinned, nothing should happen */
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_vm(&ggtt->vm, NULL);
+ err = i915_gem_evict_vm(&ggtt->vm, NULL, NULL);
mutex_unlock(&ggtt->vm.mutex);
if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
@@ -356,7 +356,7 @@ static int igt_evict_vm(void *arg)
for_i915_gem_ww(&ww, err, false) {
mutex_lock(&ggtt->vm.mutex);
- err = i915_gem_evict_vm(&ggtt->vm, &ww);
+ err = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
mutex_unlock(&ggtt->vm.mutex);
}
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index fd5b2471fdf0..e5749927fd6c 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -1,43 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config DRM_IMX
- tristate "DRM Support for Freescale i.MX"
- select DRM_KMS_HELPER
- select VIDEOMODE_HELPERS
- select DRM_GEM_DMA_HELPER
- depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
- depends on IMX_IPUV3_CORE
- help
- enable i.MX graphics support
-
-config DRM_IMX_PARALLEL_DISPLAY
- tristate "Support for parallel displays"
- select DRM_PANEL
- depends on DRM_IMX
- select VIDEOMODE_HELPERS
-
-config DRM_IMX_TVE
- tristate "Support for TV and VGA displays"
- depends on DRM_IMX
- depends on COMMON_CLK
- select REGMAP_MMIO
- help
- Choose this to enable the internal Television Encoder (TVe)
- found on i.MX53 processors.
-
-config DRM_IMX_LDB
- tristate "Support for LVDS displays"
- depends on DRM_IMX && MFD_SYSCON
- depends on COMMON_CLK
- select DRM_PANEL
- help
- Choose this to enable the internal LVDS Display Bridge (LDB)
- found on i.MX53 and i.MX6 processors.
-
-config DRM_IMX_HDMI
- tristate "Freescale i.MX DRM HDMI"
- select DRM_DW_HDMI
- depends on DRM_IMX && OF
- help
- Choose this if you want to use HDMI on i.MX6.
source "drivers/gpu/drm/imx/dcss/Kconfig"
+source "drivers/gpu/drm/imx/ipuv3/Kconfig"
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index b644deffe948..909622864716 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,12 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
-
-obj-$(CONFIG_DRM_IMX) += imxdrm.o
-
-obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
-obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
-obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
-
-obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
+obj-$(CONFIG_DRM_IMX) += ipuv3/
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
index 3f5750cc2673..5d1779ab65c0 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -249,16 +249,12 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
kfree(dcss);
}
-#ifdef CONFIG_PM_SLEEP
-int dcss_dev_suspend(struct device *dev)
+static int dcss_dev_suspend(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
- struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base);
int ret;
- drm_bridge_connector_disable_hpd(kms->connector);
-
drm_mode_config_helper_suspend(ddev);
if (pm_runtime_suspended(dev))
@@ -273,11 +269,10 @@ int dcss_dev_suspend(struct device *dev)
return 0;
}
-int dcss_dev_resume(struct device *dev)
+static int dcss_dev_resume(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
- struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base);
if (pm_runtime_suspended(dev)) {
drm_mode_config_helper_resume(ddev);
@@ -292,14 +287,10 @@ int dcss_dev_resume(struct device *dev)
drm_mode_config_helper_resume(ddev);
- drm_bridge_connector_enable_hpd(kms->connector);
-
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
-int dcss_dev_runtime_suspend(struct device *dev)
+static int dcss_dev_runtime_suspend(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
int ret;
@@ -313,7 +304,7 @@ int dcss_dev_runtime_suspend(struct device *dev)
return 0;
}
-int dcss_dev_runtime_resume(struct device *dev)
+static int dcss_dev_runtime_resume(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
@@ -325,4 +316,8 @@ int dcss_dev_runtime_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
+
+EXPORT_GPL_DEV_PM_OPS(dcss_dev_pm_ops) = {
+ RUNTIME_PM_OPS(dcss_dev_runtime_suspend, dcss_dev_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
+};
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h
index 1e582270c6ea..f27b87c09599 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.h
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h
@@ -9,6 +9,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <linux/io.h>
+#include <linux/pm.h>
#include <video/videomode.h>
#define SET 0x04
@@ -95,13 +96,11 @@ struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev);
struct drm_device *dcss_drv_dev_to_drm(struct device *dev);
struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output);
void dcss_dev_destroy(struct dcss_dev *dcss);
-int dcss_dev_runtime_suspend(struct device *dev);
-int dcss_dev_runtime_resume(struct device *dev);
-int dcss_dev_suspend(struct device *dev);
-int dcss_dev_resume(struct device *dev);
void dcss_enable_dtg_and_ss(struct dcss_dev *dcss);
void dcss_disable_dtg_and_ss(struct dcss_dev *dcss);
+extern const struct dev_pm_ops dcss_dev_pm_ops;
+
/* BLKCTL */
int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base);
void dcss_blkctl_cfg(struct dcss_blkctl *blkctl);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index 1c70f70247f6..4f2291610139 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -74,8 +74,6 @@ static int dcss_drv_platform_probe(struct platform_device *pdev)
dcss_shutoff:
dcss_dev_destroy(mdrv->dcss);
- dev_set_drvdata(dev, NULL);
-
err:
kfree(mdrv);
return err;
@@ -85,14 +83,9 @@ static int dcss_drv_platform_remove(struct platform_device *pdev)
{
struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
- if (!mdrv)
- return 0;
-
dcss_kms_detach(mdrv->kms);
dcss_dev_destroy(mdrv->dcss);
- dev_set_drvdata(&pdev->dev, NULL);
-
kfree(mdrv);
return 0;
@@ -117,19 +110,13 @@ static const struct of_device_id dcss_of_match[] = {
MODULE_DEVICE_TABLE(of, dcss_of_match);
-static const struct dev_pm_ops dcss_dev_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
- SET_RUNTIME_PM_OPS(dcss_dev_runtime_suspend,
- dcss_dev_runtime_resume, NULL)
-};
-
static struct platform_driver dcss_platform_driver = {
.probe = dcss_drv_platform_probe,
.remove = dcss_drv_platform_remove,
.driver = {
.name = "imx-dcss",
.of_match_table = dcss_of_match,
- .pm = &dcss_dev_pm,
+ .pm = pm_ptr(&dcss_dev_pm_ops),
},
};
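
/*
 * Editorial note: the conversion above follows the now-preferred PM ops style:
 * the callbacks are always compiled (no #ifdef CONFIG_PM / CONFIG_PM_SLEEP),
 * grouped with RUNTIME_PM_OPS()/SYSTEM_SLEEP_PM_OPS(), and referenced through
 * pm_ptr() so the table is discarded when power management is disabled. A
 * minimal single-file sketch of the same pattern, with hypothetical foo_*
 * names (probe/remove omitted for brevity):
 */
static int foo_runtime_suspend(struct device *dev) { /* ... */ return 0; }
static int foo_runtime_resume(struct device *dev) { /* ... */ return 0; }
static int foo_suspend(struct device *dev) { /* ... */ return 0; }
static int foo_resume(struct device *dev) { /* ... */ return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm_ops),	/* folds to NULL when CONFIG_PM is off */
	},
};
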
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 18df3888b7f9..dab5e664920d 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -150,7 +150,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
return kms;
cleanup_crtc:
- drm_bridge_connector_disable_hpd(kms->connector);
drm_kms_helper_poll_fini(drm);
dcss_crtc_deinit(crtc, drm);
@@ -166,7 +165,6 @@ void dcss_kms_detach(struct dcss_kms_dev *kms)
struct drm_device *drm = &kms->base;
drm_dev_unregister(drm);
- drm_bridge_connector_disable_hpd(kms->connector);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
drm_crtc_vblank_off(&kms->crtc.base);
diff --git a/drivers/gpu/drm/imx/ipuv3/Kconfig b/drivers/gpu/drm/imx/ipuv3/Kconfig
new file mode 100644
index 000000000000..bb278a369575
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/Kconfig
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_IMX
+ tristate "DRM Support for Freescale i.MX"
+ select DRM_KMS_HELPER
+ select VIDEOMODE_HELPERS
+ select DRM_GEM_DMA_HELPER
+ depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
+ depends on IMX_IPUV3_CORE
+ help
+ enable i.MX graphics support
+
+config DRM_IMX_PARALLEL_DISPLAY
+ tristate "Support for parallel displays"
+ select DRM_PANEL
+ depends on DRM_IMX
+ select VIDEOMODE_HELPERS
+
+config DRM_IMX_TVE
+ tristate "Support for TV and VGA displays"
+ depends on DRM_IMX
+ depends on COMMON_CLK
+ select REGMAP_MMIO
+ help
+ Choose this to enable the internal Television Encoder (TVe)
+ found on i.MX53 processors.
+
+config DRM_IMX_LDB
+ tristate "Support for LVDS displays"
+ depends on DRM_IMX && MFD_SYSCON
+ depends on COMMON_CLK
+ select DRM_PANEL
+ help
+ Choose this to enable the internal LVDS Display Bridge (LDB)
+ found on i.MX53 and i.MX6 processors.
+
+config DRM_IMX_HDMI
+ tristate "Freescale i.MX DRM HDMI"
+ select DRM_DW_HDMI
+ depends on DRM_IMX && OF
+ help
+ Choose this if you want to use HDMI on i.MX6.
diff --git a/drivers/gpu/drm/imx/ipuv3/Makefile b/drivers/gpu/drm/imx/ipuv3/Makefile
new file mode 100644
index 000000000000..21cdcc2faabc
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
+
+obj-$(CONFIG_DRM_IMX) += imxdrm.o
+
+obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
+obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
+obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
+
+obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
index a2277a0d6d06..a2277a0d6d06 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
index e060fa6cbcb9..e060fa6cbcb9 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/ipuv3/imx-drm.h
index e721bebda2bd..e721bebda2bd 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm.h
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
index c45fc8f4744d..c45fc8f4744d 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
index d6832f506322..d6832f506322 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
index 5f26090b0c98..5f26090b0c98 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
index dba4f7d81d69..80142d9a4a55 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
@@ -614,6 +614,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
break;
}
+ if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG)
+ width = ipu_src_rect_width(new_state);
+ else
+ width = drm_rect_width(&new_state->src) >> 16;
+
eba = drm_plane_state_to_eba(new_state, 0);
/*
@@ -622,8 +627,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
*/
if (ipu_state->use_pre) {
axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
- ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
- ipu_src_rect_width(new_state),
+ ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width,
drm_rect_height(&new_state->src) >> 16,
fb->pitches[0], fb->format->format,
fb->modifier, &eba);
@@ -678,9 +682,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
break;
}
- ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width);
- width = ipu_src_rect_width(new_state);
height = drm_rect_height(&new_state->src) >> 16;
info = drm_format_info(fb->format->format);
ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
@@ -744,8 +747,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
ipu_cpmem_zero(ipu_plane->alpha_ch);
- ipu_cpmem_set_resolution(ipu_plane->alpha_ch,
- ipu_src_rect_width(new_state),
+ ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width,
drm_rect_height(&new_state->src) >> 16);
ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h
index 6d544e6ce63f..6d544e6ce63f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 0fa0b590830b..0fa0b590830b 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index 9de24d9f0c96..2fb23697740a 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -301,6 +301,7 @@ static int logicvc_drm_probe(struct platform_device *pdev)
struct regmap *regmap = NULL;
struct resource res;
void __iomem *base;
+ unsigned int preferred_bpp;
int irq;
int ret;
@@ -438,7 +439,17 @@ static int logicvc_drm_probe(struct platform_device *pdev)
goto error_mode;
}
- drm_fbdev_generic_setup(drm_dev, drm_dev->mode_config.preferred_depth);
+ switch (drm_dev->mode_config.preferred_depth) {
+ case 16:
+ preferred_bpp = 16;
+ break;
+ case 24:
+ case 32:
+ default:
+ preferred_bpp = 32;
+ break;
+ }
+ drm_fbdev_generic_setup(drm_dev, preferred_bpp);
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 9d085c05c49c..b4feaabdb6a7 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -1981,7 +1981,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
struct cea_sad *sads;
if (!enabled) {
- drm_bridge_chain_pre_enable(bridge);
+ drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
/* power on aux */
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
@@ -2019,7 +2019,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
DP_PWR_STATE_BANDGAP_TPLL,
DP_PWR_STATE_MASK);
- drm_bridge_chain_post_disable(bridge);
+ drm_atomic_bridge_chain_post_disable(bridge, connector->state->state);
}
return new_edid;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 5cd2b2ebbbd3..534621a13a34 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -140,7 +140,6 @@ struct meson_dw_hdmi {
struct reset_control *hdmitx_apb;
struct reset_control *hdmitx_ctrl;
struct reset_control *hdmitx_phy;
- struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
struct drm_bridge *bridge;
@@ -665,11 +664,6 @@ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
}
-static void meson_disable_regulator(void *data)
-{
- regulator_disable(data);
-}
-
static void meson_disable_clk(void *data)
{
clk_disable_unprepare(data);
@@ -723,20 +717,9 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
meson_dw_hdmi->data = match;
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
- meson_dw_hdmi->hdmi_supply = devm_regulator_get_optional(dev, "hdmi");
- if (IS_ERR(meson_dw_hdmi->hdmi_supply)) {
- if (PTR_ERR(meson_dw_hdmi->hdmi_supply) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- meson_dw_hdmi->hdmi_supply = NULL;
- } else {
- ret = regulator_enable(meson_dw_hdmi->hdmi_supply);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, meson_disable_regulator,
- meson_dw_hdmi->hdmi_supply);
- if (ret)
- return ret;
- }
+ ret = devm_regulator_get_enable_optional(dev, "hdmi");
+ if (ret < 0)
+ return ret;
meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
"hdmitx_apb");
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index eec59658a938..b28c5e4828f4 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -4,6 +4,8 @@ config DRM_MGAG200
depends on DRM && PCI && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
+ select I2C
+ select I2C_ALGOBIT
help
This is a KMS driver for Matrox G200 chips. It supports the original
MGA G200 desktop chips and the server variants. It requires 0.3.0
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 4d3fdc806bef..c373a40bef37 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -203,8 +203,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- drm_bridge_connector_enable_hpd(hdmi->connector);
-
ret = msm_hdmi_hpd_enable(hdmi->bridge);
if (ret < 0) {
DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 116f8168bda4..518b53345354 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -8,6 +8,7 @@ config DRM_MXSFB
tristate "i.MX (e)LCDIF LCD controller"
depends on DRM && OF
depends on COMMON_CLK
+ depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST
select DRM_MXS
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
@@ -24,6 +25,7 @@ config DRM_IMX_LCDIF
tristate "i.MX LCDIFv3 LCD controller"
depends on DRM && OF
depends on COMMON_CLK
+ depends on ARCH_MXC || COMPILE_TEST
select DRM_MXS
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 810edea0a31e..b3ab86ad1b36 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -78,14 +78,12 @@ static const struct mxsfb_devdata mxsfb_devdata[] = {
void mxsfb_enable_axi_clk(struct mxsfb_drm_private *mxsfb)
{
- if (mxsfb->clk_axi)
- clk_prepare_enable(mxsfb->clk_axi);
+ clk_prepare_enable(mxsfb->clk_axi);
}
void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
{
- if (mxsfb->clk_axi)
- clk_disable_unprepare(mxsfb->clk_axi);
+ clk_disable_unprepare(mxsfb->clk_axi);
}
static struct drm_framebuffer *
@@ -235,9 +233,9 @@ static int mxsfb_load(struct drm_device *drm,
if (IS_ERR(mxsfb->clk))
return PTR_ERR(mxsfb->clk);
- mxsfb->clk_axi = devm_clk_get(drm->dev, "axi");
+ mxsfb->clk_axi = devm_clk_get_optional(drm->dev, "axi");
if (IS_ERR(mxsfb->clk_axi))
- mxsfb->clk_axi = NULL;
+ return PTR_ERR(mxsfb->clk_axi);
mxsfb->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
if (IS_ERR(mxsfb->clk_disp_axi))
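
/*
 * Editorial note on the pattern used above: devm_clk_get_optional() returns
 * NULL rather than an error when the clock is simply not described, and the
 * clk API treats a NULL clk as a no-op, so the explicit NULL checks around
 * clk_prepare_enable()/clk_disable_unprepare() become unnecessary. Roughly
 * (hypothetical fragment):
 */
	clk = devm_clk_get_optional(dev, "axi");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real errors, e.g. probe deferral, still propagate */

	clk_prepare_enable(clk);	/* no-op when clk == NULL */
	/* ... */
	clk_disable_unprepare(clk);	/* likewise a no-op when clk == NULL */
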
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 03d12caf9e26..a0bb3987bf63 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,6 +10,8 @@ config DRM_NOUVEAU
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select I2C
+ select I2C_ALGOBIT
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index be28e7bd7490..e5480dab55e3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -653,7 +653,7 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
tv_enc->tv_norm = i;
}
- drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
+ drm_mode_create_tv_properties_legacy(dev, num_tv_norms, nv17_tv_norm_names);
drm_object_attach_property(&connector->base,
conf->tv_select_subconnector_property,
@@ -662,7 +662,7 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
conf->tv_subconnector_property,
tv_enc->subconnector);
drm_object_attach_property(&connector->base,
- conf->tv_mode_property,
+ conf->legacy_tv_mode_property,
tv_enc->tv_norm);
drm_object_attach_property(&connector->base,
conf->tv_flicker_reduction_property,
@@ -722,7 +722,7 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
if (encoder->crtc)
nv17_tv_update_rescaler(encoder);
- } else if (property == conf->tv_mode_property) {
+ } else if (property == conf->legacy_tv_mode_property) {
if (connector->dpms != DRM_MODE_DPMS_OFF)
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a11871e3119c..335fa91ca4ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
*/
#include <linux/dma-mapping.h>
+#include <drm/ttm/ttm_tt.h>
#include "nouveau_drv.h"
#include "nouveau_chan.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index c2d3f9c48eba..774dd93ca76b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
struct nouveau_channel;
struct nouveau_cli;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d6dd07bfa64a..b5de312a523f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -51,8 +51,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_audio_component.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 1fde3a5d7c32..25f31d5169e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -19,11 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <drm/ttm/ttm_tt.h>
+
#include "nouveau_mem.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"
-#include <drm/ttm/ttm_bo_driver.h>
#include <nvif/class.h>
#include <nvif/if000a.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 1ee6cdb9ad9b..76c86d8bb01e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -1,6 +1,6 @@
#ifndef __NOUVEAU_MEM_H__
#define __NOUVEAU_MEM_H__
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
struct ttm_tt;
#include <nvif/mem.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 9608121e49b7..f42c2b1b0363 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -23,6 +23,7 @@
*/
#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_tt.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 85c03c83259b..b14895f75b3c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT
#include <linux/pagemap.h>
#include <linux/slab.h>
+#include <drm/ttm/ttm_tt.h>
#include "nouveau_drv.h"
#include "nouveau_mem.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 0ee344ebcd1c..aacad5045e95 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -855,11 +855,6 @@ struct csc_coef_yuv2rgb {
bool full_range;
};
-struct csc_coef_rgb2yuv {
- int yr, yg, yb, cbr, cbg, cbb, crr, crg, crb;
- bool full_range;
-};
-
static void dispc_ovl_write_color_conv_coef(struct dispc_device *dispc,
enum omap_plane_id plane,
const struct csc_coef_yuv2rgb *ct)
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index a6845856cbce..4c1084eb0175 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1039,22 +1039,26 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
{
struct dsi_data *dsi = s->private;
unsigned long flags;
- struct dsi_irq_stats stats;
+ struct dsi_irq_stats *stats;
+
+ stats = kmalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
spin_lock_irqsave(&dsi->irq_stats_lock, flags);
- stats = dsi->irq_stats;
+ *stats = dsi->irq_stats;
memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
dsi->irq_stats.last_reset = jiffies;
spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
seq_printf(s, "period %u ms\n",
- jiffies_to_msecs(jiffies - stats.last_reset));
+ jiffies_to_msecs(jiffies - stats->last_reset));
- seq_printf(s, "irqs %d\n", stats.irq_count);
+ seq_printf(s, "irqs %d\n", stats->irq_count);
#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
+ seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1]);
seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
PIS(VC0);
@@ -1078,10 +1082,10 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
#define PIS(x) \
seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
- stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+ stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
seq_printf(s, "-- VC interrupts --\n");
PIS(CS);
@@ -1097,7 +1101,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
#define PIS(x) \
seq_printf(s, "%-20s %10d\n", #x, \
- stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+ stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
seq_printf(s, "-- CIO interrupts --\n");
PIS(ERRSYNCESC1);
@@ -1122,6 +1126,8 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
PIS(ULPSACTIVENOT_ALL1);
#undef PIS
+ kfree(stats);
+
return 0;
}
#endif
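
/*
 * Editorial note: the change above presumably keeps the large struct
 * dsi_irq_stats snapshot off the kernel stack. A generic sketch of the
 * pattern, with hypothetical foo_* names: snapshot the stats into a heap
 * buffer under the spinlock, then do all the seq_printf() formatting
 * outside the lock.
 */
	struct foo_stats *snap;
	unsigned long flags;

	snap = kmalloc(sizeof(*snap), GFP_KERNEL);
	if (!snap)
		return -ENOMEM;

	spin_lock_irqsave(&foo->stats_lock, flags);
	*snap = foo->stats;
	memset(&foo->stats, 0, sizeof(foo->stats));
	spin_unlock_irqrestore(&foo->stats_lock, flags);

	/* ... seq_printf(s, ...) using *snap ... */

	kfree(snap);
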
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index eaf67b9e5f12..699ed814e021 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -546,44 +546,6 @@ static void omap_modeset_fini(struct drm_device *ddev)
}
/*
- * Enable the HPD in external components if supported
- */
-static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
-{
- struct omap_drm_private *priv = ddev->dev_private;
- unsigned int i;
-
- for (i = 0; i < priv->num_pipes; i++) {
- struct drm_connector *connector = priv->pipes[i].connector;
-
- if (!connector)
- continue;
-
- if (priv->pipes[i].output->bridge)
- drm_bridge_connector_enable_hpd(connector);
- }
-}
-
-/*
- * Disable the HPD in external components if supported
- */
-static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
-{
- struct omap_drm_private *priv = ddev->dev_private;
- unsigned int i;
-
- for (i = 0; i < priv->num_pipes; i++) {
- struct drm_connector *connector = priv->pipes[i].connector;
-
- if (!connector)
- continue;
-
- if (priv->pipes[i].output->bridge)
- drm_bridge_connector_disable_hpd(connector);
- }
-}
-
-/*
* drm ioctl funcs
*/
@@ -782,7 +744,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_fbdev_init(ddev);
drm_kms_helper_poll_init(ddev);
- omap_modeset_enable_external_hpd(ddev);
/*
* Register the DRM device with the core and the connectors with
@@ -795,7 +756,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
return 0;
err_cleanup_helpers:
- omap_modeset_disable_external_hpd(ddev);
drm_kms_helper_poll_fini(ddev);
omap_fbdev_fini(ddev);
@@ -822,7 +782,6 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_dev_unregister(ddev);
- omap_modeset_disable_external_hpd(ddev);
drm_kms_helper_poll_fini(ddev);
omap_fbdev_fini(ddev);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 737edcdf9eef..dd79928f5482 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -154,6 +154,18 @@ config DRM_PANEL_FEIYANG_FY07024DI26A30D
Say Y if you want to enable support for panels based on the
Feiyang FY07024DI26A30-D MIPI-DSI interface.
+config DRM_PANEL_HIMAX_HX8394
+ tristate "HIMAX HX8394 MIPI-DSI LCD panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Himax HX8394 controller, such as the HannStar HSD060BHW4
+ 720x1440 TFT LCD panel that uses a MIPI-DSI interface.
+
+ If M is selected the module will be called panel-himax-hx8394.
+
config DRM_PANEL_ILITEK_IL9322
tristate "Ilitek ILI9322 320x240 QVGA panels"
depends on OF && SPI
@@ -400,6 +412,15 @@ config DRM_PANEL_OLIMEX_LCD_OLINUXINO
Say Y here if you want to enable support for Olimex Ltd.
LCD-OLinuXino panel.
+config DRM_PANEL_ORISETECH_OTA5601A
+ tristate "Orise Technology ota5601a RGB/SPI panel"
+ depends on SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select REGMAP_SPI
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Orise Technology OTA5601A display controller.
+
config DRM_PANEL_ORISETECH_OTM8009A
tristate "Orise Technology otm8009a 480x800 dsi 2dl panel"
depends on OF
@@ -717,6 +738,14 @@ config DRM_PANEL_VISIONOX_RM69299
Say Y here if you want to enable support for Visionox
RM69299 DSI Video Mode panel.
+config DRM_PANEL_VISIONOX_VTDR6130
+ tristate "Visionox VTDR6130"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for Visionox
+ VTDR6130 1080x2400 AMOLED DSI panel.
+
config DRM_PANEL_WIDECHIPS_WS2401
tristate "Widechips WS2401 DPI panel driver"
depends on SPI && GPIOLIB
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index f8f9d9f6a307..d07e6fa28bdf 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
@@ -37,6 +38,7 @@ obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
+obj-$(CONFIG_DRM_PANEL_ORISETECH_OTA5601A) += panel-orisetech-ota5601a.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS) += panel-osd-osd101t2587-53ts.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
@@ -73,5 +75,6 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
obj-$(CONFIG_DRM_PANEL_WIDECHIPS_WS2401) += panel-widechips-ws2401.o
obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index b3235781e6ba..075a7af81eff 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -24,22 +24,6 @@ static inline struct tm5p5_nt35596 *to_tm5p5_nt35596(struct drm_panel *panel)
return container_of(panel, struct tm5p5_nt35596, panel);
}
-#define dsi_generic_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void tm5p5_nt35596_reset(struct tm5p5_nt35596 *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
@@ -54,46 +38,46 @@ static int tm5p5_nt35596_on(struct tm5p5_nt35596 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- dsi_generic_write_seq(dsi, 0xff, 0x05);
- dsi_generic_write_seq(dsi, 0xfb, 0x01);
- dsi_generic_write_seq(dsi, 0xc5, 0x31);
- dsi_generic_write_seq(dsi, 0xff, 0x04);
- dsi_generic_write_seq(dsi, 0x01, 0x84);
- dsi_generic_write_seq(dsi, 0x05, 0x25);
- dsi_generic_write_seq(dsi, 0x06, 0x01);
- dsi_generic_write_seq(dsi, 0x07, 0x20);
- dsi_generic_write_seq(dsi, 0x08, 0x06);
- dsi_generic_write_seq(dsi, 0x09, 0x08);
- dsi_generic_write_seq(dsi, 0x0a, 0x10);
- dsi_generic_write_seq(dsi, 0x0b, 0x10);
- dsi_generic_write_seq(dsi, 0x0c, 0x10);
- dsi_generic_write_seq(dsi, 0x0d, 0x14);
- dsi_generic_write_seq(dsi, 0x0e, 0x14);
- dsi_generic_write_seq(dsi, 0x0f, 0x14);
- dsi_generic_write_seq(dsi, 0x10, 0x14);
- dsi_generic_write_seq(dsi, 0x11, 0x14);
- dsi_generic_write_seq(dsi, 0x12, 0x14);
- dsi_generic_write_seq(dsi, 0x17, 0xf3);
- dsi_generic_write_seq(dsi, 0x18, 0xc0);
- dsi_generic_write_seq(dsi, 0x19, 0xc0);
- dsi_generic_write_seq(dsi, 0x1a, 0xc0);
- dsi_generic_write_seq(dsi, 0x1b, 0xb3);
- dsi_generic_write_seq(dsi, 0x1c, 0xb3);
- dsi_generic_write_seq(dsi, 0x1d, 0xb3);
- dsi_generic_write_seq(dsi, 0x1e, 0xb3);
- dsi_generic_write_seq(dsi, 0x1f, 0xb3);
- dsi_generic_write_seq(dsi, 0x20, 0xb3);
- dsi_generic_write_seq(dsi, 0xfb, 0x01);
- dsi_generic_write_seq(dsi, 0xff, 0x00);
- dsi_generic_write_seq(dsi, 0xfb, 0x01);
- dsi_generic_write_seq(dsi, 0x35, 0x01);
- dsi_generic_write_seq(dsi, 0xd3, 0x06);
- dsi_generic_write_seq(dsi, 0xd4, 0x04);
- dsi_generic_write_seq(dsi, 0x5e, 0x0d);
- dsi_generic_write_seq(dsi, 0x11, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xff, 0x05);
+ mipi_dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xc5, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xff, 0x04);
+ mipi_dsi_generic_write_seq(dsi, 0x01, 0x84);
+ mipi_dsi_generic_write_seq(dsi, 0x05, 0x25);
+ mipi_dsi_generic_write_seq(dsi, 0x06, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0x07, 0x20);
+ mipi_dsi_generic_write_seq(dsi, 0x08, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0x09, 0x08);
+ mipi_dsi_generic_write_seq(dsi, 0x0a, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0x0b, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0x0c, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0x0d, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x0e, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x0f, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x10, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x11, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x12, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x17, 0xf3);
+ mipi_dsi_generic_write_seq(dsi, 0x18, 0xc0);
+ mipi_dsi_generic_write_seq(dsi, 0x19, 0xc0);
+ mipi_dsi_generic_write_seq(dsi, 0x1a, 0xc0);
+ mipi_dsi_generic_write_seq(dsi, 0x1b, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0x1c, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0x1d, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0x1e, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0x1f, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0x20, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xff, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0x35, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xd3, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xd4, 0x04);
+ mipi_dsi_generic_write_seq(dsi, 0x5e, 0x0d);
+ mipi_dsi_generic_write_seq(dsi, 0x11, 0x00);
msleep(100);
- dsi_generic_write_seq(dsi, 0x29, 0x00);
- dsi_generic_write_seq(dsi, 0x53, 0x24);
+ mipi_dsi_generic_write_seq(dsi, 0x29, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0x53, 0x24);
return 0;
}
@@ -117,7 +101,7 @@ static int tm5p5_nt35596_off(struct tm5p5_nt35596 *ctx)
return ret;
}
- dsi_dcs_write_seq(dsi, 0x4f, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x4f, 0x01);
return 0;
}
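
/*
 * Editorial note: the driver-local macros removed above are replaced by the
 * common helpers declared in <drm/drm_mipi_dsi.h>, mipi_dsi_generic_write_seq()
 * and mipi_dsi_dcs_write_seq(). As a rough sketch (the shared definitions may
 * differ in details such as error reporting), they behave like the local ones
 * they replace:
 */
#define mipi_dsi_generic_write_seq(dsi, seq...) do {			\
		static const u8 d[] = { seq };				\
		int ret;						\
		ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d));	\
		if (ret < 0)						\
			return ret;					\
	} while (0)

#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) do {			\
		static const u8 d[] = { cmd, seq };			\
		int ret;						\
		ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d));	\
		if (ret < 0)						\
			return ret;					\
	} while (0)
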
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index ad58840eda41..90098b753e3b 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -43,14 +43,6 @@ struct boe_bf060y8m_aj0 *to_boe_bf060y8m_aj0(struct drm_panel *panel)
return container_of(panel, struct boe_bf060y8m_aj0, panel);
}
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe)
{
gpiod_set_value_cansleep(boe->reset_gpio, 0);
@@ -67,12 +59,12 @@ static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe)
struct device *dev = &dsi->dev;
int ret;
- dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c);
- dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10);
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
- dsi_dcs_write_seq(dsi, 0xf8,
- 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
+ mipi_dsi_dcs_write_seq(dsi, 0xf8,
+ 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
@@ -81,17 +73,17 @@ static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe)
}
msleep(30);
- dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- dsi_dcs_write_seq(dsi, 0xc0,
- 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
- 0x2a, 0x31, 0x39, 0x20, 0x09);
- dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
- 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
- 0x5c, 0x5c, 0x5c);
- dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xc0,
+ 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
+ 0x2a, 0x31, 0x39, 0x20, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
+ 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
+ 0x5c, 0x5c, 0x5c);
+ mipi_dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
msleep(30);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 5cb8dc2ebe18..01bfe0783304 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -351,7 +351,7 @@ static void panel_edp_wait(ktime_t start_ktime, unsigned int min_ms)
return;
min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
- now_ktime = ktime_get();
+ now_ktime = ktime_get_boottime();
if (ktime_before(now_ktime, min_ktime))
msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
@@ -378,7 +378,7 @@ static int panel_edp_suspend(struct device *dev)
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- p->unprepared_time = ktime_get();
+ p->unprepared_time = ktime_get_boottime();
return 0;
}
@@ -464,14 +464,14 @@ static int panel_edp_prepare_once(struct panel_edp *p)
}
}
- p->prepared_time = ktime_get();
+ p->prepared_time = ktime_get_boottime();
return 0;
error:
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- p->unprepared_time = ktime_get();
+ p->unprepared_time = ktime_get_boottime();
return err;
}
@@ -1891,7 +1891,8 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
- EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "M133NW4J-R3"),
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x854a, &delay_200_500_p2e100, "M133NW4J"),
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index eee714cf3f49..e7be15b68102 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -51,14 +51,6 @@ static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
return container_of(panel, struct kd35t133, panel);
}
-#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
- static const u8 b[] = { cmd, seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, b, ARRAY_SIZE(b)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static int kd35t133_init_sequence(struct kd35t133 *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -68,25 +60,25 @@ static int kd35t133_init_sequence(struct kd35t133 *ctx)
* Init sequence was supplied by the panel vendor with minimal
* documentation.
*/
- dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
- 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
- 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
- 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
- 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
- dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
- dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
- 0x20, 0x02);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
- 0xa9, 0x51, 0x2c, 0x82);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
+ 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
+ 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
+ 0x20, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
+ 0xa9, 0x51, 0x2c, 0x82);
mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0);
dev_dbg(dev, "Panel init sequence done\n");
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
new file mode 100644
index 000000000000..d4fb5d1b295b
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for panels based on Himax HX8394 controller, such as:
+ *
+ * - HannStar HSD060BHW4 5.99" MIPI-DSI panel
+ *
+ * Copyright (C) 2021 Kamil Trzciński
+ *
+ * Based on drivers/gpu/drm/panel/panel-sitronix-st7703.c
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define DRV_NAME "panel-himax-hx8394"
+
+/* Manufacturer specific commands sent via DSI, listed in HX8394-F datasheet */
+#define HX8394_CMD_SETSEQUENCE 0xb0
+#define HX8394_CMD_SETPOWER 0xb1
+#define HX8394_CMD_SETDISP 0xb2
+#define HX8394_CMD_SETCYC 0xb4
+#define HX8394_CMD_SETVCOM 0xb6
+#define HX8394_CMD_SETTE 0xb7
+#define HX8394_CMD_SETSENSOR 0xb8
+#define HX8394_CMD_SETEXTC 0xb9
+#define HX8394_CMD_SETMIPI 0xba
+#define HX8394_CMD_SETOTP 0xbb
+#define HX8394_CMD_SETREGBANK 0xbd
+#define HX8394_CMD_UNKNOWN1 0xc0
+#define HX8394_CMD_SETDGCLUT 0xc1
+#define HX8394_CMD_SETID 0xc3
+#define HX8394_CMD_SETDDB 0xc4
+#define HX8394_CMD_UNKNOWN2 0xc6
+#define HX8394_CMD_SETCABC 0xc9
+#define HX8394_CMD_SETCABCGAIN 0xca
+#define HX8394_CMD_SETPANEL 0xcc
+#define HX8394_CMD_SETOFFSET 0xd2
+#define HX8394_CMD_SETGIP0 0xd3
+#define HX8394_CMD_UNKNOWN3 0xd4
+#define HX8394_CMD_SETGIP1 0xd5
+#define HX8394_CMD_SETGIP2 0xd6
+#define HX8394_CMD_SETGPO 0xd6
+#define HX8394_CMD_SETSCALING 0xdd
+#define HX8394_CMD_SETIDLE 0xdf
+#define HX8394_CMD_SETGAMMA 0xe0
+#define HX8394_CMD_SETCHEMODE_DYN 0xe4
+#define HX8394_CMD_SETCHE 0xe5
+#define HX8394_CMD_SETCESEL 0xe6
+#define HX8394_CMD_SET_SP_CMD 0xe9
+#define HX8394_CMD_SETREADINDEX 0xfe
+#define HX8394_CMD_GETSPIREAD 0xff
+
+struct hx8394 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vcc;
+ struct regulator *iovcc;
+ bool prepared;
+
+ const struct hx8394_panel_desc *desc;
+};
+
+struct hx8394_panel_desc {
+ const struct drm_display_mode *mode;
+ unsigned int lanes;
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ int (*init_sequence)(struct hx8394 *ctx);
+};
+
+static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx8394, panel);
+}
+
+static int hsd060bhw4_init_sequence(struct hx8394 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* 5.19.8 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
+
+ /* 5.19.9 SETMIPI: Set MIPI control (BAh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+
+ /* 5.19.3 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x78, 0x0c, 0x07);
+
+ /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
+ 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
+ 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
+ 0x7c);
+
+ /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
+ 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
+ 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
+ 0x00, 0x0c, 0x40);
+
+ /* 5.19.20 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
+ 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+
+ /* 5.19.21 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+
+ /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
+ 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
+ 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
+ 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
+ 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
+ 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
+ 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
+ 0x4a, 0x4c, 0x4b, 0x7f);
+
+ /* 5.19.17 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
+ 0x0b);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
+
+ /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
+ 0x7d, 0x7d);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+ 0x02);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+ 0xed);
+
+ return 0;
+}
+
+static const struct drm_display_mode hsd060bhw4_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 40,
+ .hsync_end = 720 + 40 + 46,
+ .htotal = 720 + 40 + 46 + 40,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 9,
+ .vsync_end = 1440 + 9 + 7,
+ .vtotal = 1440 + 9 + 7 + 7,
+ .clock = 74250,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 68,
+ .height_mm = 136,
+};
+
+static const struct hx8394_panel_desc hsd060bhw4_desc = {
+ .mode = &hsd060bhw4_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = hsd060bhw4_init_sequence,
+};
+
+static int hx8394_enable(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ ret = ctx->desc->init_sequence(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+
+ /* Panel is operational 120 msec after reset */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to turn on the display: %d\n", ret);
+ goto sleep_in;
+ }
+
+ return 0;
+
+sleep_in:
+ /* This will probably fail, but let's try orderly power off anyway. */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (!ret)
+ msleep(50);
+
+ return ret;
+}
+
+static int hx8394_disable(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+
+ msleep(50); /* about 3 frames */
+
+ return 0;
+}
+
+static int hx8394_unprepare(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+
+ if (!ctx->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vcc);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int hx8394_prepare(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ ret = regulator_enable(ctx->vcc);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(ctx->iovcc);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vcc;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ msleep(180);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_vcc:
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_disable(ctx->vcc);
+ return ret;
+}
+
+static int hx8394_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
+ if (!mode) {
+ dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->desc->mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs hx8394_drm_funcs = {
+ .disable = hx8394_disable,
+ .unprepare = hx8394_unprepare,
+ .prepare = hx8394_prepare,
+ .enable = hx8394_enable,
+ .get_modes = hx8394_get_modes,
+};
+
+static int hx8394_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hx8394 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+ ctx->desc = of_device_get_match_data(dev);
+
+ dsi->mode_flags = ctx->desc->mode_flags;
+ dsi->format = ctx->desc->format;
+ dsi->lanes = ctx->desc->lanes;
+
+ ctx->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(ctx->vcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->vcc),
+ "Failed to request vcc regulator\n");
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
+ "Failed to request iovcc regulator\n");
+
+ drm_panel_init(&ctx->panel, dev, &hx8394_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "mipi_dsi_attach failed\n");
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ dev_dbg(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
+ ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->desc->mode),
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
+
+ return 0;
+}
+
+static void hx8394_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
+}
+
+static void hx8394_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ hx8394_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id hx8394_of_match[] = {
+ { .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx8394_of_match);
+
+static struct mipi_dsi_driver hx8394_driver = {
+ .probe = hx8394_probe,
+ .remove = hx8394_remove,
+ .shutdown = hx8394_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hx8394_of_match,
+ },
+};
+module_mipi_dsi_driver(hx8394_driver);
+
+MODULE_AUTHOR("Kamil Trzciński <ayufan@ayufan.eu>");
+MODULE_DESCRIPTION("DRM driver for Himax HX8394 based MIPI DSI panels");
+MODULE_LICENSE("GPL");
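
As an illustrative sketch only (the names below are hypothetical, not taken from this patch): a second panel supported by this driver would reuse the same pattern, i.e. its own mode and hx8394_panel_desc plus an extra hx8394_of_match entry pointing at that desc.

/* Hypothetical second variant, sketched after the hsd060bhw4 entries above. */
static const struct drm_display_mode example_mode = {
        .hdisplay = 720,
        .hsync_start = 720 + 32,
        .hsync_end = 720 + 32 + 8,
        .htotal = 720 + 32 + 8 + 32,
        .vdisplay = 1280,
        .vsync_start = 1280 + 8,
        .vsync_end = 1280 + 8 + 4,
        .vtotal = 1280 + 8 + 4 + 8,
        .clock = 61776, /* 792 * 1300 * 60 Hz / 1000 */
        .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
        .width_mm = 62,
        .height_mm = 110,
};

static const struct hx8394_panel_desc example_desc = {
        .mode = &example_mode,
        .lanes = 4,
        .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
        .format = MIPI_DSI_FMT_RGB888,
        .init_sequence = example_init_sequence, /* written like hsd060bhw4_init_sequence */
};

/* ...and one more entry in hx8394_of_match:
 *      { .compatible = "vendor,example-panel", .data = &example_desc },
 */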
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 384a724f2822..3fdf884b3257 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -577,11 +577,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9341_dbi_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = ili9341_dbi_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
- .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(ili9341_dbi_enable),
};
static const struct drm_display_mode ili9341_dbi_mode = {
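
For reference, the single DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(ili9341_dbi_enable) initializer above stands in for the per-callback assignments that were removed. A sketch of what such a macro expands to, modeled directly on those removed lines; the real definition in include/drm/drm_mipi_dbi.h may wire up additional plane helpers:

/* Sketch modeled on the initializers removed above; not the verbatim kernel macro. */
#define DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(enable_func)            \
        .mode_valid = mipi_dbi_pipe_mode_valid,                        \
        .enable = (enable_func),                                       \
        .disable = mipi_dbi_pipe_disable,                              \
        .update = mipi_dbi_pipe_update,                                \
        .prepare_fb = drm_gem_simple_display_pipe_prepare_fb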
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index d8765b2294fb..8912757a6f42 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -29,22 +29,6 @@ static inline struct jdi_fhd_r63452 *to_jdi_fhd_r63452(struct drm_panel *panel)
return container_of(panel, struct jdi_fhd_r63452, panel);
}
-#define dsi_generic_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void jdi_fhd_r63452_reset(struct jdi_fhd_r63452 *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
@@ -63,12 +47,12 @@ static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- dsi_generic_write_seq(dsi, 0xb0, 0x00);
- dsi_generic_write_seq(dsi, 0xd6, 0x01);
- dsi_generic_write_seq(dsi, 0xec,
- 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
- 0x13, 0x15, 0x68, 0x0b, 0xb5);
- dsi_generic_write_seq(dsi, 0xb0, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd6, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xec,
+ 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
+ 0x13, 0x15, 0x68, 0x0b, 0xb5);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret < 0) {
@@ -76,7 +60,7 @@ static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
return ret;
}
- dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
if (ret < 0) {
@@ -108,10 +92,10 @@ static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
return ret;
}
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00);
- dsi_dcs_write_seq(dsi, 0x84, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0x00);
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
@@ -127,10 +111,10 @@ static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
}
msleep(80);
- dsi_generic_write_seq(dsi, 0xb0, 0x04);
- dsi_dcs_write_seq(dsi, 0x84, 0x00);
- dsi_generic_write_seq(dsi, 0xc8, 0x11);
- dsi_generic_write_seq(dsi, 0xb0, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc8, 0x11);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
return 0;
}
@@ -143,12 +127,12 @@ static int jdi_fhd_r63452_off(struct jdi_fhd_r63452 *ctx)
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- dsi_generic_write_seq(dsi, 0xb0, 0x00);
- dsi_generic_write_seq(dsi, 0xd6, 0x01);
- dsi_generic_write_seq(dsi, 0xec,
- 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
- 0x13, 0x15, 0x68, 0x0b, 0x95);
- dsi_generic_write_seq(dsi, 0xb0, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd6, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xec,
+ 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
+ 0x13, 0x15, 0x68, 0x0b, 0x95);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0) {
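
The pattern above repeats across the panel drivers in this series: each driver's private dsi_generic_write_seq()/dsi_dcs_write_seq() macro is dropped in favour of the shared mipi_dsi_generic_write_seq()/mipi_dsi_dcs_write_seq() helpers from include/drm/drm_mipi_dsi.h. As a sketch modeled on the per-driver macros being removed (the shared helpers behave the same way and may additionally log the failure), note that they still return from the calling function on error, which is why the explicit ret checks disappear in conversions such as panel-sharp-ls060t1sx01 further below:

/* Sketch of the shared helpers, modeled on the per-driver macros removed above. */
#define mipi_dsi_generic_write_seq(dsi, seq...) do {                    \
                static const u8 d[] = { seq };                          \
                int ret;                                                \
                ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d));    \
                if (ret < 0)                                            \
                        return ret;     /* returns from the caller */   \
        } while (0)

#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) do {                   \
                static const u8 d[] = { cmd, seq };                     \
                int ret;                                                \
                ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
                if (ret < 0)                                            \
                        return ret;     /* returns from the caller */   \
        } while (0)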
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 5619f186d28c..d2efd887484b 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -244,14 +244,6 @@ struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel)
return container_of(panel, struct ltk050h3146w, panel);
}
-#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
- static const u8 b[] = { cmd, seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, b, ARRAY_SIZE(b)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -261,55 +253,55 @@ static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
- dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
- dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
- 0x01);
- dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
- dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
- dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
-
- dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
- dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
- 0x28, 0x04, 0xcc, 0xcc, 0xcc);
- dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
- dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
- dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
- dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
- dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
- 0x80);
- dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
- 0x16, 0x00, 0x00);
- dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
- 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
- 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
- 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
- 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
- dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
- 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
- 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
- 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
- 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
- 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
- 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
- dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
- 0x21, 0x00, 0x60);
- dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
- dsi_dcs_write_seq(dsi, 0xde, 0x02);
- dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
- dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
- dsi_dcs_write_seq(dsi, 0xc1, 0x11);
- dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
- dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
- dsi_dcs_write_seq(dsi, 0xde, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
+ 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
+ mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
+ mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
+
+ mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
+ mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
+ 0x28, 0x04, 0xcc, 0xcc, 0xcc);
+ mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
+ mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
+ mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
+ 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
+ 0x16, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
+ 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
+ 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
+ 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
+ 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
+ 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
+ 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
+ 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
+ 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
+ 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
+ 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
+ 0x21, 0x00, 0x60);
+ mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xde, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
+ mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
+ mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
+ mipi_dsi_dcs_write_seq(dsi, 0xde, 0x00);
ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 772e3b6acece..9243b2ad828d 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -45,14 +45,6 @@ static inline struct mantix *panel_to_mantix(struct drm_panel *panel)
return container_of(panel, struct mantix, panel);
}
-#define dsi_generic_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static int mantix_init_sequence(struct mantix *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -61,18 +53,18 @@ static int mantix_init_sequence(struct mantix *ctx)
/*
* Init sequence was supplied by the panel vendor.
*/
- dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A);
- dsi_generic_write_seq(dsi, MANTIX_CMD_INT_CANCEL, 0x03);
- dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x03);
- dsi_generic_write_seq(dsi, 0x80, 0xA9, 0x00);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_INT_CANCEL, 0x03);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0x80, 0xA9, 0x00);
- dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x09);
- dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x09);
+ mipi_dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
msleep(20);
- dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
- dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
msleep(20);
dev_dbg(dev, "Panel init sequence done\n");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 3a844917da07..abf752b36a52 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -89,14 +89,6 @@ static inline struct nt35950 *to_nt35950(struct drm_panel *panel)
return container_of(panel, struct nt35950, panel);
}
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void nt35950_reset(struct nt35950 *nt)
{
gpiod_set_value_cansleep(nt->reset_gpio, 1);
@@ -338,7 +330,7 @@ static int nt35950_on(struct nt35950 *nt)
return ret;
/* Unknown command */
- dsi_dcs_write_seq(dsi, 0xd4, 0x88, 0x88);
+ mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x88, 0x88);
/* CMD2 Page 7 */
ret = nt35950_set_cmd2_page(nt, 7);
@@ -346,10 +338,10 @@ static int nt35950_on(struct nt35950 *nt)
return ret;
/* Enable SubPixel Rendering */
- dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_EN, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_EN, 0x01);
/* SPR Mode: YYG Rainbow-RGB */
- dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_MODE, MCS_SPR_MODE_YYG_RAINBOW_RGB);
+ mipi_dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_MODE, MCS_SPR_MODE_YYG_RAINBOW_RGB);
/* CMD3 */
ret = nt35950_inject_black_image(nt);
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 36a46cb7fe1c..aba556c98300 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -202,8 +202,7 @@ static const struct drm_panel_funcs lcd_olinuxino_funcs = {
.get_modes = lcd_olinuxino_get_modes,
};
-static int lcd_olinuxino_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lcd_olinuxino_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lcd_olinuxino *lcd;
@@ -309,7 +308,7 @@ static struct i2c_driver lcd_olinuxino_driver = {
.name = "lcd_olinuxino",
.of_match_table = lcd_olinuxino_of_ids,
},
- .probe = lcd_olinuxino_probe,
+ .probe_new = lcd_olinuxino_probe,
.remove = lcd_olinuxino_remove,
};
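
The two i2c panel drivers in this series (this one and panel-raspberrypi-touchscreen below) move from .probe, whose struct i2c_device_id argument these drivers did not use, to .probe_new, which takes only the client. A minimal sketch of the resulting shape, with hypothetical names; per-panel match data, when needed, can come from the OF table exactly as the DSI drivers above already do:

/* Hypothetical driver, sketching the probe_new-style callback shape. */
static int example_panel_probe(struct i2c_client *client)
{
        struct device *dev = &client->dev;
        const void *match_data;

        /* Per-panel data comes from the OF match table, not the i2c_device_id. */
        match_data = of_device_get_match_data(dev);
        if (!match_data)
                return -EINVAL;

        /* ... regulator/GPIO lookup, drm_panel_init(), drm_panel_add() ... */
        return 0;
}

static struct i2c_driver example_panel_driver = {
        .driver = {
                .name = "example-panel",
                .of_match_table = example_panel_of_ids, /* hypothetical table */
        },
        .probe_new = example_panel_probe,
};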
diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
new file mode 100644
index 000000000000..e46be5014d42
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Orisetech OTA5601A TFT LCD panel driver
+ *
+ * Copyright (C) 2021, Christophe Branchereau <cbranchereau@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define OTA5601A_CTL 0x01
+#define OTA5601A_CTL_OFF 0x00
+#define OTA5601A_CTL_ON BIT(0)
+
+struct ota5601a_panel_info {
+ const struct drm_display_mode *display_modes;
+ unsigned int num_modes;
+ u16 width_mm, height_mm;
+ u32 bus_format, bus_flags;
+};
+
+struct ota5601a {
+ struct drm_panel drm_panel;
+ struct regmap *map;
+ struct regulator *supply;
+ const struct ota5601a_panel_info *panel_info;
+
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct ota5601a *to_ota5601a(struct drm_panel *panel)
+{
+ return container_of(panel, struct ota5601a, drm_panel);
+}
+
+static const struct reg_sequence ota5601a_panel_regs[] = {
+ { 0xfd, 0x00 }, /* Page Shift */
+ { 0x02, 0x00 }, /* Reset */
+
+ { 0x18, 0x00 }, /* Interface Sel: RGB 24 Bits */
+ { 0x34, 0x20 }, /* Undocumented */
+
+ { 0x0c, 0x01 }, /* Contrast set by CMD1 == within page 0x00 */
+ { 0x0d, 0x48 }, /* R Brightness */
+ { 0x0e, 0x48 }, /* G Brightness */
+ { 0x0f, 0x48 }, /* B Brightness */
+ { 0x07, 0x40 }, /* R Contrast */
+ { 0x08, 0x33 }, /* G Contrast */
+ { 0x09, 0x3a }, /* B Contrast */
+
+ { 0x16, 0x01 }, /* NTSC Sel */
+ { 0x19, 0x8d }, /* VBLK */
+ { 0x1a, 0x28 }, /* HBLK */
+ { 0x1c, 0x00 }, /* Scan Shift Dir. */
+
+ { 0xfd, 0xc5 }, /* Page Shift */
+ { 0x82, 0x0c }, /* PWR_CTRL Pump */
+ { 0xa2, 0xb4 }, /* PWR_CTRL VGH/VGL */
+
+ { 0xfd, 0xc4 }, /* Page Shift - What follows is listed as "RGB 24bit Timing Set" */
+ { 0x82, 0x45 },
+
+ { 0xfd, 0xc1 },
+ { 0x91, 0x02 },
+
+ { 0xfd, 0xc0 },
+ { 0xa1, 0x01 },
+ { 0xa2, 0x1f },
+ { 0xa3, 0x0b },
+ { 0xa4, 0x38 },
+ { 0xa5, 0x00 },
+ { 0xa6, 0x0a },
+ { 0xa7, 0x38 },
+ { 0xa8, 0x00 },
+ { 0xa9, 0x0a },
+ { 0xaa, 0x37 },
+
+ { 0xfd, 0xce },
+ { 0x81, 0x18 },
+ { 0x82, 0x43 },
+ { 0x83, 0x43 },
+ { 0x91, 0x06 },
+ { 0x93, 0x38 },
+ { 0x94, 0x02 },
+ { 0x95, 0x06 },
+ { 0x97, 0x38 },
+ { 0x98, 0x02 },
+ { 0x99, 0x06 },
+ { 0x9b, 0x38 },
+ { 0x9c, 0x02 },
+
+ { 0xfd, 0x00 }, /* Page Shift */
+};
+
+static const struct regmap_config ota5601a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ota5601a_prepare(struct drm_panel *drm_panel)
+{
+ struct ota5601a *panel = to_ota5601a(drm_panel);
+ int err;
+
+ err = regulator_enable(panel->supply);
+ if (err) {
+ dev_err(drm_panel->dev, "Failed to enable power supply: %d\n", err);
+ return err;
+ }
+
+ /* Reset to be held low for 10us min according to the doc, 10ms before sending commands */
+ gpiod_set_value_cansleep(panel->reset_gpio, 1);
+ usleep_range(10, 30);
+ gpiod_set_value_cansleep(panel->reset_gpio, 0);
+ usleep_range(10000, 20000);
+
+ /* Init all registers. */
+ err = regmap_multi_reg_write(panel->map, ota5601a_panel_regs,
+ ARRAY_SIZE(ota5601a_panel_regs));
+ if (err) {
+ dev_err(drm_panel->dev, "Failed to init registers: %d\n", err);
+ goto err_disable_regulator;
+ }
+
+ msleep(120);
+
+ return 0;
+
+err_disable_regulator:
+ regulator_disable(panel->supply);
+ return err;
+}
+
+static int ota5601a_unprepare(struct drm_panel *drm_panel)
+{
+ struct ota5601a *panel = to_ota5601a(drm_panel);
+
+ gpiod_set_value_cansleep(panel->reset_gpio, 1);
+
+ regulator_disable(panel->supply);
+
+ return 0;
+}
+
+static int ota5601a_enable(struct drm_panel *drm_panel)
+{
+ struct ota5601a *panel = to_ota5601a(drm_panel);
+ int err;
+
+ err = regmap_write(panel->map, OTA5601A_CTL, OTA5601A_CTL_ON);
+
+ if (err) {
+ dev_err(drm_panel->dev, "Unable to enable panel: %d\n", err);
+ return err;
+ }
+
+ if (drm_panel->backlight) {
+ /* Wait for the picture to be ready before enabling backlight */
+ msleep(120);
+ }
+
+ return 0;
+}
+
+static int ota5601a_disable(struct drm_panel *drm_panel)
+{
+ struct ota5601a *panel = to_ota5601a(drm_panel);
+ int err;
+
+ err = regmap_write(panel->map, OTA5601A_CTL, OTA5601A_CTL_OFF);
+
+ if (err) {
+ dev_err(drm_panel->dev, "Unable to disable panel: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ota5601a_get_modes(struct drm_panel *drm_panel,
+ struct drm_connector *connector)
+{
+ struct ota5601a *panel = to_ota5601a(drm_panel);
+ const struct ota5601a_panel_info *panel_info = panel->panel_info;
+ struct drm_display_mode *mode;
+ unsigned int i;
+
+ for (i = 0; i < panel_info->num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel_info->display_modes[i]);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ if (panel_info->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = panel_info->width_mm;
+ connector->display_info.height_mm = panel_info->height_mm;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &panel_info->bus_format, 1);
+ connector->display_info.bus_flags = panel_info->bus_flags;
+
+ return panel_info->num_modes;
+}
+
+static const struct drm_panel_funcs ota5601a_funcs = {
+ .prepare = ota5601a_prepare,
+ .unprepare = ota5601a_unprepare,
+ .enable = ota5601a_enable,
+ .disable = ota5601a_disable,
+ .get_modes = ota5601a_get_modes,
+};
+
+static int ota5601a_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct device *dev = &spi->dev;
+ struct ota5601a *panel;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, panel);
+
+ panel->panel_info = (const struct ota5601a_panel_info *)id->driver_data;
+ if (!panel->panel_info)
+ return -EINVAL;
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply)) {
+ dev_err(dev, "Failed to get power supply\n");
+ return PTR_ERR(panel->supply);
+ }
+
+ panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->reset_gpio)) {
+ dev_err(dev, "Failed to get reset GPIO\n");
+ return PTR_ERR(panel->reset_gpio);
+ }
+
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_3 | SPI_3WIRE;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(dev, "Failed to setup SPI\n");
+ return err;
+ }
+
+ panel->map = devm_regmap_init_spi(spi, &ota5601a_regmap_config);
+ if (IS_ERR(panel->map)) {
+ dev_err(dev, "Failed to init regmap\n");
+ return PTR_ERR(panel->map);
+ }
+
+ drm_panel_init(&panel->drm_panel, dev, &ota5601a_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ err = drm_panel_of_backlight(&panel->drm_panel);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get backlight handle\n");
+ return err;
+ }
+
+ drm_panel_add(&panel->drm_panel);
+
+ return 0;
+}
+
+static void ota5601a_remove(struct spi_device *spi)
+{
+ struct ota5601a *panel = spi_get_drvdata(spi);
+
+ drm_panel_remove(&panel->drm_panel);
+
+ ota5601a_disable(&panel->drm_panel);
+ ota5601a_unprepare(&panel->drm_panel);
+}
+
+static const struct drm_display_mode gpt3_display_modes[] = {
+ { /* 60 Hz */
+ .clock = 27000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 220,
+ .hsync_end = 640 + 220 + 20,
+ .htotal = 640 + 220 + 20 + 20,
+ .vdisplay = 480,
+ .vsync_start = 480 + 7,
+ .vsync_end = 480 + 7 + 6,
+ .vtotal = 480 + 7 + 6 + 7,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+
+ { /* 50 Hz */
+ .clock = 24000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 280,
+ .hsync_end = 640 + 280 + 20,
+ .htotal = 640 + 280 + 20 + 20,
+ .vdisplay = 480,
+ .vsync_start = 480 + 7,
+ .vsync_end = 480 + 7 + 6,
+ .vtotal = 480 + 7 + 6 + 7,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct ota5601a_panel_info gpt3_info = {
+ .display_modes = gpt3_display_modes,
+ .num_modes = ARRAY_SIZE(gpt3_display_modes),
+ .width_mm = 71,
+ .height_mm = 51,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+};
+
+static const struct spi_device_id gpt3_id[] = {
+ { "gpt3", (kernel_ulong_t)&gpt3_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, gpt3_id);
+
+static const struct of_device_id ota5601a_of_match[] = {
+ { .compatible = "focaltech,gpt3" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ota5601a_of_match);
+
+static struct spi_driver ota5601a_driver = {
+ .driver = {
+ .name = "ota5601a",
+ .of_match_table = ota5601a_of_match,
+ },
+ .id_table = gpt3_id,
+ .probe = ota5601a_probe,
+ .remove = ota5601a_remove,
+};
+
+module_spi_driver(ota5601a_driver);
+
+MODULE_AUTHOR("Christophe Branchereau <cbranchereau@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 79f852465a84..1ef1cfd01c77 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -362,8 +362,7 @@ static const struct drm_panel_funcs rpi_touchscreen_funcs = {
.get_modes = rpi_touchscreen_get_modes,
};
-static int rpi_touchscreen_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int rpi_touchscreen_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct rpi_touchscreen *ts;
@@ -491,7 +490,7 @@ static struct i2c_driver rpi_touchscreen_driver = {
.name = "rpi_touchscreen",
.of_match_table = rpi_touchscreen_of_ids,
},
- .probe = rpi_touchscreen_probe,
+ .probe_new = rpi_touchscreen_probe,
.remove = rpi_touchscreen_remove,
};
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 5a8b978c6415..5703f4712d96 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -53,7 +53,7 @@ static void atana33xc20_wait(ktime_t start_ktime, unsigned int min_ms)
ktime_t now_ktime, min_ktime;
min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
- now_ktime = ktime_get();
+ now_ktime = ktime_get_boottime();
if (ktime_before(now_ktime, min_ktime))
msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
@@ -75,7 +75,7 @@ static int atana33xc20_suspend(struct device *dev)
ret = regulator_disable(p->supply);
if (ret)
return ret;
- p->powered_off_time = ktime_get();
+ p->powered_off_time = ktime_get_boottime();
p->el3_was_on = false;
return 0;
@@ -93,7 +93,7 @@ static int atana33xc20_resume(struct device *dev)
ret = regulator_enable(p->supply);
if (ret)
return ret;
- p->powered_on_time = ktime_get();
+ p->powered_on_time = ktime_get_boottime();
if (p->no_hpd) {
msleep(HPD_MAX_MS);
@@ -142,7 +142,7 @@ static int atana33xc20_disable(struct drm_panel *panel)
return 0;
gpiod_set_value_cansleep(p->el_on3_gpio, 0);
- p->el_on3_off_time = ktime_get();
+ p->el_on3_off_time = ktime_get_boottime();
p->enabled = false;
/*
@@ -310,7 +310,7 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
ret = devm_add_action_or_reset(dev, atana33xc20_runtime_disable, dev);
if (ret)
return ret;
- pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_use_autosuspend(dev);
ret = devm_add_action_or_reset(dev, atana33xc20_dont_use_autosuspend, dev);
if (ret)
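
The ktime_get() to ktime_get_boottime() changes here (and in panel-simple below) matter because the boottime clock keeps advancing while the system is suspended, whereas the monotonic clock does not; time the panel has genuinely spent powered off or on is therefore credited, and the delay helpers do not insert needless extra sleeps after a suspend/resume cycle. A sketch of the delay-enforcement pattern, mirroring the wait helper shown above:

/* Enforce a minimum real-time delay measured from start_ktime. */
static void example_panel_wait(ktime_t start_ktime, unsigned int min_ms)
{
        ktime_t now_ktime, min_ktime;

        min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
        /* Boottime includes time spent in suspend; with the monotonic clock
         * a suspend would not be credited and this helper would sleep longer
         * than necessary after resume.
         */
        now_ktime = ktime_get_boottime();

        if (ktime_before(now_ktime, min_ktime))
                msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
}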
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 97ff7a18545c..7431cae7427e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -28,14 +28,6 @@ s6e88a0_ams452ef01 *to_s6e88a0_ams452ef01(struct drm_panel *panel)
return container_of(panel, struct s6e88a0_ams452ef01, panel);
}
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void s6e88a0_ams452ef01_reset(struct s6e88a0_ams452ef01 *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
@@ -54,8 +46,8 @@ static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
- dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
+ mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
@@ -65,23 +57,23 @@ static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
msleep(120);
// set default brightness/gama
- dsi_dcs_write_seq(dsi, 0xca,
- 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
- 0x80, 0x80, 0x80, // V203 R,G,B
- 0x80, 0x80, 0x80, // V151 R,G,B
- 0x80, 0x80, 0x80, // V87 R,G,B
- 0x80, 0x80, 0x80, // V51 R,G,B
- 0x80, 0x80, 0x80, // V35 R,G,B
- 0x80, 0x80, 0x80, // V23 R,G,B
- 0x80, 0x80, 0x80, // V11 R,G,B
- 0x6b, 0x68, 0x71, // V3 R,G,B
- 0x00, 0x00, 0x00); // V1 R,G,B
+ mipi_dsi_dcs_write_seq(dsi, 0xca,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
+ 0x80, 0x80, 0x80, // V203 R,G,B
+ 0x80, 0x80, 0x80, // V151 R,G,B
+ 0x80, 0x80, 0x80, // V87 R,G,B
+ 0x80, 0x80, 0x80, // V51 R,G,B
+ 0x80, 0x80, 0x80, // V35 R,G,B
+ 0x80, 0x80, 0x80, // V23 R,G,B
+ 0x80, 0x80, 0x80, // V11 R,G,B
+ 0x6b, 0x68, 0x71, // V3 R,G,B
+ 0x00, 0x00, 0x00); // V1 R,G,B
// set default Amoled Off Ratio
- dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
- dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default elvss voltage
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
- dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
+ mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default elvss voltage
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 1a0d24595faa..9db49a028930 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -34,14 +34,6 @@ struct sofef00_panel *to_sofef00_panel(struct drm_panel *panel)
return container_of(panel, struct sofef00_panel, panel);
}
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void sofef00_panel_reset(struct sofef00_panel *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
@@ -67,7 +59,7 @@ static int sofef00_panel_on(struct sofef00_panel *ctx)
}
usleep_range(10000, 11000);
- dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret < 0) {
@@ -75,13 +67,13 @@ static int sofef00_panel_on(struct sofef00_panel *ctx)
return ret;
}
- dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
- dsi_dcs_write_seq(dsi, 0xb0, 0x07);
- dsi_dcs_write_seq(dsi, 0xb6, 0x12);
- dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x07);
+ mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x12);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index 8a4e0c1fe73f..68f52eaaf4fa 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -32,12 +32,6 @@ static inline struct sharp_ls060 *to_sharp_ls060(struct drm_panel *panel)
return container_of(panel, struct sharp_ls060, panel);
}
-#define dsi_dcs_write_seq(dsi, seq...) ({ \
- static const u8 d[] = { seq }; \
- \
- mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- })
-
static void sharp_ls060_reset(struct sharp_ls060 *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
@@ -56,17 +50,8 @@ static int sharp_ls060_on(struct sharp_ls060 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = dsi_dcs_write_seq(dsi, 0xbb, 0x13);
- if (ret < 0) {
- dev_err(dev, "Failed to send command: %d\n", ret);
- return ret;
- }
-
- ret = dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
- if (ret < 0) {
- dev_err(dev, "Failed to send command: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 8a3b685c2fcc..065f378bba9d 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -280,7 +280,7 @@ static void panel_simple_wait(ktime_t start_ktime, unsigned int min_ms)
return;
min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
- now_ktime = ktime_get();
+ now_ktime = ktime_get_boottime();
if (ktime_before(now_ktime, min_ktime))
msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
@@ -307,7 +307,7 @@ static int panel_simple_suspend(struct device *dev)
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- p->unprepared_time = ktime_get();
+ p->unprepared_time = ktime_get_boottime();
kfree(p->edid);
p->edid = NULL;
@@ -351,7 +351,7 @@ static int panel_simple_resume(struct device *dev)
if (p->desc->delay.prepare)
msleep(p->desc->delay.prepare);
- p->prepared_time = ktime_get();
+ p->prepared_time = ktime_get_boottime();
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 86a472b01360..6747ca237ced 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -73,14 +73,6 @@ static inline struct st7703 *panel_to_st7703(struct drm_panel *panel)
return container_of(panel, struct st7703, panel);
}
-#define dsi_generic_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static int jh057n_init_sequence(struct st7703 *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -90,50 +82,50 @@ static int jh057n_init_sequence(struct st7703 *ctx)
* resemble the ST7703 but the number of parameters often don't match
* so it's likely a clone.
*/
- dsi_generic_write_seq(dsi, ST7703_CMD_SETEXTC,
- 0xF1, 0x12, 0x83);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETRGBIF,
- 0x10, 0x10, 0x05, 0x05, 0x03, 0xFF, 0x00, 0x00,
- 0x00, 0x00);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETSCR,
- 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
- 0x00);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETDISP, 0xF0, 0x12, 0x30);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETEQ,
- 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
- 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETBGP, 0x08, 0x08);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETEXTC,
+ 0xF1, 0x12, 0x83);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETRGBIF,
+ 0x10, 0x10, 0x05, 0x05, 0x03, 0xFF, 0x00, 0x00,
+ 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETDISP, 0xF0, 0x12, 0x30);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETEQ,
+ 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETBGP, 0x08, 0x08);
msleep(20);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F);
- dsi_generic_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1,
- 0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12,
- 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
- 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
- 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
- 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
- 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
- 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP2,
- 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
- 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
- 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
- 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0A,
- 0xA5, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETGAMMA,
- 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 0x37,
- 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 0x11,
- 0x18, 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41,
- 0x37, 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10,
- 0x11, 0x18);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1,
+ 0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12,
+ 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
+ 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
+ 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP2,
+ 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
+ 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0A,
+ 0xA5, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETGAMMA,
+ 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 0x37,
+ 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 0x11,
+ 0x18, 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41,
+ 0x37, 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10,
+ 0x11, 0x18);
return 0;
}
@@ -162,15 +154,6 @@ static const struct st7703_panel_desc jh057n00900_panel_desc = {
.init_sequence = jh057n_init_sequence,
};
-#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
-
static int xbd599_init_sequence(struct st7703 *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -180,154 +163,154 @@ static int xbd599_init_sequence(struct st7703 *ctx)
*/
/* Magic sequence to unlock user commands below. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xF1, 0x12, 0x83);
-
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI,
- 0x33, /* VC_main = 0, Lane_Number = 3 (4 lanes) */
- 0x81, /* DSI_LDO_SEL = 1.7V, RTERM = 90 Ohm */
- 0x05, /* IHSRX = x6 (Low High Speed driving ability) */
- 0xF9, /* TX_CLK_SEL = fDSICLK/16 */
- 0x0E, /* HFP_OSC (min. HFP number in DSI mode) */
- 0x0E, /* HBP_OSC (min. HBP number in DSI mode) */
- /* The rest is undocumented in ST7703 datasheet */
- 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x44, 0x25, 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02,
- 0x4F, 0x11, 0x00, 0x00, 0x37);
-
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT,
- 0x25, /* PCCS = 2, ECP_DC_DIV = 1/4 HSYNC */
- 0x22, /* DT = 15ms XDK_ECP = x2 */
- 0x20, /* PFM_DC_DIV = /1 */
- 0x03 /* ECP_SYNC_EN = 1, VGX_SYNC_EN = 1 */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xF1, 0x12, 0x83);
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI,
+ 0x33, /* VC_main = 0, Lane_Number = 3 (4 lanes) */
+ 0x81, /* DSI_LDO_SEL = 1.7V, RTERM = 90 Ohm */
+ 0x05, /* IHSRX = x6 (Low High Speed driving ability) */
+ 0xF9, /* TX_CLK_SEL = fDSICLK/16 */
+ 0x0E, /* HFP_OSC (min. HFP number in DSI mode) */
+ 0x0E, /* HBP_OSC (min. HBP number in DSI mode) */
+ /* The rest is undocumented in ST7703 datasheet */
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x44, 0x25, 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02,
+ 0x4F, 0x11, 0x00, 0x00, 0x37);
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT,
+ 0x25, /* PCCS = 2, ECP_DC_DIV = 1/4 HSYNC */
+ 0x22, /* DT = 15ms XDK_ECP = x2 */
+ 0x20, /* PFM_DC_DIV = /1 */
+ 0x03 /* ECP_SYNC_EN = 1, VGX_SYNC_EN = 1 */);
/* RGB I/F porch timing */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF,
- 0x10, /* VBP_RGB_GEN */
- 0x10, /* VFP_RGB_GEN */
- 0x05, /* DE_BP_RGB_GEN */
- 0x05, /* DE_FP_RGB_GEN */
- /* The rest is undocumented in ST7703 datasheet */
- 0x03, 0xFF,
- 0x00, 0x00,
- 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF,
+ 0x10, /* VBP_RGB_GEN */
+ 0x10, /* VFP_RGB_GEN */
+ 0x05, /* DE_BP_RGB_GEN */
+ 0x05, /* DE_FP_RGB_GEN */
+ /* The rest is undocumented in ST7703 datasheet */
+ 0x03, 0xFF,
+ 0x00, 0x00,
+ 0x00, 0x00);
/* Source driving settings. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR,
- 0x73, /* N_POPON */
- 0x73, /* N_NOPON */
- 0x50, /* I_POPON */
- 0x50, /* I_NOPON */
- 0x00, /* SCR[31,24] */
- 0xC0, /* SCR[23,16] */
- 0x08, /* SCR[15,8] */
- 0x70, /* SCR[7,0] */
- 0x00 /* Undocumented */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR,
+ 0x73, /* N_POPON */
+ 0x73, /* N_NOPON */
+ 0x50, /* I_POPON */
+ 0x50, /* I_NOPON */
+ 0x00, /* SCR[31,24] */
+ 0xC0, /* SCR[23,16] */
+ 0x08, /* SCR[15,8] */
+ 0x70, /* SCR[7,0] */
+ 0x00 /* Undocumented */);
/* NVDDD_SEL = -1.8V, VDDD_SEL = out of range (possibly 1.9V?) */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
/*
* SS_PANEL = 1 (reverse scan), GS_PANEL = 0 (normal scan)
* REV_PANEL = 1 (normally black panel), BGR_PANEL = 1 (BGR)
*/
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
/* Zig-Zag Type C column inversion. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
/* Set display resolution. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP,
- 0xF0, /* NL = 240 */
- 0x12, /* RES_V_LSB = 0, BLK_CON = VSSD,
- * RESO_SEL = 720RGB
- */
- 0xF0 /* WHITE_GND_EN = 1 (GND),
- * WHITE_FRAME_SEL = 7 frames,
- * ISC = 0 frames
- */);
-
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ,
- 0x00, /* PNOEQ */
- 0x00, /* NNOEQ */
- 0x0B, /* PEQGND */
- 0x0B, /* NEQGND */
- 0x10, /* PEQVCI */
- 0x10, /* NEQVCI */
- 0x00, /* PEQVCI1 */
- 0x00, /* NEQVCI1 */
- 0x00, /* reserved */
- 0x00, /* reserved */
- 0xFF, /* reserved */
- 0x00, /* reserved */
- 0xC0, /* ESD_DET_DATA_WHITE = 1, ESD_WHITE_EN = 1 */
- 0x10 /* SLPIN_OPTION = 1 (no need vsync after sleep-in)
- * VEDIO_NO_CHECK_EN = 0
- * ESD_WHITE_GND_EN = 0
- * ESD_DET_TIME_SEL = 0 frames
- */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP,
+ 0xF0, /* NL = 240 */
+ 0x12, /* RES_V_LSB = 0, BLK_CON = VSSD,
+ * RESO_SEL = 720RGB
+ */
+ 0xF0 /* WHITE_GND_EN = 1 (GND),
+ * WHITE_FRAME_SEL = 7 frames,
+ * ISC = 0 frames
+ */);
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ,
+ 0x00, /* PNOEQ */
+ 0x00, /* NNOEQ */
+ 0x0B, /* PEQGND */
+ 0x0B, /* NEQGND */
+ 0x10, /* PEQVCI */
+ 0x10, /* NEQVCI */
+ 0x00, /* PEQVCI1 */
+ 0x00, /* NEQVCI1 */
+ 0x00, /* reserved */
+ 0x00, /* reserved */
+ 0xFF, /* reserved */
+ 0x00, /* reserved */
+ 0xC0, /* ESD_DET_DATA_WHITE = 1, ESD_WHITE_EN = 1 */
+ 0x10 /* SLPIN_OPTION = 1 (no need vsync after sleep-in)
+ * VEDIO_NO_CHECK_EN = 0
+ * ESD_WHITE_GND_EN = 0
+ * ESD_DET_TIME_SEL = 0 frames
+ */);
/* Undocumented command. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_C6, 0x01, 0x00, 0xFF, 0xFF, 0x00);
-
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER,
- 0x74, /* VBTHS, VBTLS: VGH = 17V, VBL = -11V */
- 0x00, /* FBOFF_VGH = 0, FBOFF_VGL = 0 */
- 0x32, /* VRP */
- 0x32, /* VRN */
- 0x77, /* reserved */
- 0xF1, /* APS = 1 (small),
- * VGL_DET_EN = 1, VGH_DET_EN = 1,
- * VGL_TURBO = 1, VGH_TURBO = 1
- */
- 0xFF, /* VGH1_L_DIV, VGL1_L_DIV (1.5MHz) */
- 0xFF, /* VGH1_R_DIV, VGL1_R_DIV (1.5MHz) */
- 0xCC, /* VGH2_L_DIV, VGL2_L_DIV (2.6MHz) */
- 0xCC, /* VGH2_R_DIV, VGL2_R_DIV (2.6MHz) */
- 0x77, /* VGH3_L_DIV, VGL3_L_DIV (4.5MHz) */
- 0x77 /* VGH3_R_DIV, VGL3_R_DIV (4.5MHz) */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_C6, 0x01, 0x00, 0xFF, 0xFF, 0x00);
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER,
+ 0x74, /* VBTHS, VBTLS: VGH = 17V, VBL = -11V */
+ 0x00, /* FBOFF_VGH = 0, FBOFF_VGL = 0 */
+ 0x32, /* VRP */
+ 0x32, /* VRN */
+ 0x77, /* reserved */
+ 0xF1, /* APS = 1 (small),
+ * VGL_DET_EN = 1, VGH_DET_EN = 1,
+ * VGL_TURBO = 1, VGH_TURBO = 1
+ */
+ 0xFF, /* VGH1_L_DIV, VGL1_L_DIV (1.5MHz) */
+ 0xFF, /* VGH1_R_DIV, VGL1_R_DIV (1.5MHz) */
+ 0xCC, /* VGH2_L_DIV, VGL2_L_DIV (2.6MHz) */
+ 0xCC, /* VGH2_R_DIV, VGL2_R_DIV (2.6MHz) */
+ 0x77, /* VGH3_L_DIV, VGL3_L_DIV (4.5MHz) */
+ 0x77 /* VGH3_R_DIV, VGL3_R_DIV (4.5MHz) */);
/* Reference voltage. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP,
- 0x07, /* VREF_SEL = 4.2V */
- 0x07 /* NVREF_SEL = 4.2V */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP,
+ 0x07, /* VREF_SEL = 4.2V */
+ 0x07 /* NVREF_SEL = 4.2V */);
msleep(20);
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM,
- 0x2C, /* VCOMDC_F = -0.67V */
- 0x2C /* VCOMDC_B = -0.67V */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM,
+ 0x2C, /* VCOMDC_F = -0.67V */
+ 0x2C /* VCOMDC_B = -0.67V */);
/* Undocumented command. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
/* This command is to set forward GIP timing. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1,
- 0x82, 0x10, 0x06, 0x05, 0xA2, 0x0A, 0xA5, 0x12,
- 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
- 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
- 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
- 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
- 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
- 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1,
+ 0x82, 0x10, 0x06, 0x05, 0xA2, 0x0A, 0xA5, 0x12,
+ 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
+ 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
+ 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
/* This command is to set backward GIP timing. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2,
- 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
- 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
- 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
- 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x0A,
- 0xA5, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2,
+ 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
+ 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x0A,
+ 0xA5, 0x00, 0x00, 0x00, 0x00);
/* Adjust the gamma characteristics of the panel. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA,
- 0x00, 0x09, 0x0D, 0x23, 0x27, 0x3C, 0x41, 0x35,
- 0x07, 0x0D, 0x0E, 0x12, 0x13, 0x10, 0x12, 0x12,
- 0x18, 0x00, 0x09, 0x0D, 0x23, 0x27, 0x3C, 0x41,
- 0x35, 0x07, 0x0D, 0x0E, 0x12, 0x13, 0x10, 0x12,
- 0x12, 0x18);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA,
+ 0x00, 0x09, 0x0D, 0x23, 0x27, 0x3C, 0x41, 0x35,
+ 0x07, 0x0D, 0x0E, 0x12, 0x13, 0x10, 0x12, 0x12,
+ 0x18, 0x00, 0x09, 0x0D, 0x23, 0x27, 0x3C, 0x41,
+ 0x35, 0x07, 0x0D, 0x0E, 0x12, 0x13, 0x10, 0x12,
+ 0x12, 0x18);
return 0;
}
@@ -499,7 +482,7 @@ static int allpixelson_set(void *data, u64 val)
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
dev_dbg(ctx->dev, "Setting all pixels on\n");
- dsi_generic_write_seq(dsi, ST7703_CMD_ALL_PIXEL_ON);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_ALL_PIXEL_ON);
msleep(val * 1000);
/* Reset the panel to get video back */
drm_panel_disable(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index fa9be3c299c0..ee5d20ecc577 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -33,14 +33,6 @@ struct truly_nt35521 *to_truly_nt35521(struct drm_panel *panel)
return container_of(panel, struct truly_nt35521, panel);
}
-#define dsi_generic_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void truly_nt35521_reset(struct truly_nt35521 *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
@@ -59,200 +51,200 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
- dsi_generic_write_seq(dsi, 0xff, 0xaa, 0x55, 0xa5, 0x80);
- dsi_generic_write_seq(dsi, 0x6f, 0x11, 0x00);
- dsi_generic_write_seq(dsi, 0xf7, 0x20, 0x00);
- dsi_generic_write_seq(dsi, 0x6f, 0x01);
- dsi_generic_write_seq(dsi, 0xb1, 0x21);
- dsi_generic_write_seq(dsi, 0xbd, 0x01, 0xa0, 0x10, 0x08, 0x01);
- dsi_generic_write_seq(dsi, 0xb8, 0x01, 0x02, 0x0c, 0x02);
- dsi_generic_write_seq(dsi, 0xbb, 0x11, 0x11);
- dsi_generic_write_seq(dsi, 0xbc, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xb6, 0x02);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01);
- dsi_generic_write_seq(dsi, 0xb0, 0x09, 0x09);
- dsi_generic_write_seq(dsi, 0xb1, 0x09, 0x09);
- dsi_generic_write_seq(dsi, 0xbc, 0x8c, 0x00);
- dsi_generic_write_seq(dsi, 0xbd, 0x8c, 0x00);
- dsi_generic_write_seq(dsi, 0xca, 0x00);
- dsi_generic_write_seq(dsi, 0xc0, 0x04);
- dsi_generic_write_seq(dsi, 0xbe, 0xb5);
- dsi_generic_write_seq(dsi, 0xb3, 0x35, 0x35);
- dsi_generic_write_seq(dsi, 0xb4, 0x25, 0x25);
- dsi_generic_write_seq(dsi, 0xb9, 0x43, 0x43);
- dsi_generic_write_seq(dsi, 0xba, 0x24, 0x24);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02);
- dsi_generic_write_seq(dsi, 0xee, 0x03);
- dsi_generic_write_seq(dsi, 0xb0,
- 0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3,
- 0x00, 0xce, 0x00, 0xe1, 0x00, 0xf3, 0x01, 0x11);
- dsi_generic_write_seq(dsi, 0xb1,
- 0x01, 0x2e, 0x01, 0x5c, 0x01, 0x82, 0x01, 0xc3,
- 0x01, 0xfe, 0x02, 0x00, 0x02, 0x37, 0x02, 0x77);
- dsi_generic_write_seq(dsi, 0xb2,
- 0x02, 0xa1, 0x02, 0xd7, 0x02, 0xfe, 0x03, 0x2c,
- 0x03, 0x4b, 0x03, 0x63, 0x03, 0x8f, 0x03, 0x90);
- dsi_generic_write_seq(dsi, 0xb3, 0x03, 0x96, 0x03, 0x98);
- dsi_generic_write_seq(dsi, 0xb4,
- 0x00, 0x81, 0x00, 0x8b, 0x00, 0x9c, 0x00, 0xa9,
- 0x00, 0xb5, 0x00, 0xcb, 0x00, 0xdf, 0x01, 0x02);
- dsi_generic_write_seq(dsi, 0xb5,
- 0x01, 0x1f, 0x01, 0x51, 0x01, 0x7a, 0x01, 0xbf,
- 0x01, 0xfa, 0x01, 0xfc, 0x02, 0x34, 0x02, 0x76);
- dsi_generic_write_seq(dsi, 0xb6,
- 0x02, 0x9f, 0x02, 0xd7, 0x02, 0xfc, 0x03, 0x2c,
- 0x03, 0x4a, 0x03, 0x63, 0x03, 0x8f, 0x03, 0xa2);
- dsi_generic_write_seq(dsi, 0xb7, 0x03, 0xb8, 0x03, 0xba);
- dsi_generic_write_seq(dsi, 0xb8,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x2a,
- 0x00, 0x41, 0x00, 0x67, 0x00, 0x87, 0x00, 0xb9);
- dsi_generic_write_seq(dsi, 0xb9,
- 0x00, 0xe2, 0x01, 0x22, 0x01, 0x54, 0x01, 0xa3,
- 0x01, 0xe6, 0x01, 0xe7, 0x02, 0x24, 0x02, 0x67);
- dsi_generic_write_seq(dsi, 0xba,
- 0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31,
- 0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4);
- dsi_generic_write_seq(dsi, 0xbb, 0x03, 0xf6, 0x03, 0xf7);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03);
- dsi_generic_write_seq(dsi, 0xb0, 0x22, 0x00);
- dsi_generic_write_seq(dsi, 0xb1, 0x22, 0x00);
- dsi_generic_write_seq(dsi, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xb3, 0x05, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xb4, 0x05, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xb5, 0x05, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xba, 0x53, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xbb, 0x53, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xbc, 0x53, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xbd, 0x53, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xc0, 0x00, 0x34, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xc1, 0x00, 0x00, 0x34, 0x00);
- dsi_generic_write_seq(dsi, 0xc2, 0x00, 0x00, 0x34, 0x00);
- dsi_generic_write_seq(dsi, 0xc3, 0x00, 0x00, 0x34, 0x00);
- dsi_generic_write_seq(dsi, 0xc4, 0x60);
- dsi_generic_write_seq(dsi, 0xc5, 0xc0);
- dsi_generic_write_seq(dsi, 0xc6, 0x00);
- dsi_generic_write_seq(dsi, 0xc7, 0x00);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05);
- dsi_generic_write_seq(dsi, 0xb0, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb1, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb2, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb3, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb4, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb5, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb6, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb7, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb8, 0x00);
- dsi_generic_write_seq(dsi, 0xb9, 0x00, 0x03);
- dsi_generic_write_seq(dsi, 0xba, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xbb, 0x02, 0x03);
- dsi_generic_write_seq(dsi, 0xbc, 0x02, 0x03);
- dsi_generic_write_seq(dsi, 0xbd, 0x03, 0x03, 0x00, 0x03, 0x03);
- dsi_generic_write_seq(dsi, 0xc0, 0x0b);
- dsi_generic_write_seq(dsi, 0xc1, 0x09);
- dsi_generic_write_seq(dsi, 0xc2, 0xa6);
- dsi_generic_write_seq(dsi, 0xc3, 0x05);
- dsi_generic_write_seq(dsi, 0xc4, 0x00);
- dsi_generic_write_seq(dsi, 0xc5, 0x02);
- dsi_generic_write_seq(dsi, 0xc6, 0x22);
- dsi_generic_write_seq(dsi, 0xc7, 0x03);
- dsi_generic_write_seq(dsi, 0xc8, 0x07, 0x20);
- dsi_generic_write_seq(dsi, 0xc9, 0x03, 0x20);
- dsi_generic_write_seq(dsi, 0xca, 0x01, 0x60);
- dsi_generic_write_seq(dsi, 0xcb, 0x01, 0x60);
- dsi_generic_write_seq(dsi, 0xcc, 0x00, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xcd, 0x00, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xce, 0x00, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xcf, 0x00, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xd1, 0x00, 0x05, 0x01, 0x07, 0x10);
- dsi_generic_write_seq(dsi, 0xd2, 0x10, 0x05, 0x05, 0x03, 0x10);
- dsi_generic_write_seq(dsi, 0xd3, 0x20, 0x00, 0x43, 0x07, 0x10);
- dsi_generic_write_seq(dsi, 0xd4, 0x30, 0x00, 0x43, 0x07, 0x10);
- dsi_generic_write_seq(dsi, 0xd0,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xd5,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xd6,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xd7,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xe5, 0x06);
- dsi_generic_write_seq(dsi, 0xe6, 0x06);
- dsi_generic_write_seq(dsi, 0xe7, 0x00);
- dsi_generic_write_seq(dsi, 0xe8, 0x06);
- dsi_generic_write_seq(dsi, 0xe9, 0x06);
- dsi_generic_write_seq(dsi, 0xea, 0x06);
- dsi_generic_write_seq(dsi, 0xeb, 0x00);
- dsi_generic_write_seq(dsi, 0xec, 0x00);
- dsi_generic_write_seq(dsi, 0xed, 0x30);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06);
- dsi_generic_write_seq(dsi, 0xb0, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xb1, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xb2, 0x2d, 0x2e);
- dsi_generic_write_seq(dsi, 0xb3, 0x31, 0x34);
- dsi_generic_write_seq(dsi, 0xb4, 0x29, 0x2a);
- dsi_generic_write_seq(dsi, 0xb5, 0x12, 0x10);
- dsi_generic_write_seq(dsi, 0xb6, 0x18, 0x16);
- dsi_generic_write_seq(dsi, 0xb7, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xb8, 0x08, 0x31);
- dsi_generic_write_seq(dsi, 0xb9, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xba, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xbb, 0x31, 0x08);
- dsi_generic_write_seq(dsi, 0xbc, 0x03, 0x01);
- dsi_generic_write_seq(dsi, 0xbd, 0x17, 0x19);
- dsi_generic_write_seq(dsi, 0xbe, 0x11, 0x13);
- dsi_generic_write_seq(dsi, 0xbf, 0x2a, 0x29);
- dsi_generic_write_seq(dsi, 0xc0, 0x34, 0x31);
- dsi_generic_write_seq(dsi, 0xc1, 0x2e, 0x2d);
- dsi_generic_write_seq(dsi, 0xc2, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xc3, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xc4, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xc5, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xc6, 0x2e, 0x2d);
- dsi_generic_write_seq(dsi, 0xc7, 0x31, 0x34);
- dsi_generic_write_seq(dsi, 0xc8, 0x29, 0x2a);
- dsi_generic_write_seq(dsi, 0xc9, 0x17, 0x19);
- dsi_generic_write_seq(dsi, 0xca, 0x11, 0x13);
- dsi_generic_write_seq(dsi, 0xcb, 0x03, 0x01);
- dsi_generic_write_seq(dsi, 0xcc, 0x08, 0x31);
- dsi_generic_write_seq(dsi, 0xcd, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xce, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xcf, 0x31, 0x08);
- dsi_generic_write_seq(dsi, 0xd0, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xd1, 0x12, 0x10);
- dsi_generic_write_seq(dsi, 0xd2, 0x18, 0x16);
- dsi_generic_write_seq(dsi, 0xd3, 0x2a, 0x29);
- dsi_generic_write_seq(dsi, 0xd4, 0x34, 0x31);
- dsi_generic_write_seq(dsi, 0xd5, 0x2d, 0x2e);
- dsi_generic_write_seq(dsi, 0xd6, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xd7, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xe5, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xe6, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xe7, 0x00);
- dsi_generic_write_seq(dsi, 0x6f, 0x02);
- dsi_generic_write_seq(dsi, 0xf7, 0x47);
- dsi_generic_write_seq(dsi, 0x6f, 0x0a);
- dsi_generic_write_seq(dsi, 0xf7, 0x02);
- dsi_generic_write_seq(dsi, 0x6f, 0x17);
- dsi_generic_write_seq(dsi, 0xf4, 0x60);
- dsi_generic_write_seq(dsi, 0x6f, 0x01);
- dsi_generic_write_seq(dsi, 0xf9, 0x46);
- dsi_generic_write_seq(dsi, 0x6f, 0x11);
- dsi_generic_write_seq(dsi, 0xf3, 0x01);
- dsi_generic_write_seq(dsi, 0x35, 0x00);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
- dsi_generic_write_seq(dsi, 0xd9, 0x02, 0x03, 0x00);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
- dsi_generic_write_seq(dsi, 0xb1, 0x6c, 0x21);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0x35, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xff, 0xaa, 0x55, 0xa5, 0x80);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x11, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf7, 0x20, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x21);
+ mipi_dsi_generic_write_seq(dsi, 0xbd, 0x01, 0xa0, 0x10, 0x08, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xb8, 0x01, 0x02, 0x0c, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xbb, 0x11, 0x11);
+ mipi_dsi_generic_write_seq(dsi, 0xbc, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb6, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x09, 0x09);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x09, 0x09);
+ mipi_dsi_generic_write_seq(dsi, 0xbc, 0x8c, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xbd, 0x8c, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xca, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc0, 0x04);
+ mipi_dsi_generic_write_seq(dsi, 0xbe, 0xb5);
+ mipi_dsi_generic_write_seq(dsi, 0xb3, 0x35, 0x35);
+ mipi_dsi_generic_write_seq(dsi, 0xb4, 0x25, 0x25);
+ mipi_dsi_generic_write_seq(dsi, 0xb9, 0x43, 0x43);
+ mipi_dsi_generic_write_seq(dsi, 0xba, 0x24, 0x24);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xee, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xb0,
+ 0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3,
+ 0x00, 0xce, 0x00, 0xe1, 0x00, 0xf3, 0x01, 0x11);
+ mipi_dsi_generic_write_seq(dsi, 0xb1,
+ 0x01, 0x2e, 0x01, 0x5c, 0x01, 0x82, 0x01, 0xc3,
+ 0x01, 0xfe, 0x02, 0x00, 0x02, 0x37, 0x02, 0x77);
+ mipi_dsi_generic_write_seq(dsi, 0xb2,
+ 0x02, 0xa1, 0x02, 0xd7, 0x02, 0xfe, 0x03, 0x2c,
+ 0x03, 0x4b, 0x03, 0x63, 0x03, 0x8f, 0x03, 0x90);
+ mipi_dsi_generic_write_seq(dsi, 0xb3, 0x03, 0x96, 0x03, 0x98);
+ mipi_dsi_generic_write_seq(dsi, 0xb4,
+ 0x00, 0x81, 0x00, 0x8b, 0x00, 0x9c, 0x00, 0xa9,
+ 0x00, 0xb5, 0x00, 0xcb, 0x00, 0xdf, 0x01, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xb5,
+ 0x01, 0x1f, 0x01, 0x51, 0x01, 0x7a, 0x01, 0xbf,
+ 0x01, 0xfa, 0x01, 0xfc, 0x02, 0x34, 0x02, 0x76);
+ mipi_dsi_generic_write_seq(dsi, 0xb6,
+ 0x02, 0x9f, 0x02, 0xd7, 0x02, 0xfc, 0x03, 0x2c,
+ 0x03, 0x4a, 0x03, 0x63, 0x03, 0x8f, 0x03, 0xa2);
+ mipi_dsi_generic_write_seq(dsi, 0xb7, 0x03, 0xb8, 0x03, 0xba);
+ mipi_dsi_generic_write_seq(dsi, 0xb8,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x2a,
+ 0x00, 0x41, 0x00, 0x67, 0x00, 0x87, 0x00, 0xb9);
+ mipi_dsi_generic_write_seq(dsi, 0xb9,
+ 0x00, 0xe2, 0x01, 0x22, 0x01, 0x54, 0x01, 0xa3,
+ 0x01, 0xe6, 0x01, 0xe7, 0x02, 0x24, 0x02, 0x67);
+ mipi_dsi_generic_write_seq(dsi, 0xba,
+ 0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31,
+ 0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4);
+ mipi_dsi_generic_write_seq(dsi, 0xbb, 0x03, 0xf6, 0x03, 0xf7);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x22, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x22, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb3, 0x05, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb4, 0x05, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb5, 0x05, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xba, 0x53, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xbb, 0x53, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xbc, 0x53, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xbd, 0x53, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc0, 0x00, 0x34, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc1, 0x00, 0x00, 0x34, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc2, 0x00, 0x00, 0x34, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc3, 0x00, 0x00, 0x34, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc4, 0x60);
+ mipi_dsi_generic_write_seq(dsi, 0xc5, 0xc0);
+ mipi_dsi_generic_write_seq(dsi, 0xc6, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc7, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb2, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb3, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb4, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb5, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb6, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb7, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb8, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb9, 0x00, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xba, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xbb, 0x02, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xbc, 0x02, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xbd, 0x03, 0x03, 0x00, 0x03, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xc0, 0x0b);
+ mipi_dsi_generic_write_seq(dsi, 0xc1, 0x09);
+ mipi_dsi_generic_write_seq(dsi, 0xc2, 0xa6);
+ mipi_dsi_generic_write_seq(dsi, 0xc3, 0x05);
+ mipi_dsi_generic_write_seq(dsi, 0xc4, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc5, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xc6, 0x22);
+ mipi_dsi_generic_write_seq(dsi, 0xc7, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xc8, 0x07, 0x20);
+ mipi_dsi_generic_write_seq(dsi, 0xc9, 0x03, 0x20);
+ mipi_dsi_generic_write_seq(dsi, 0xca, 0x01, 0x60);
+ mipi_dsi_generic_write_seq(dsi, 0xcb, 0x01, 0x60);
+ mipi_dsi_generic_write_seq(dsi, 0xcc, 0x00, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xcd, 0x00, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xce, 0x00, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xcf, 0x00, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xd1, 0x00, 0x05, 0x01, 0x07, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xd2, 0x10, 0x05, 0x05, 0x03, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xd3, 0x20, 0x00, 0x43, 0x07, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xd4, 0x30, 0x00, 0x43, 0x07, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xe5, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xe6, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xe7, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xe8, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xe9, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xea, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xeb, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xec, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xed, 0x30);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xb2, 0x2d, 0x2e);
+ mipi_dsi_generic_write_seq(dsi, 0xb3, 0x31, 0x34);
+ mipi_dsi_generic_write_seq(dsi, 0xb4, 0x29, 0x2a);
+ mipi_dsi_generic_write_seq(dsi, 0xb5, 0x12, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xb6, 0x18, 0x16);
+ mipi_dsi_generic_write_seq(dsi, 0xb7, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xb8, 0x08, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xb9, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xba, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xbb, 0x31, 0x08);
+ mipi_dsi_generic_write_seq(dsi, 0xbc, 0x03, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xbd, 0x17, 0x19);
+ mipi_dsi_generic_write_seq(dsi, 0xbe, 0x11, 0x13);
+ mipi_dsi_generic_write_seq(dsi, 0xbf, 0x2a, 0x29);
+ mipi_dsi_generic_write_seq(dsi, 0xc0, 0x34, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xc1, 0x2e, 0x2d);
+ mipi_dsi_generic_write_seq(dsi, 0xc2, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xc3, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xc4, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xc5, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xc6, 0x2e, 0x2d);
+ mipi_dsi_generic_write_seq(dsi, 0xc7, 0x31, 0x34);
+ mipi_dsi_generic_write_seq(dsi, 0xc8, 0x29, 0x2a);
+ mipi_dsi_generic_write_seq(dsi, 0xc9, 0x17, 0x19);
+ mipi_dsi_generic_write_seq(dsi, 0xca, 0x11, 0x13);
+ mipi_dsi_generic_write_seq(dsi, 0xcb, 0x03, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xcc, 0x08, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xcd, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xce, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xcf, 0x31, 0x08);
+ mipi_dsi_generic_write_seq(dsi, 0xd0, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xd1, 0x12, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xd2, 0x18, 0x16);
+ mipi_dsi_generic_write_seq(dsi, 0xd3, 0x2a, 0x29);
+ mipi_dsi_generic_write_seq(dsi, 0xd4, 0x34, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xd5, 0x2d, 0x2e);
+ mipi_dsi_generic_write_seq(dsi, 0xd6, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xd7, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xe5, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xe6, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xe7, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xf7, 0x47);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x0a);
+ mipi_dsi_generic_write_seq(dsi, 0xf7, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x17);
+ mipi_dsi_generic_write_seq(dsi, 0xf4, 0x60);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xf9, 0x46);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x11);
+ mipi_dsi_generic_write_seq(dsi, 0xf3, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0x35, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd9, 0x02, 0x03, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x6c, 0x21);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0x35, 0x00);
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
@@ -268,7 +260,7 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
}
usleep_range(1000, 2000);
- dsi_generic_write_seq(dsi, 0x53, 0x24);
+ mipi_dsi_generic_write_seq(dsi, 0x53, 0x24);
return 0;
}
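
The hunk above finishes converting this panel driver from its private dsi_generic_write_seq() macro to the shared mipi_dsi_generic_write_seq() helper from <drm/drm_mipi_dsi.h>. A minimal sketch of an init sequence using the shared helper follows; the command bytes and the function name are illustrative, not taken from this patch. At the time of this series the helper logs a ratelimited error and returns from the calling function on a failed transfer, so the caller has to return int:

	#include <drm/drm_mipi_dsi.h>

	static int panel_on_sketch(struct mipi_dsi_device *dsi)
	{
		/* Select a vendor command page (illustrative payload). */
		mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);

		/* Enable the tearing effect line on vblank. */
		mipi_dsi_generic_write_seq(dsi, 0x35, 0x00);

		return mipi_dsi_dcs_exit_sleep_mode(dsi);
	}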
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
new file mode 100644
index 000000000000..f9a6abc1e121
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2023, Linaro Limited
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <drm/display/drm_dsc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct visionox_vtdr6130 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[3];
+ bool prepared;
+};
+
+static inline struct visionox_vtdr6130 *to_visionox_vtdr6130(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_vtdr6130, panel);
+}
+
+static void visionox_vtdr6130_reset(struct visionox_vtdr6130 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int visionox_vtdr6130_on(struct visionox_vtdr6130 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret)
+ return ret;
+
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x59, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0x6c, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x6f, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x70,
+ 0x12, 0x00, 0x00, 0xab, 0x30, 0x80, 0x09, 0x60, 0x04,
+ 0x38, 0x00, 0x28, 0x02, 0x1c, 0x02, 0x1c, 0x02, 0x00,
+ 0x02, 0x0e, 0x00, 0x20, 0x03, 0xdd, 0x00, 0x07, 0x00,
+ 0x0c, 0x02, 0x77, 0x02, 0x8b, 0x18, 0x00, 0x10, 0xf0,
+ 0x07, 0x10, 0x20, 0x00, 0x06, 0x0f, 0x0f, 0x33, 0x0e,
+ 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x69, 0x70, 0x77,
+ 0x79, 0x7b, 0x7d, 0x7e, 0x02, 0x02, 0x22, 0x00, 0x2a,
+ 0x40, 0x2a, 0xbe, 0x3a, 0xfc, 0x3a, 0xfa, 0x3a, 0xf8,
+ 0x3b, 0x38, 0x3b, 0x78, 0x3b, 0xb6, 0x4b, 0xb6, 0x4b,
+ 0xf4, 0x4b, 0xf4, 0x6c, 0x34, 0x84, 0x74, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0xb1,
+ 0x01, 0x38, 0x00, 0x14, 0x00, 0x1c, 0x00, 0x01, 0x66,
+ 0x00, 0x14, 0x00, 0x14, 0x00, 0x01, 0x66, 0x00, 0x14,
+ 0x05, 0xcc, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, 0xce,
+ 0x09, 0x11, 0x09, 0x11, 0x08, 0xc1, 0x07, 0xfa, 0x05,
+ 0xa4, 0x00, 0x3c, 0x00, 0x34, 0x00, 0x24, 0x00, 0x0c,
+ 0x00, 0x0c, 0x04, 0x00, 0x35);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x03, 0x33);
+ mipi_dsi_dcs_write_seq(dsi, 0xb4,
+ 0x00, 0x33, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00,
+ 0x3e, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb5,
+ 0x00, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x06, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0x00, 0x08, 0x09, 0x09, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0xbc,
+ 0x10, 0x00, 0x00, 0x06, 0x11, 0x09, 0x3b, 0x09, 0x47,
+ 0x09, 0x47, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xbe,
+ 0x10, 0x10, 0x00, 0x08, 0x22, 0x09, 0x19, 0x09, 0x25,
+ 0x09, 0x25, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0x65, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0xfa, 0x08, 0x08, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x81);
+ mipi_dsi_dcs_write_seq(dsi, 0x65, 0x05);
+ mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x82);
+ mipi_dsi_dcs_write_seq(dsi, 0xf9, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x51, 0x83);
+ mipi_dsi_dcs_write_seq(dsi, 0x65, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xf8, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x65, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x9a);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x00);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ return 0;
+}
+
+static int visionox_vtdr6130_off(struct visionox_vtdr6130 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static int visionox_vtdr6130_prepare(struct drm_panel *panel)
+{
+ struct visionox_vtdr6130 *ctx = to_visionox_vtdr6130(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ visionox_vtdr6130_reset(ctx);
+
+ ret = visionox_vtdr6130_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int visionox_vtdr6130_unprepare(struct drm_panel *panel)
+{
+ struct visionox_vtdr6130 *ctx = to_visionox_vtdr6130(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = visionox_vtdr6130_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode visionox_vtdr6130_mode = {
+ .clock = (1080 + 20 + 2 + 20) * (2400 + 20 + 2 + 18) * 144 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 20,
+ .hsync_end = 1080 + 20 + 2,
+ .htotal = 1080 + 20 + 2 + 20,
+ .vdisplay = 2400,
+ .vsync_start = 2400 + 20,
+ .vsync_end = 2400 + 20 + 2,
+ .vtotal = 2400 + 20 + 2 + 18,
+ .width_mm = 71,
+ .height_mm = 157,
+};
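+
+For reference, the .clock value above is the usual htotal * vtotal * refresh / 1000 expression evaluated at build time; spelled out (in kHz, as drm_display_mode.clock expects):
+
+	/*
+	 * htotal = 1080 + 20 + 2 + 20 = 1122
+	 * vtotal = 2400 + 20 + 2 + 18 = 2440
+	 * clock  = 1122 * 2440 * 144 / 1000 = 394225 kHz (about 394 MHz),
+	 * i.e. a 1080x2400 panel refreshed at 144 Hz.
+	 */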
+
+static int visionox_vtdr6130_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &visionox_vtdr6130_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs visionox_vtdr6130_panel_funcs = {
+ .prepare = visionox_vtdr6130_prepare,
+ .unprepare = visionox_vtdr6130_unprepare,
+ .get_modes = visionox_vtdr6130_get_modes,
+};
+
+static int visionox_vtdr6130_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ /* Panel needs big-endian order of brightness value */
+ u8 payload[2] = { brightness >> 8, brightness & 0xff };
+ int ret;
+
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ payload, sizeof(payload));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct backlight_ops visionox_vtdr6130_bl_ops = {
+ .update_status = visionox_vtdr6130_bl_update_status,
+};
+
+static struct backlight_device *
+visionox_vtdr6130_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 4095,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &visionox_vtdr6130_bl_ops, &props);
+}
+
+static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_vtdr6130 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vddio";
+ ctx->supplies[1].supply = "vci";
+ ctx->supplies[2].supply = "vdd";
+
+ ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &visionox_vtdr6130_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ctx->panel.backlight = visionox_vtdr6130_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void visionox_vtdr6130_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_vtdr6130 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id visionox_vtdr6130_of_match[] = {
+ { .compatible = "visionox,vtdr6130" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_vtdr6130_of_match);
+
+static struct mipi_dsi_driver visionox_vtdr6130_driver = {
+ .probe = visionox_vtdr6130_probe,
+ .remove = visionox_vtdr6130_remove,
+ .driver = {
+ .name = "panel-visionox-vtdr6130",
+ .of_match_table = visionox_vtdr6130_of_match,
+ },
+};
+module_mipi_dsi_driver(visionox_vtdr6130_driver);
+
+MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
+MODULE_DESCRIPTION("Panel driver for the Visionox VTDR6130 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 2c54733ee241..8670386498a4 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -60,14 +60,6 @@ static inline struct xpp055c272 *panel_to_xpp055c272(struct drm_panel *panel)
return container_of(panel, struct xpp055c272, panel);
}
-#define dsi_generic_write_seq(dsi, cmd, seq...) do { \
- static const u8 b[] = { cmd, seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, b, ARRAY_SIZE(b)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -77,60 +69,60 @@ static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETMIPI,
- 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
- 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
- 0x00, 0x00, 0x37);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPOWER_EXT, 0x25);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETRGBIF,
- 0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
- 0x00, 0x00);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETSCR,
- 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
- 0x00);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETVDC, 0x46);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPANEL, 0x0b);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETCYC, 0x80);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETEQ,
- 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
- 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPOWER,
- 0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
- 0x67, 0x77, 0x33, 0x33);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
- 0xff, 0x01, 0xff);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETBGP, 0x09, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETMIPI,
+ 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
+ 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
+ 0x00, 0x00, 0x37);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPOWER_EXT, 0x25);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETRGBIF,
+ 0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
+ 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETVDC, 0x46);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPANEL, 0x0b);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETCYC, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETEQ,
+ 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPOWER,
+ 0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
+ 0x67, 0x77, 0x33, 0x33);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
+ 0xff, 0x01, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETBGP, 0x09, 0x09);
msleep(20);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGIP1,
- 0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
- 0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
- 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
- 0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
- 0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
- 0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
- 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGIP2,
- 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
- 0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
- 0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
- 0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
- 0xa0, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGAMMA,
- 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
- 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
- 0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
- 0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
- 0x11, 0x18);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGIP1,
+ 0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
+ 0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
+ 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
+ 0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
+ 0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
+ 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGIP2,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
+ 0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
+ 0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
+ 0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
+ 0xa0, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGAMMA,
+ 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
+ 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
+ 0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
+ 0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
+ 0x11, 0x18);
msleep(60);
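
Note that this driver is converted to mipi_dsi_dcs_write_seq() while the panels above use mipi_dsi_generic_write_seq(): the DCS variant transmits the first byte as a DCS command in a DCS short/long write packet, whereas the generic variant sends the whole payload as a generic write packet for the panel to interpret. A small sketch of the two calls side by side; the payloads are illustrative and the function name is made up:

	#include <drm/drm_mipi_dsi.h>
	#include <video/mipi_display.h>

	static int panel_writes_sketch(struct mipi_dsi_device *dsi)
	{
		/* DCS write: standard command byte plus parameters. */
		mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
				       0x00, 0xff);

		/* Generic write: raw vendor payload. */
		mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);

		return 0;
	}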
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index ee612303f076..fa1a086a862b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -6,6 +6,7 @@
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
@@ -400,8 +401,7 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
panfrost_job_enable_interrupts(pfdev);
}
-#ifdef CONFIG_PM
-int panfrost_device_resume(struct device *dev)
+static int panfrost_device_resume(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
@@ -411,7 +411,7 @@ int panfrost_device_resume(struct device *dev)
return 0;
}
-int panfrost_device_suspend(struct device *dev)
+static int panfrost_device_suspend(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
@@ -423,4 +423,6 @@ int panfrost_device_suspend(struct device *dev)
return 0;
}
-#endif
+
+EXPORT_GPL_RUNTIME_DEV_PM_OPS(panfrost_pm_ops, panfrost_device_suspend,
+ panfrost_device_resume, NULL);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 8b25278f34c8..d9ba68cffb77 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include <linux/io-pgtable.h>
+#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
@@ -172,8 +173,7 @@ int panfrost_device_init(struct panfrost_device *pfdev);
void panfrost_device_fini(struct panfrost_device *pfdev);
void panfrost_device_reset(struct panfrost_device *pfdev);
-int panfrost_device_resume(struct device *dev);
-int panfrost_device_suspend(struct device *dev);
+extern const struct dev_pm_ops panfrost_pm_ops;
enum drm_panfrost_exception_type {
DRM_PANFROST_EXCEPTION_OK = 0x00,
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 2fa5afe21288..fa619fe72086 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -676,17 +676,12 @@ static const struct of_device_id dt_match[] = {
};
MODULE_DEVICE_TABLE(of, dt_match);
-static const struct dev_pm_ops panfrost_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
-};
-
static struct platform_driver panfrost_driver = {
.probe = panfrost_probe,
.remove = panfrost_remove,
.driver = {
.name = "panfrost",
- .pm = &panfrost_pm_ops,
+ .pm = pm_ptr(&panfrost_pm_ops),
.of_match_table = dt_match,
},
};
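
The panfrost change above drops the CONFIG_PM #ifdefs and the hand-rolled dev_pm_ops in favour of EXPORT_GPL_RUNTIME_DEV_PM_OPS() plus pm_ptr(), so the callbacks are always compiled and simply discarded when power management is disabled, while system sleep is routed through pm_runtime_force_suspend()/_resume(). A condensed sketch of the pattern with a made-up "foo" driver:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int foo_runtime_suspend(struct device *dev)
	{
		/* Quiesce the hardware, gate clocks, ... */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		/* Ungate clocks, reinitialise the hardware, ... */
		return 0;
	}

	/* Defines and exports "foo_pm_ops"; system sleep reuses the runtime
	 * PM callbacks via pm_runtime_force_suspend()/_resume(). */
	EXPORT_GPL_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
				      foo_runtime_resume, NULL);

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			/* Evaluates to NULL when CONFIG_PM is not set. */
			.pm = pm_ptr(&foo_pm_ops),
		},
	};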
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 63aa96a69752..281edab518cd 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -579,7 +579,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_upd
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
- int ret;
+ long ret;
ret = qxl_bo_reserve(surf);
if (ret)
@@ -588,7 +588,19 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
if (stall)
mutex_unlock(&qdev->surf_evict_mutex);
- ret = ttm_bo_wait(&surf->tbo, true, !stall);
+ if (stall) {
+ ret = dma_resv_wait_timeout(surf->tbo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, true,
+ 15 * HZ);
+ if (ret > 0)
+ ret = 0;
+ else if (ret == 0)
+ ret = -EBUSY;
+ } else {
+ ret = dma_resv_test_signaled(surf->tbo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP);
+ ret = ret ? -EBUSY : 0;
+ }
if (stall)
mutex_lock(&qdev->surf_evict_mutex);
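
With ttm_bo_wait() gone, qxl now waits on the buffer's reservation object directly. A sketch of the replacement API for the two cases used above, a blocking wait with a timeout and a non-blocking poll; the helper name is made up and takes a bare struct dma_resv pointer:

	#include <linux/dma-resv.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	static int wait_idle_sketch(struct dma_resv *resv, bool block)
	{
		if (block) {
			/* Returns remaining jiffies on success, 0 on timeout,
			 * or a negative error code if interrupted. */
			long t = dma_resv_wait_timeout(resv,
						       DMA_RESV_USAGE_BOOKKEEP,
						       true, 15 * HZ);
			if (t == 0)
				return -EBUSY;
			return t < 0 ? t : 0;
		}

		/* True once every fence tracked for BOOKKEEP usage signalled. */
		return dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP) ?
			0 : -EBUSY;
	}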
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 76f060810f63..ea993d7162e8 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -42,8 +42,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
#include <drm/qxl_drm.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index ee95001e6b5e..a92a5b0d4c25 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -29,10 +29,10 @@
#include <drm/drm_file.h>
#include <drm/drm_debugfs.h>
#include <drm/qxl_drm.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 97a277f9a25e..62a596d3a891 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -15,6 +15,8 @@ config DRM_RADEON
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
+ select I2C
+ select I2C_ALGOBIT
# radeon depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 235e59b547a1..8a6621f1e82c 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4020,7 +4020,7 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH
USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
USHORT usConnObjectId; //Connector Object ID
USHORT usGPUObjectId; //GPU ID
- USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
+ USHORT usGraphicObjIds[]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
}ATOM_DISPLAY_OBJECT_PATH;
typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
@@ -4037,7 +4037,7 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
UCHAR ucNumOfDispPath;
UCHAR ucVersion;
UCHAR ucPadding[2];
- ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
+ ATOM_DISPLAY_OBJECT_PATH asDispPath[];
}ATOM_DISPLAY_OBJECT_PATH_TABLE;
@@ -4053,7 +4053,7 @@ typedef struct _ATOM_OBJECT_TABLE //Above 4 object table
{
UCHAR ucNumberOfObjects;
UCHAR ucPadding[3];
- ATOM_OBJECT asObjects[1];
+ ATOM_OBJECT asObjects[];
}ATOM_OBJECT_TABLE;
typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure
@@ -4615,7 +4615,7 @@ typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
UCHAR ucPhaseDelay; // phase delay in unit of micro second
UCHAR ucReserved;
ULONG ulGpioMaskVal; // GPIO Mask value
- VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
+ VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[];
}ATOM_GPIO_VOLTAGE_OBJECT_V3;
typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
@@ -7964,7 +7964,7 @@ typedef struct {
typedef struct {
VFCT_IMAGE_HEADER VbiosHeader;
- UCHAR VbiosContent[1];
+ UCHAR VbiosContent[];
}GOP_VBIOS_CONTENT;
typedef struct {
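
The atombios.h hunks above replace one-element trailing arrays with C99 flexible array members, the kernel-preferred way to describe a fixed header followed by variable-length data. A small sketch of the pattern and of how such an object is typically sized and allocated; the structure and helper are made up for illustration:

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct vbios_image {
		unsigned int size;	/* number of bytes in data[] */
		unsigned char data[];	/* flexible array member, was data[1] */
	};

	static struct vbios_image *vbios_image_dup(const void *src,
						   unsigned int len)
	{
		/* struct_size() = sizeof(header) + len trailing bytes,
		 * with overflow checking. */
		struct vbios_image *img = kzalloc(struct_size(img, data, len),
						  GFP_KERNEL);

		if (!img)
			return NULL;

		img->size = len;
		memcpy(img->data, src, len);
		return img;
	}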
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2e7161acd443..57e20780a458 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -73,8 +73,7 @@
#include <linux/mmu_notifier.h>
#endif
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_execbuf_util.h>
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6344454a7721..afbb3a80c0c6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1023,6 +1023,7 @@ void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
+ kfree(rdev->mode_info.atom_context->iio);
}
kfree(rdev->mode_info.atom_context);
rdev->mode_info.atom_context = NULL;
@@ -1772,7 +1773,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
bool saved = false;
int i, r;
- int resched;
down_write(&rdev->exclusive_lock);
@@ -1784,8 +1784,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
atomic_inc(&rdev->gpu_reset_counter);
radeon_save_bios_scratch_regs(rdev);
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
@@ -1844,8 +1842,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
/* reset hpd state */
radeon_hpd_init(rdev);
- ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-
rdev->in_reset = true;
rdev->needs_reset = false;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 04c693ca419a..cbc554928bcc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1853,11 +1853,10 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev;
- int resched;
+
rdev = container_of(work, struct radeon_device,
pm.dynpm_idle_work.work);
- resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
int not_processed = 0;
@@ -1908,7 +1907,6 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
- ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
/*
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 42a87948e28c..b3cfc99f4d7e 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -29,6 +29,8 @@
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
+#include <drm/ttm/ttm_tt.h>
+
#include "radeon.h"
#include "radeon_prime.h"
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 30402b5ce4c5..1e8e287e113c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -42,10 +42,10 @@
#include <drm/drm_file.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
#include "radeon_reg.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index d003e8d9e7a2..eeec1e02446f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -599,7 +599,6 @@ static const struct drm_driver rcar_du_driver = {
* Power management
*/
-#ifdef CONFIG_PM_SLEEP
static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
@@ -613,11 +612,9 @@ static int rcar_du_pm_resume(struct device *dev)
return drm_mode_config_helper_resume(&rcdu->ddev);
}
-#endif
-static const struct dev_pm_ops rcar_du_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(rcar_du_pm_suspend, rcar_du_pm_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_du_pm_ops,
+ rcar_du_pm_suspend, rcar_du_pm_resume);
/* -----------------------------------------------------------------------------
* Platform driver
@@ -712,7 +709,7 @@ static struct platform_driver rcar_du_platform_driver = {
.shutdown = rcar_du_shutdown,
.driver = {
.name = "rcar-du",
- .pm = &rcar_du_pm_ops,
+ .pm = pm_sleep_ptr(&rcar_du_pm_ops),
.of_match_table = rcar_du_of_table,
},
};
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index fe09e5be79bd..15d04a0ec623 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -81,7 +81,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
init_completion(&entity->entity_idle);
/* We start in an idle state. */
- complete(&entity->entity_idle);
+ complete_all(&entity->entity_idle);
spin_lock_init(&entity->rq_lock);
spsc_queue_init(&entity->job_queue);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 31f3a1267be4..fd22d753b4ed 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -987,7 +987,7 @@ static int drm_sched_main(void *param)
sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job) {
- complete(&entity->entity_idle);
+ complete_all(&entity->entity_idle);
continue;
}
@@ -998,7 +998,7 @@ static int drm_sched_main(void *param)
trace_drm_run_job(sched_job, entity);
fence = sched->ops->run_job(sched_job);
- complete(&entity->entity_idle);
+ complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence);
if (!IS_ERR_OR_NULL(fence)) {
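
The scheduler hunks switch the entity-idle signalling from complete() to complete_all(). complete() wakes at most one waiter and only increments the completion count once, while complete_all() marks the completion as permanently done so every present and future waiter proceeds until the completion is reinitialised. A minimal sketch of the difference, outside the scheduler:

	#include <linux/completion.h>

	static DECLARE_COMPLETION(idle);

	static void signal_one_waiter(void)
	{
		complete(&idle);	/* releases a single wait_for_completion() */
	}

	static void signal_everyone(void)
	{
		complete_all(&idle);	/* all waiters, now or later, proceed */
	}

	static void mark_busy_again(void)
	{
		reinit_completion(&idle);	/* needed after complete_all() */
	}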
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 3d511fa38913..337040fa6438 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -143,7 +143,6 @@ static const struct drm_driver shmob_drm_driver = {
* Power management
*/
-#ifdef CONFIG_PM_SLEEP
static int shmob_drm_pm_suspend(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
@@ -165,11 +164,9 @@ static int shmob_drm_pm_resume(struct device *dev)
drm_kms_helper_poll_enable(sdev->ddev);
return 0;
}
-#endif
-static const struct dev_pm_ops shmob_drm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(shmob_drm_pm_ops,
+ shmob_drm_pm_suspend, shmob_drm_pm_resume);
/* -----------------------------------------------------------------------------
* Platform driver
@@ -292,7 +289,7 @@ static struct platform_driver shmob_drm_platform_driver = {
.remove = shmob_drm_remove,
.driver = {
.name = "shmob-drm",
- .pm = &shmob_drm_pm_ops,
+ .pm = pm_sleep_ptr(&shmob_drm_pm_ops),
},
};
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 53464afc2b9a..c3bf3a18302e 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -876,7 +876,7 @@ static int ssd130x_init_modeset(struct ssd130x_device *ssd130x)
drm->mode_config.max_width = max_width;
drm->mode_config.min_height = mode->vdisplay;
drm->mode_config.max_height = max_height;
- drm->mode_config.preferred_depth = 32;
+ drm->mode_config.preferred_depth = 24;
drm->mode_config.funcs = &ssd130x_mode_config_funcs;
/* Primary plane */
@@ -1006,7 +1006,7 @@ struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap)
if (ret)
return ERR_PTR(dev_err_probe(dev, ret, "DRM device register failed\n"));
- drm_fbdev_generic_setup(drm, 0);
+ drm_fbdev_generic_setup(drm, 32);
return ssd130x;
}
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index 88f4259680f1..db0bcea1d9f4 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -803,10 +803,8 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu,
}
ctx->irq = platform_get_irq(pdev, 0);
- if (ctx->irq < 0) {
- dev_err(dev, "failed to get dpu irq\n");
+ if (ctx->irq < 0)
return ctx->irq;
- }
/* disable and clear interrupts before register dpu IRQ. */
writel(0x00, ctx->base + REG_DPU_INT_EN);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index c65f0a89b6b0..9625a00a48ba 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -141,23 +141,14 @@ struct resync_parameters {
struct tv_mode {
char *name;
+ unsigned int tv_mode;
+
u32 mode;
u32 chroma_freq;
u16 back_porch;
u16 front_porch;
- u16 line_number;
u16 vblank_level;
- u32 hdisplay;
- u16 hfront_porch;
- u16 hsync_len;
- u16 hback_porch;
-
- u32 vdisplay;
- u16 vfront_porch;
- u16 vsync_len;
- u16 vback_porch;
-
bool yc_en;
bool dac3_en;
bool dac_bit25_en;
@@ -213,7 +204,7 @@ static const struct resync_parameters pal_resync_parameters = {
static const struct tv_mode tv_modes[] = {
{
- .name = "NTSC",
+ .tv_mode = DRM_MODE_TV_MODE_NTSC,
.mode = SUN4I_TVE_CFG0_RES_480i,
.chroma_freq = 0x21f07c1f,
.yc_en = true,
@@ -222,17 +213,6 @@ static const struct tv_mode tv_modes[] = {
.back_porch = 118,
.front_porch = 32,
- .line_number = 525,
-
- .hdisplay = 720,
- .hfront_porch = 18,
- .hsync_len = 2,
- .hback_porch = 118,
-
- .vdisplay = 480,
- .vfront_porch = 26,
- .vsync_len = 2,
- .vback_porch = 17,
.vblank_level = 240,
@@ -242,23 +222,12 @@ static const struct tv_mode tv_modes[] = {
.resync_params = &ntsc_resync_parameters,
},
{
- .name = "PAL",
+ .tv_mode = DRM_MODE_TV_MODE_PAL,
.mode = SUN4I_TVE_CFG0_RES_576i,
.chroma_freq = 0x2a098acb,
.back_porch = 138,
.front_porch = 24,
- .line_number = 625,
-
- .hdisplay = 720,
- .hfront_porch = 3,
- .hsync_len = 2,
- .hback_porch = 139,
-
- .vdisplay = 576,
- .vfront_porch = 28,
- .vsync_len = 2,
- .vback_porch = 19,
.vblank_level = 252,
@@ -276,63 +245,21 @@ drm_encoder_to_sun4i_tv(struct drm_encoder *encoder)
encoder);
}
-/*
- * FIXME: If only the drm_display_mode private field was usable, this
- * could go away...
- *
- * So far, it doesn't seem to be preserved when the mode is passed by
- * to mode_set for some reason.
- */
-static const struct tv_mode *sun4i_tv_find_tv_by_mode(const struct drm_display_mode *mode)
+static const struct tv_mode *
+sun4i_tv_find_tv_by_mode(unsigned int mode)
{
int i;
- /* First try to identify the mode by name */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
- DRM_DEBUG_DRIVER("Comparing mode %s vs %s",
- mode->name, tv_mode->name);
-
- if (!strcmp(mode->name, tv_mode->name))
- return tv_mode;
- }
-
- /* Then by number of lines */
- for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
- const struct tv_mode *tv_mode = &tv_modes[i];
-
- DRM_DEBUG_DRIVER("Comparing mode %s vs %s (X: %d vs %d)",
- mode->name, tv_mode->name,
- mode->vdisplay, tv_mode->vdisplay);
-
- if (mode->vdisplay == tv_mode->vdisplay)
+ if (tv_mode->tv_mode == mode)
return tv_mode;
}
return NULL;
}
-static void sun4i_tv_mode_to_drm_mode(const struct tv_mode *tv_mode,
- struct drm_display_mode *mode)
-{
- DRM_DEBUG_DRIVER("Creating mode %s\n", mode->name);
-
- mode->type = DRM_MODE_TYPE_DRIVER;
- mode->clock = 13500;
- mode->flags = DRM_MODE_FLAG_INTERLACE;
-
- mode->hdisplay = tv_mode->hdisplay;
- mode->hsync_start = mode->hdisplay + tv_mode->hfront_porch;
- mode->hsync_end = mode->hsync_start + tv_mode->hsync_len;
- mode->htotal = mode->hsync_end + tv_mode->hback_porch;
-
- mode->vdisplay = tv_mode->vdisplay;
- mode->vsync_start = mode->vdisplay + tv_mode->vfront_porch;
- mode->vsync_end = mode->vsync_start + tv_mode->vsync_len;
- mode->vtotal = mode->vsync_end + tv_mode->vback_porch;
-}
-
static void sun4i_tv_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
@@ -356,7 +283,11 @@ static void sun4i_tv_enable(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state, encoder->crtc);
struct drm_display_mode *mode = &crtc_state->mode;
- const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
+ struct drm_connector *connector = &tv->connector;
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ const struct tv_mode *tv_mode =
+ sun4i_tv_find_tv_by_mode(conn_state->tv.mode);
DRM_DEBUG_DRIVER("Enabling the TV Output\n");
@@ -404,7 +335,7 @@ static void sun4i_tv_enable(struct drm_encoder *encoder,
/* Set the lines setup */
regmap_write(tv->regs, SUN4I_TVE_LINE_REG,
SUN4I_TVE_LINE_FIRST(22) |
- SUN4I_TVE_LINE_NUMBER(tv_mode->line_number));
+ SUN4I_TVE_LINE_NUMBER(mode->vtotal));
regmap_write(tv->regs, SUN4I_TVE_LEVEL_REG,
SUN4I_TVE_LEVEL_BLANK(tv_mode->video_levels->blank) |
@@ -465,37 +396,21 @@ static const struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
.atomic_enable = sun4i_tv_enable,
};
-static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
- struct drm_display_mode *mode;
- const struct tv_mode *tv_mode = &tv_modes[i];
-
- mode = drm_mode_create(connector->dev);
- if (!mode) {
- DRM_ERROR("Failed to create a new display mode\n");
- return 0;
- }
-
- strcpy(mode->name, tv_mode->name);
-
- sun4i_tv_mode_to_drm_mode(tv_mode, mode);
- drm_mode_probed_add(connector, mode);
- }
-
- return i;
-}
-
static const struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
- .get_modes = sun4i_tv_comp_get_modes,
+ .atomic_check = drm_atomic_helper_connector_tv_check,
+ .get_modes = drm_connector_helper_tv_get_modes,
};
+static void sun4i_tv_connector_reset(struct drm_connector *connector)
+{
+ drm_atomic_helper_connector_reset(connector);
+ drm_atomic_helper_connector_tv_reset(connector);
+}
+
static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
+ .reset = sun4i_tv_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -587,8 +502,20 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
drm_connector_attach_encoder(&tv->connector, &tv->encoder);
+ ret = drm_mode_create_tv_properties(drm,
+ BIT(DRM_MODE_TV_MODE_NTSC) |
+ BIT(DRM_MODE_TV_MODE_PAL));
+ if (ret)
+ goto err_cleanup_connector;
+
+ drm_object_attach_property(&tv->connector.base,
+ drm->mode_config.tv_mode_property,
+ DRM_MODE_TV_MODE_NTSC);
+
return 0;
+err_cleanup_connector:
+ drm_connector_cleanup(&tv->connector);
err_cleanup_encoder:
drm_encoder_cleanup(&tv->encoder);
err_disable_clk:
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index b29ef1085cad..aaf357e29c65 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -1,14 +1,20 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \
+ drm_kunit_helpers.o
+
obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_buddy_test.o \
drm_cmdline_parser_test.o \
+ drm_connector_test.o \
drm_damage_helper_test.o \
drm_dp_mst_helper_test.o \
drm_format_helper_test.o \
drm_format_test.o \
drm_framebuffer_test.o \
- drm_kunit_helpers.o \
+ drm_managed_test.o \
drm_mm_test.o \
+ drm_modes_test.o \
drm_plane_helper_test.o \
+ drm_probe_helper_test.o \
drm_rect_test.o
diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
index 362a5fbd82f5..416a279b6dae 100644
--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
+++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
@@ -8,20 +8,39 @@
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include "drm_kunit_helpers.h"
-
struct drm_client_modeset_test_priv {
struct drm_device *drm;
+ struct device *dev;
struct drm_connector connector;
};
static int drm_client_modeset_connector_get_modes(struct drm_connector *connector)
{
- return drm_add_modes_noedid(connector, 1920, 1200);
+ struct drm_display_mode *mode;
+ int count;
+
+ count = drm_add_modes_noedid(connector, 1920, 1200);
+
+ mode = drm_mode_analog_ntsc_480i(connector->dev);
+ if (!mode)
+ return count;
+
+ drm_mode_probed_add(connector, mode);
+ count += 1;
+
+ mode = drm_mode_analog_pal_576i(connector->dev);
+ if (!mode)
+ return count;
+
+ drm_mode_probed_add(connector, mode);
+ count += 1;
+
+ return count;
}
static const struct drm_connector_helper_funcs drm_client_modeset_connector_helper_funcs = {
@@ -41,7 +60,12 @@ static int drm_client_modeset_test_init(struct kunit *test)
test->priv = priv;
- priv->drm = drm_kunit_device_init(test, DRIVER_MODESET, "drm-client-modeset-test");
+ priv->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev,
+ sizeof(*priv->drm), 0,
+ DRIVER_MODESET);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
ret = drmm_connector_init(priv->drm, &priv->connector,
@@ -52,9 +76,19 @@ static int drm_client_modeset_test_init(struct kunit *test)
drm_connector_helper_add(&priv->connector, &drm_client_modeset_connector_helper_funcs);
+ priv->connector.interlace_allowed = true;
+ priv->connector.doublescan_allowed = true;
+
return 0;
}
+static void drm_client_modeset_test_exit(struct kunit *test)
+{
+ struct drm_client_modeset_test_priv *priv = test->priv;
+
+ drm_kunit_helper_free_device(test, priv->dev);
+}
+
static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
{
struct drm_client_modeset_test_priv *priv = test->priv;
@@ -84,15 +118,83 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode));
}
+struct drm_connector_pick_cmdline_mode_test {
+ const char *cmdline;
+ struct drm_display_mode *(*func)(struct drm_device *drm);
+};
+
+#define TEST_CMDLINE(_cmdline, _fn) \
+ { \
+ .cmdline = _cmdline, \
+ .func = _fn, \
+ }
+
+static void drm_test_pick_cmdline_named(struct kunit *test)
+{
+ const struct drm_connector_pick_cmdline_mode_test *params = test->param_value;
+ struct drm_client_modeset_test_priv *priv = test->priv;
+ struct drm_device *drm = priv->drm;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
+ const struct drm_display_mode *expected_mode, *mode;
+ const char *cmdline = params->cmdline;
+ int ret;
+
+ KUNIT_ASSERT_TRUE(test,
+ drm_mode_parse_command_line_for_connector(cmdline,
+ connector,
+ cmdline_mode));
+
+ mutex_lock(&drm->mode_config.mutex);
+ ret = drm_helper_probe_single_connector_modes(connector, 1920, 1080);
+ mutex_unlock(&drm->mode_config.mutex);
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ mode = drm_connector_pick_cmdline_mode(connector);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ expected_mode = params->func(drm);
+ KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode));
+}
+
+static const
+struct drm_connector_pick_cmdline_mode_test drm_connector_pick_cmdline_mode_tests[] = {
+ TEST_CMDLINE("NTSC", drm_mode_analog_ntsc_480i),
+ TEST_CMDLINE("NTSC-J", drm_mode_analog_ntsc_480i),
+ TEST_CMDLINE("PAL", drm_mode_analog_pal_576i),
+ TEST_CMDLINE("PAL-M", drm_mode_analog_ntsc_480i),
+};
+
+static void
+drm_connector_pick_cmdline_mode_desc(const struct drm_connector_pick_cmdline_mode_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->cmdline);
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_pick_cmdline_mode,
+ drm_connector_pick_cmdline_mode_tests,
+ drm_connector_pick_cmdline_mode_desc);
+
static struct kunit_case drm_test_pick_cmdline_tests[] = {
KUNIT_CASE(drm_test_pick_cmdline_res_1920_1080_60),
+ KUNIT_CASE_PARAM(drm_test_pick_cmdline_named,
+ drm_connector_pick_cmdline_mode_gen_params),
{}
};
static struct kunit_suite drm_test_pick_cmdline_test_suite = {
.name = "drm_test_pick_cmdline",
.init = drm_client_modeset_test_init,
+ .exit = drm_client_modeset_test_exit,
.test_cases = drm_test_pick_cmdline_tests
};
kunit_test_suite(drm_test_pick_cmdline_test_suite);
+
+/*
+ * This file is included directly by drm_client_modeset.c so we can't
+ * use any MODULE_* macro here.
+ */
diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
index 34790e7a3760..88f7f518ffb3 100644
--- a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
+++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
@@ -927,6 +927,14 @@ static const struct drm_cmdline_invalid_test drm_cmdline_invalid_tests[] = {
.name = "invalid_option",
.cmdline = "720x480,test=42",
},
+ {
+ .name = "invalid_tv_option",
+ .cmdline = "720x480i,tv_mode=invalid",
+ },
+ {
+ .name = "truncated_tv_option",
+ .cmdline = "720x480i,tv_mode=NTS",
+ },
};
static void drm_cmdline_invalid_desc(const struct drm_cmdline_invalid_test *t,
@@ -937,6 +945,65 @@ static void drm_cmdline_invalid_desc(const struct drm_cmdline_invalid_test *t,
KUNIT_ARRAY_PARAM(drm_cmdline_invalid, drm_cmdline_invalid_tests, drm_cmdline_invalid_desc);
+struct drm_cmdline_tv_option_test {
+ const char *name;
+ const char *cmdline;
+ struct drm_display_mode *(*mode_fn)(struct drm_device *dev);
+ enum drm_connector_tv_mode tv_mode;
+};
+
+static void drm_test_cmdline_tv_options(struct kunit *test)
+{
+ const struct drm_cmdline_tv_option_test *params = test->param_value;
+ const struct drm_display_mode *expected_mode = params->mode_fn(NULL);
+ struct drm_cmdline_mode mode = { };
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, expected_mode->hdisplay);
+ KUNIT_EXPECT_EQ(test, mode.yres, expected_mode->vdisplay);
+ KUNIT_EXPECT_EQ(test, mode.tv_mode, params->tv_mode);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_EQ(test, mode.interlace, !!(expected_mode->flags & DRM_MODE_FLAG_INTERLACE));
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+#define TV_OPT_TEST(_opt, _cmdline, _mode_fn) \
+ { \
+ .name = #_opt, \
+ .cmdline = _cmdline, \
+ .mode_fn = _mode_fn, \
+ .tv_mode = DRM_MODE_TV_MODE_ ## _opt, \
+ }
+
+static const struct drm_cmdline_tv_option_test drm_cmdline_tv_option_tests[] = {
+ TV_OPT_TEST(NTSC, "720x480i,tv_mode=NTSC", drm_mode_analog_ntsc_480i),
+ TV_OPT_TEST(NTSC_443, "720x480i,tv_mode=NTSC-443", drm_mode_analog_ntsc_480i),
+ TV_OPT_TEST(NTSC_J, "720x480i,tv_mode=NTSC-J", drm_mode_analog_ntsc_480i),
+ TV_OPT_TEST(PAL, "720x576i,tv_mode=PAL", drm_mode_analog_pal_576i),
+ TV_OPT_TEST(PAL_M, "720x480i,tv_mode=PAL-M", drm_mode_analog_ntsc_480i),
+ TV_OPT_TEST(PAL_N, "720x576i,tv_mode=PAL-N", drm_mode_analog_pal_576i),
+ TV_OPT_TEST(SECAM, "720x576i,tv_mode=SECAM", drm_mode_analog_pal_576i),
+};
+
+static void drm_cmdline_tv_option_desc(const struct drm_cmdline_tv_option_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+KUNIT_ARRAY_PARAM(drm_cmdline_tv_option,
+ drm_cmdline_tv_option_tests,
+ drm_cmdline_tv_option_desc);
+
static struct kunit_case drm_cmdline_parser_tests[] = {
KUNIT_CASE(drm_test_cmdline_force_d_only),
KUNIT_CASE(drm_test_cmdline_force_D_only_dvi),
@@ -977,6 +1044,7 @@ static struct kunit_case drm_cmdline_parser_tests[] = {
KUNIT_CASE(drm_test_cmdline_freestanding_force_e_and_options),
KUNIT_CASE(drm_test_cmdline_panel_orientation),
KUNIT_CASE_PARAM(drm_test_cmdline_invalid, drm_cmdline_invalid_gen_params),
+ KUNIT_CASE_PARAM(drm_test_cmdline_tv_options, drm_cmdline_tv_option_gen_params),
{}
};
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
new file mode 100644
index 000000000000..c66aa2dc8d9d
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_connector functions
+ */
+
+#include <drm/drm_connector.h>
+
+#include <kunit/test.h>
+
+struct drm_get_tv_mode_from_name_test {
+ const char *name;
+ enum drm_connector_tv_mode expected_mode;
+};
+
+#define TV_MODE_NAME(_name, _mode) \
+ { \
+ .name = _name, \
+ .expected_mode = _mode, \
+ }
+
+static void drm_test_get_tv_mode_from_name_valid(struct kunit *test)
+{
+ const struct drm_get_tv_mode_from_name_test *params = test->param_value;
+
+ KUNIT_EXPECT_EQ(test,
+ drm_get_tv_mode_from_name(params->name, strlen(params->name)),
+ params->expected_mode);
+}
+
+static const
+struct drm_get_tv_mode_from_name_test drm_get_tv_mode_from_name_valid_tests[] = {
+ TV_MODE_NAME("NTSC", DRM_MODE_TV_MODE_NTSC),
+ TV_MODE_NAME("NTSC-443", DRM_MODE_TV_MODE_NTSC_443),
+ TV_MODE_NAME("NTSC-J", DRM_MODE_TV_MODE_NTSC_J),
+ TV_MODE_NAME("PAL", DRM_MODE_TV_MODE_PAL),
+ TV_MODE_NAME("PAL-M", DRM_MODE_TV_MODE_PAL_M),
+ TV_MODE_NAME("PAL-N", DRM_MODE_TV_MODE_PAL_N),
+ TV_MODE_NAME("SECAM", DRM_MODE_TV_MODE_SECAM),
+};
+
+static void
+drm_get_tv_mode_from_name_valid_desc(const struct drm_get_tv_mode_from_name_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+KUNIT_ARRAY_PARAM(drm_get_tv_mode_from_name_valid,
+ drm_get_tv_mode_from_name_valid_tests,
+ drm_get_tv_mode_from_name_valid_desc);
+
+static void drm_test_get_tv_mode_from_name_truncated(struct kunit *test)
+{
+ const char *name = "NTS";
+ int ret;
+
+ ret = drm_get_tv_mode_from_name(name, strlen(name));
+ KUNIT_EXPECT_LT(test, ret, 0);
+}
+
+static struct kunit_case drm_get_tv_mode_from_name_tests[] = {
+ KUNIT_CASE_PARAM(drm_test_get_tv_mode_from_name_valid,
+ drm_get_tv_mode_from_name_valid_gen_params),
+ KUNIT_CASE(drm_test_get_tv_mode_from_name_truncated),
+ { }
+};
+
+static struct kunit_suite drm_get_tv_mode_from_name_test_suite = {
+ .name = "drm_get_tv_mode_from_name",
+ .test_cases = drm_get_tv_mode_from_name_tests,
+};
+
+kunit_test_suite(drm_get_tv_mode_from_name_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_LICENSE("GPL");
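For context, a minimal sketch of a caller of the helper exercised above. The calling function is hypothetical; only the drm_get_tv_mode_from_name() signature and its convention of returning a negative value for an unknown or truncated name are taken from the tests.

#include <drm/drm_connector.h>
#include <linux/string.h>

/* Hypothetical caller, kernel-style C; not an in-tree user. */
static int parse_tv_mode_option(const char *opt, enum drm_connector_tv_mode *out)
{
	int ret = drm_get_tv_mode_from_name(opt, strlen(opt));

	if (ret < 0)	/* e.g. "NTS" or any unknown name */
		return ret;

	*out = ret;	/* e.g. DRM_MODE_TV_MODE_NTSC for "NTSC" */
	return 0;
}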
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 567c71f95edc..34e80eb6d96e 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -32,16 +32,41 @@ struct convert_to_rgb565_result {
const u16 expected_swab[TEST_BUF_SIZE];
};
+struct convert_to_xrgb1555_result {
+ unsigned int dst_pitch;
+ const u16 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_argb1555_result {
+ unsigned int dst_pitch;
+ const u16 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgba5551_result {
+ unsigned int dst_pitch;
+ const u16 expected[TEST_BUF_SIZE];
+};
+
struct convert_to_rgb888_result {
unsigned int dst_pitch;
const u8 expected[TEST_BUF_SIZE];
};
+struct convert_to_argb8888_result {
+ unsigned int dst_pitch;
+ const u32 expected[TEST_BUF_SIZE];
+};
+
struct convert_to_xrgb2101010_result {
unsigned int dst_pitch;
const u32 expected[TEST_BUF_SIZE];
};
+struct convert_to_argb2101010_result {
+ unsigned int dst_pitch;
+ const u32 expected[TEST_BUF_SIZE];
+};
+
struct convert_xrgb8888_case {
const char *name;
unsigned int pitch;
@@ -50,8 +75,13 @@ struct convert_xrgb8888_case {
struct convert_to_gray8_result gray8_result;
struct convert_to_rgb332_result rgb332_result;
struct convert_to_rgb565_result rgb565_result;
+ struct convert_to_xrgb1555_result xrgb1555_result;
+ struct convert_to_argb1555_result argb1555_result;
+ struct convert_to_rgba5551_result rgba5551_result;
struct convert_to_rgb888_result rgb888_result;
+ struct convert_to_argb8888_result argb8888_result;
struct convert_to_xrgb2101010_result xrgb2101010_result;
+ struct convert_to_argb2101010_result argb2101010_result;
};
static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
@@ -73,14 +103,34 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.expected = { 0xF800 },
.expected_swab = { 0x00F8 },
},
+ .xrgb1555_result = {
+ .dst_pitch = 0,
+ .expected = { 0x7C00 },
+ },
+ .argb1555_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFC00 },
+ },
+ .rgba5551_result = {
+ .dst_pitch = 0,
+ .expected = { 0xF801 },
+ },
.rgb888_result = {
.dst_pitch = 0,
.expected = { 0x00, 0x00, 0xFF },
},
+ .argb8888_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFFFF0000 },
+ },
.xrgb2101010_result = {
.dst_pitch = 0,
.expected = { 0x3FF00000 },
},
+ .argb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFFF00000 },
+ },
},
{
.name = "single_pixel_clip_rectangle",
@@ -103,14 +153,34 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.expected = { 0xF800 },
.expected_swab = { 0x00F8 },
},
+ .xrgb1555_result = {
+ .dst_pitch = 0,
+ .expected = { 0x7C00 },
+ },
+ .argb1555_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFC00 },
+ },
+ .rgba5551_result = {
+ .dst_pitch = 0,
+ .expected = { 0xF801 },
+ },
.rgb888_result = {
.dst_pitch = 0,
.expected = { 0x00, 0x00, 0xFF },
},
+ .argb8888_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFFFF0000 },
+ },
.xrgb2101010_result = {
.dst_pitch = 0,
.expected = { 0x3FF00000 },
},
+ .argb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFFF00000 },
+ },
},
{
/* Well known colors: White, black, red, green, blue, magenta,
@@ -160,6 +230,33 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0xE0FF, 0xFF07,
},
},
+ .xrgb1555_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0x7FFF, 0x0000,
+ 0x7C00, 0x03E0,
+ 0x001F, 0x7C1F,
+ 0x7FE0, 0x03FF,
+ },
+ },
+ .argb1555_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFF, 0x8000,
+ 0xFC00, 0x83E0,
+ 0x801F, 0xFC1F,
+ 0xFFE0, 0x83FF,
+ },
+ },
+ .rgba5551_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFF, 0x0001,
+ 0xF801, 0x07C1,
+ 0x003F, 0xF83F,
+ 0xFFC1, 0x07FF,
+ },
+ },
.rgb888_result = {
.dst_pitch = 0,
.expected = {
@@ -169,6 +266,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
},
},
+ .argb8888_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFFFFFF, 0xFF000000,
+ 0xFFFF0000, 0xFF00FF00,
+ 0xFF0000FF, 0xFFFF00FF,
+ 0xFFFFFF00, 0xFF00FFFF,
+ },
+ },
.xrgb2101010_result = {
.dst_pitch = 0,
.expected = {
@@ -178,6 +284,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x3FFFFC00, 0x000FFFFF,
},
},
+ .argb2101010_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFFFFFF, 0xC0000000,
+ 0xFFF00000, 0xC00FFC00,
+ 0xC00003FF, 0xFFF003FF,
+ 0xFFFFFC00, 0xC00FFFFF,
+ },
+ },
},
{
/* Randomly picked colors. Full buffer within the clip area. */
@@ -218,6 +333,30 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00A8, 0x8E6B, 0x330A, 0x0000, 0x0000,
},
},
+ .xrgb1555_result = {
+ .dst_pitch = 10,
+ .expected = {
+ 0x0513, 0x0920, 0x5400, 0x0000, 0x0000,
+ 0x35CE, 0x0513, 0x0920, 0x0000, 0x0000,
+ 0x5400, 0x35CE, 0x0513, 0x0000, 0x0000,
+ },
+ },
+ .argb1555_result = {
+ .dst_pitch = 10,
+ .expected = {
+ 0x8513, 0x8920, 0xD400, 0x0000, 0x0000,
+ 0xB5CE, 0x8513, 0x8920, 0x0000, 0x0000,
+ 0xD400, 0xB5CE, 0x8513, 0x0000, 0x0000,
+ },
+ },
+ .rgba5551_result = {
+ .dst_pitch = 10,
+ .expected = {
+ 0x0A27, 0x1241, 0xA801, 0x0000, 0x0000,
+ 0x6B9D, 0x0A27, 0x1241, 0x0000, 0x0000,
+ 0xA801, 0x6B9D, 0x0A27, 0x0000, 0x0000,
+ },
+ },
.rgb888_result = {
.dst_pitch = 15,
.expected = {
@@ -229,6 +368,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
+ .argb8888_result = {
+ .dst_pitch = 20,
+ .expected = {
+ 0xFF0E449C, 0xFF114D05, 0xFFA80303, 0x00000000, 0x00000000,
+ 0xFF6C7073, 0xFF0E449C, 0xFF114D05, 0x00000000, 0x00000000,
+ 0xFFA80303, 0xFF6C7073, 0xFF0E449C, 0x00000000, 0x00000000,
+ },
+ },
.xrgb2101010_result = {
.dst_pitch = 20,
.expected = {
@@ -237,6 +384,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x2A20300C, 0x1B1705CD, 0x03844672, 0x00000000, 0x00000000,
},
},
+ .argb2101010_result = {
+ .dst_pitch = 20,
+ .expected = {
+ 0xC3844672, 0xC444D414, 0xEA20300C, 0x00000000, 0x00000000,
+ 0xDB1705CD, 0xC3844672, 0xC444D414, 0x00000000, 0x00000000,
+ 0xEA20300C, 0xDB1705CD, 0xC3844672, 0x00000000, 0x00000000,
+ },
+ },
},
};
@@ -264,7 +419,22 @@ static size_t conversion_buf_size(u32 dst_format, unsigned int dst_pitch,
return dst_pitch * drm_rect_height(clip);
}
-static u32 *le32buf_to_cpu(struct kunit *test, const u32 *buf, size_t buf_size)
+static u16 *le16buf_to_cpu(struct kunit *test, const __le16 *buf, size_t buf_size)
+{
+ u16 *dst = NULL;
+ int n;
+
+ dst = kunit_kzalloc(test, sizeof(*dst) * buf_size, GFP_KERNEL);
+ if (!dst)
+ return NULL;
+
+ for (n = 0; n < buf_size; n++)
+ dst[n] = le16_to_cpu(buf[n]);
+
+ return dst;
+}
+
+static u32 *le32buf_to_cpu(struct kunit *test, const __le32 *buf, size_t buf_size)
{
u32 *dst = NULL;
int n;
@@ -279,6 +449,21 @@ static u32 *le32buf_to_cpu(struct kunit *test, const u32 *buf, size_t buf_size)
return dst;
}
+static __le32 *cpubuf_to_le32(struct kunit *test, const u32 *buf, size_t buf_size)
+{
+ __le32 *dst = NULL;
+ int n;
+
+ dst = kunit_kzalloc(test, sizeof(*dst) * buf_size, GFP_KERNEL);
+ if (!dst)
+ return NULL;
+
+ for (n = 0; n < buf_size; n++)
+ dst[n] = cpu_to_le32(buf[n]);
+
+ return dst;
+}
+
static void convert_xrgb8888_case_desc(struct convert_xrgb8888_case *t,
char *desc)
{
@@ -293,8 +478,8 @@ static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_gray8_result *result = &params->gray8_result;
size_t dst_size;
- __u8 *buf = NULL;
- __u32 *xrgb8888 = NULL;
+ u8 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
@@ -310,7 +495,7 @@ static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
- xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
@@ -323,8 +508,8 @@ static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_rgb332_result *result = &params->rgb332_result;
size_t dst_size;
- __u8 *buf = NULL;
- __u32 *xrgb8888 = NULL;
+ u8 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
@@ -340,7 +525,7 @@ static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
- xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
@@ -353,8 +538,8 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_rgb565_result *result = &params->rgb565_result;
size_t dst_size;
- __u16 *buf = NULL;
- __u32 *xrgb8888 = NULL;
+ u16 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
@@ -370,24 +555,120 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
- xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, false);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ buf = dst.vaddr; /* restore original value of buf */
drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, true);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size);
}
+static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_xrgb1555_result *result = &params->xrgb1555_result;
+ size_t dst_size;
+ u16 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_XRGB1555, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_xrgb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_argb1555_result *result = &params->argb1555_result;
+ size_t dst_size;
+ u16 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_ARGB1555, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_argb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgba5551_result *result = &params->rgba5551_result;
+ size_t dst_size;
+ u16 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGBA5551, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgba5551(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_rgb888_result *result = &params->rgb888_result;
size_t dst_size;
- __u8 *buf = NULL;
- __u32 *xrgb8888 = NULL;
+ u8 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
@@ -403,21 +684,56 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
- xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
+ /*
+ * RGB888 expected results are already in little-endian
+ * order, so there's no need to convert the test output.
+ */
drm_fb_xrgb8888_to_rgb888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
+static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_argb8888_result *result = &params->argb8888_result;
+ size_t dst_size;
+ u32 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_ARGB8888,
+ result->dst_pitch, &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_argb8888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_xrgb2101010_result *result = &params->xrgb2101010_result;
size_t dst_size;
- __u32 *buf = NULL;
- __u32 *xrgb8888 = NULL;
+ u32 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
@@ -433,7 +749,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
- xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
@@ -442,12 +758,48 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
+static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_argb2101010_result *result = &params->argb2101010_result;
+ size_t dst_size;
+ u32 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_ARGB2101010,
+ result->dst_pitch, &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_argb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
static struct kunit_case drm_format_helper_test_cases[] = {
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_gray8, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb332, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb565, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb1555, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb1555, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgba5551, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params),
{}
};
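The new le16buf_to_cpu()/le32buf_to_cpu()/cpubuf_to_le32() helpers exist because the conversion routines write little-endian bytes while the expected arrays are kept in CPU order. A standalone illustration of the same point (plain userspace C; the open-coded byte shuffle stands in for the kernel's le16_to_cpu()):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for le16_to_cpu(): assemble a CPU-order value from LE bytes. */
static uint16_t le16_to_cpu_demo(const uint8_t b[2])
{
	return (uint16_t)b[0] | ((uint16_t)b[1] << 8);
}

int main(void)
{
	const uint8_t wire[2] = { 0x00, 0xF8 };	/* RGB565 red as stored by the helpers */
	const uint16_t expected = 0xF800;	/* CPU-order expectation from the tables above */

	printf("match: %d\n", le16_to_cpu_demo(wire) == expected);
	return 0;
}

On little-endian hosts the in-memory representations happen to coincide, which is why the conversion step only becomes visible when the tests run on big-endian machines.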
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index f1662091f250..e98b4150f556 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -1,63 +1,101 @@
// SPDX-License-Identifier: GPL-2.0
#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
-#include "drm_kunit_helpers.h"
-
-struct kunit_dev {
- struct drm_device base;
-};
+#define KUNIT_DEVICE_NAME "drm-kunit-mock-device"
static const struct drm_mode_config_funcs drm_mode_config_funcs = {
};
-static int dev_init(struct kunit_resource *res, void *ptr)
+static int fake_probe(struct platform_device *pdev)
{
- char *name = ptr;
- struct device *dev;
-
- dev = root_device_register(name);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ return 0;
+}
- res->data = dev;
+static int fake_remove(struct platform_device *pdev)
+{
return 0;
}
-static void dev_free(struct kunit_resource *res)
+static struct platform_driver fake_platform_driver = {
+ .probe = fake_probe,
+ .remove = fake_remove,
+ .driver = {
+ .name = KUNIT_DEVICE_NAME,
+ },
+};
+
+/**
+ * drm_kunit_helper_alloc_device - Allocate a mock device for a KUnit test
+ * @test: The test context object
+ *
+ * This allocates a fake &struct device to create a mock for a KUnit
+ * test. The device will also be bound to a fake driver. It will thus be
+ * able to leverage the usual infrastructure and most notably the
+ * device-managed resources just like a "real" device.
+ *
+ * Callers need to make sure to call drm_kunit_helper_free_device() on
+ * the device when done.
+ *
+ * Returns:
+ * A pointer to the new device, or an ERR_PTR() otherwise.
+ */
+struct device *drm_kunit_helper_alloc_device(struct kunit *test)
{
- struct device *dev = res->data;
+ struct platform_device *pdev;
+ int ret;
- root_device_unregister(dev);
+ ret = platform_driver_register(&fake_platform_driver);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ pdev = platform_device_alloc(KUNIT_DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ ret = platform_device_add(pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return &pdev->dev;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_alloc_device);
+
+/**
+ * drm_kunit_helper_free_device - Frees a mock device
+ * @test: The test context object
+ * @dev: The device to free
+ *
+ * Frees a device allocated with drm_kunit_helper_alloc_device().
+ */
+void drm_kunit_helper_free_device(struct kunit *test, struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&fake_platform_driver);
}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device);
-struct drm_device *drm_kunit_device_init(struct kunit *test, u32 features, char *name)
+struct drm_device *
+__drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
+ struct device *dev,
+ size_t size, size_t offset,
+ const struct drm_driver *driver)
{
- struct kunit_dev *kdev;
struct drm_device *drm;
- struct drm_driver *driver;
- struct device *dev;
+ void *container;
int ret;
- dev = kunit_alloc_resource(test, dev_init, dev_free, GFP_KERNEL, name);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
- if (!driver)
- return ERR_PTR(-ENOMEM);
-
- driver->driver_features = features;
- kdev = devm_drm_dev_alloc(dev, driver, struct kunit_dev, base);
- if (IS_ERR(kdev))
- return ERR_CAST(kdev);
+ container = __devm_drm_dev_alloc(dev, driver, size, offset);
+ if (IS_ERR(container))
+ return ERR_CAST(container);
- drm = &kdev->base;
+ drm = container + offset;
drm->mode_config.funcs = &drm_mode_config_funcs;
ret = drmm_mode_config_init(drm);
@@ -66,6 +104,7 @@ struct drm_device *drm_kunit_device_init(struct kunit *test, u32 features, char
return drm;
}
+EXPORT_SYMBOL_GPL(__drm_kunit_helper_alloc_drm_device_with_driver);
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_LICENSE("GPL");
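The size and offset arguments of __drm_kunit_helper_alloc_drm_device_with_driver() mirror __devm_drm_dev_alloc(): they let a test embed the struct drm_device inside a larger private structure and get back a pointer to the embedded member. The tests above pass sizeof(*priv->drm) and offset 0 because they do not wrap the device; a hedged sketch with a hypothetical wrapper struct shows the other case:

/* Hypothetical per-test device structure; only the helper call is real. */
struct example_device {
	struct drm_device drm;
	int example_state;
};

/* ... inside a test init function, after drm_kunit_helper_alloc_device(): */
struct drm_device *drm;
struct example_device *edev;

drm = __drm_kunit_helper_alloc_drm_device(test, dev,
					   sizeof(struct example_device),
					   offsetof(struct example_device, drm),
					   DRIVER_MODESET);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
edev = container_of(drm, struct example_device, drm);	/* recover the wrapper */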
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.h b/drivers/gpu/drm/tests/drm_kunit_helpers.h
deleted file mode 100644
index 20ab6eec4c89..000000000000
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.h
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#ifndef DRM_KUNIT_HELPERS_H_
-#define DRM_KUNIT_HELPERS_H_
-
-struct drm_device;
-struct kunit;
-
-struct drm_device *drm_kunit_device_init(struct kunit *test, u32 features, char *name);
-
-#endif // DRM_KUNIT_HELPERS_H_
diff --git a/drivers/gpu/drm/tests/drm_managed_test.c b/drivers/gpu/drm/tests/drm_managed_test.c
new file mode 100644
index 000000000000..1652dca11d30
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_managed_test.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_managed.h>
+
+#include <kunit/resource.h>
+
+#include <linux/device.h>
+
+/* Ought to be enough for anybody */
+#define TEST_TIMEOUT_MS 100
+
+struct managed_test_priv {
+ bool action_done;
+ wait_queue_head_t action_wq;
+};
+
+static void drm_action(struct drm_device *drm, void *ptr)
+{
+ struct managed_test_priv *priv = ptr;
+
+ priv->action_done = true;
+ wake_up_interruptible(&priv->action_wq);
+}
+
+static void drm_test_managed_run_action(struct kunit *test)
+{
+ struct managed_test_priv *priv;
+ struct drm_device *drm;
+ struct device *dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ init_waitqueue_head(&priv->action_wq);
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
+
+ ret = drmm_add_action_or_reset(drm, drm_action, priv);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = drm_dev_register(drm, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_dev_unregister(drm);
+ drm_kunit_helper_free_device(test, dev);
+
+ ret = wait_event_interruptible_timeout(priv->action_wq, priv->action_done,
+ msecs_to_jiffies(TEST_TIMEOUT_MS));
+ KUNIT_EXPECT_GT(test, ret, 0);
+}
+
+static struct kunit_case drm_managed_tests[] = {
+ KUNIT_CASE(drm_test_managed_run_action),
+ {}
+};
+
+static struct kunit_suite drm_managed_test_suite = {
+ .name = "drm-test-managed",
+ .test_cases = drm_managed_tests
+};
+
+kunit_test_suite(drm_managed_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_modes_test.c b/drivers/gpu/drm/tests/drm_modes_test.c
new file mode 100644
index 000000000000..bc4aa2ce78be
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_modes_test.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_modes functions
+ */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_modes.h>
+
+#include <kunit/test.h>
+
+#include <linux/units.h>
+
+struct drm_test_modes_priv {
+ struct drm_device *drm;
+ struct device *dev;
+};
+
+static int drm_test_modes_init(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev,
+ sizeof(*priv->drm), 0,
+ DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
+
+ test->priv = priv;
+
+ return 0;
+}
+
+static void drm_test_modes_exit(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv = test->priv;
+
+ drm_kunit_helper_free_device(test, priv->dev);
+}
+
+static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv = test->priv;
+ struct drm_display_mode *mode;
+
+ mode = drm_analog_tv_mode(priv->drm,
+ DRM_MODE_TV_MODE_NTSC,
+ 13500 * HZ_PER_KHZ, 720, 480,
+ true);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60);
+ KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
+
+ /* BT.601 defines hsync_start at 736 for 480i */
+ KUNIT_EXPECT_EQ(test, mode->hsync_start, 736);
+
+ /*
+ * The NTSC standard expects a line to take 63.556us. With a
+ * pixel clock of 13.5 MHz, a pixel takes around 74ns, so we
+ * need to have 63556ns / 74ns = 858.
+ *
+ * This is also mandated by BT.601.
+ */
+ KUNIT_EXPECT_EQ(test, mode->htotal, 858);
+
+ KUNIT_EXPECT_EQ(test, mode->vdisplay, 480);
+ KUNIT_EXPECT_EQ(test, mode->vtotal, 525);
+}
+
+static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv = test->priv;
+ struct drm_display_mode *expected, *mode;
+
+ expected = drm_analog_tv_mode(priv->drm,
+ DRM_MODE_TV_MODE_NTSC,
+ 13500 * HZ_PER_KHZ, 720, 480,
+ true);
+ KUNIT_ASSERT_NOT_NULL(test, expected);
+
+ mode = drm_mode_analog_ntsc_480i(priv->drm);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
+}
+
+static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv = test->priv;
+ struct drm_display_mode *mode;
+
+ mode = drm_analog_tv_mode(priv->drm,
+ DRM_MODE_TV_MODE_PAL,
+ 13500 * HZ_PER_KHZ, 720, 576,
+ true);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
+ KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
+
+ /* BT.601 defines hsync_start at 732 for 576i */
+ KUNIT_EXPECT_EQ(test, mode->hsync_start, 732);
+
+ /*
+ * The PAL standard expects a line to take 64us. With a pixel
+ * clock of 13.5 MHz, a pixel takes around 74ns, so we need to
+ * have 64000ns / 74ns = 864.
+ *
+ * This is also mandated by BT.601.
+ */
+ KUNIT_EXPECT_EQ(test, mode->htotal, 864);
+
+ KUNIT_EXPECT_EQ(test, mode->vdisplay, 576);
+ KUNIT_EXPECT_EQ(test, mode->vtotal, 625);
+}
+
+static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv = test->priv;
+ struct drm_display_mode *expected, *mode;
+
+ expected = drm_analog_tv_mode(priv->drm,
+ DRM_MODE_TV_MODE_PAL,
+ 13500 * HZ_PER_KHZ, 720, 576,
+ true);
+ KUNIT_ASSERT_NOT_NULL(test, expected);
+
+ mode = drm_mode_analog_pal_576i(priv->drm);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
+}
+
+static struct kunit_case drm_modes_analog_tv_tests[] = {
+ KUNIT_CASE(drm_test_modes_analog_tv_ntsc_480i),
+ KUNIT_CASE(drm_test_modes_analog_tv_ntsc_480i_inlined),
+ KUNIT_CASE(drm_test_modes_analog_tv_pal_576i),
+ KUNIT_CASE(drm_test_modes_analog_tv_pal_576i_inlined),
+ { }
+};
+
+static struct kunit_suite drm_modes_analog_tv_test_suite = {
+ .name = "drm_modes_analog_tv",
+ .init = drm_test_modes_init,
+ .exit = drm_test_modes_exit,
+ .test_cases = drm_modes_analog_tv_tests,
+};
+
+kunit_test_suite(drm_modes_analog_tv_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_LICENSE("GPL");
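The htotal expectations in the comments above follow directly from line period times pixel clock; a standalone arithmetic check (plain C, not kernel code) using the same numbers (13.5 MHz pixel clock, 63.556 us NTSC line, 64 us PAL line):

#include <stdio.h>

int main(void)
{
	const double pixel_clock_hz = 13.5e6;
	const double ntsc_line_s = 63.556e-6;	/* NTSC line duration */
	const double pal_line_s  = 64.0e-6;	/* PAL line duration */

	/* pixels per line = line duration * pixel clock */
	printf("NTSC htotal ~= %.1f (asserted as 858)\n", ntsc_line_s * pixel_clock_hz);
	printf("PAL  htotal ~= %.1f (asserted as 864)\n", pal_line_s * pixel_clock_hz);
	return 0;
}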
diff --git a/drivers/gpu/drm/tests/drm_probe_helper_test.c b/drivers/gpu/drm/tests/drm_probe_helper_test.c
new file mode 100644
index 000000000000..0ee65828623e
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_probe_helper_test.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_probe_helper functions
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <kunit/test.h>
+
+struct drm_probe_helper_test_priv {
+ struct drm_device *drm;
+ struct device *dev;
+ struct drm_connector connector;
+};
+
+static const struct drm_connector_helper_funcs drm_probe_helper_connector_helper_funcs = {
+};
+
+static const struct drm_connector_funcs drm_probe_helper_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static int drm_probe_helper_test_init(struct kunit *test)
+{
+ struct drm_probe_helper_test_priv *priv;
+ struct drm_connector *connector;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+ test->priv = priv;
+
+ priv->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev,
+ sizeof(*priv->drm), 0,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
+
+ connector = &priv->connector;
+ ret = drmm_connector_init(priv->drm, connector,
+ &drm_probe_helper_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_connector_helper_add(connector, &drm_probe_helper_connector_helper_funcs);
+
+ return 0;
+}
+
+static void drm_probe_helper_test_exit(struct kunit *test)
+{
+ struct drm_probe_helper_test_priv *priv = test->priv;
+
+ drm_kunit_helper_free_device(test, priv->dev);
+}
+
+typedef struct drm_display_mode *(*expected_mode_func_t)(struct drm_device *);
+
+struct drm_connector_helper_tv_get_modes_test {
+ const char *name;
+ unsigned int supported_tv_modes;
+ enum drm_connector_tv_mode default_mode;
+ bool cmdline;
+ enum drm_connector_tv_mode cmdline_mode;
+ expected_mode_func_t *expected_modes;
+ unsigned int num_expected_modes;
+};
+
+#define _TV_MODE_TEST(_name, _supported, _default, _cmdline, _cmdline_mode, ...) \
+ { \
+ .name = _name, \
+ .supported_tv_modes = _supported, \
+ .default_mode = _default, \
+ .cmdline = _cmdline, \
+ .cmdline_mode = _cmdline_mode, \
+ .expected_modes = (expected_mode_func_t[]) { __VA_ARGS__ }, \
+ .num_expected_modes = sizeof((expected_mode_func_t[]) { __VA_ARGS__ }) / \
+ (sizeof(expected_mode_func_t)), \
+ }
+
+#define TV_MODE_TEST(_name, _supported, _default, ...) \
+ _TV_MODE_TEST(_name, _supported, _default, false, 0, __VA_ARGS__)
+
+#define TV_MODE_TEST_CMDLINE(_name, _supported, _default, _cmdline, ...) \
+ _TV_MODE_TEST(_name, _supported, _default, true, _cmdline, __VA_ARGS__)
+
+static void
+drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
+{
+ const struct drm_connector_helper_tv_get_modes_test *params = test->param_value;
+ struct drm_probe_helper_test_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
+ struct drm_display_mode *mode;
+ const struct drm_display_mode *expected;
+ size_t len;
+ int ret;
+
+ if (params->cmdline) {
+ cmdline->tv_mode_specified = true;
+ cmdline->tv_mode = params->cmdline_mode;
+ }
+
+ ret = drm_mode_create_tv_properties(priv->drm, params->supported_tv_modes);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_object_attach_property(&connector->base,
+ priv->drm->mode_config.tv_mode_property,
+ params->default_mode);
+
+ mutex_lock(&priv->drm->mode_config.mutex);
+
+ ret = drm_connector_helper_tv_get_modes(connector);
+ KUNIT_EXPECT_EQ(test, ret, params->num_expected_modes);
+
+ len = 0;
+ list_for_each_entry(mode, &connector->probed_modes, head)
+ len++;
+ KUNIT_EXPECT_EQ(test, len, params->num_expected_modes);
+
+ if (params->num_expected_modes >= 1) {
+ mode = list_first_entry_or_null(&connector->probed_modes,
+ struct drm_display_mode, head);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ expected = params->expected_modes[0](priv->drm);
+ KUNIT_ASSERT_NOT_NULL(test, expected);
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
+ KUNIT_EXPECT_TRUE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
+ }
+
+ if (params->num_expected_modes >= 2) {
+ mode = list_next_entry(mode, head);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ expected = params->expected_modes[1](priv->drm);
+ KUNIT_ASSERT_NOT_NULL(test, expected);
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
+ KUNIT_EXPECT_FALSE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
+ }
+
+ mutex_unlock(&priv->drm->mode_config.mutex);
+}
+
+static const
+struct drm_connector_helper_tv_get_modes_test drm_connector_helper_tv_get_modes_tests[] = {
+ { .name = "None" },
+ TV_MODE_TEST("PAL",
+ BIT(DRM_MODE_TV_MODE_PAL),
+ DRM_MODE_TV_MODE_PAL,
+ drm_mode_analog_pal_576i),
+ TV_MODE_TEST("NTSC",
+ BIT(DRM_MODE_TV_MODE_NTSC),
+ DRM_MODE_TV_MODE_NTSC,
+ drm_mode_analog_ntsc_480i),
+ TV_MODE_TEST("Both, NTSC Default",
+ BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
+ DRM_MODE_TV_MODE_NTSC,
+ drm_mode_analog_ntsc_480i, drm_mode_analog_pal_576i),
+ TV_MODE_TEST("Both, PAL Default",
+ BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
+ DRM_MODE_TV_MODE_PAL,
+ drm_mode_analog_pal_576i, drm_mode_analog_ntsc_480i),
+ TV_MODE_TEST_CMDLINE("Both, NTSC Default, with PAL on command-line",
+ BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
+ DRM_MODE_TV_MODE_NTSC,
+ DRM_MODE_TV_MODE_PAL,
+ drm_mode_analog_pal_576i, drm_mode_analog_ntsc_480i),
+ TV_MODE_TEST_CMDLINE("Both, PAL Default, with NTSC on command-line",
+ BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
+ DRM_MODE_TV_MODE_PAL,
+ DRM_MODE_TV_MODE_NTSC,
+ drm_mode_analog_ntsc_480i, drm_mode_analog_pal_576i),
+};
+
+static void
+drm_connector_helper_tv_get_modes_desc(const struct drm_connector_helper_tv_get_modes_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_helper_tv_get_modes,
+ drm_connector_helper_tv_get_modes_tests,
+ drm_connector_helper_tv_get_modes_desc);
+
+static struct kunit_case drm_test_connector_helper_tv_get_modes_tests[] = {
+ KUNIT_CASE_PARAM(drm_test_connector_helper_tv_get_modes_check,
+ drm_connector_helper_tv_get_modes_gen_params),
+ { }
+};
+
+static struct kunit_suite drm_test_connector_helper_tv_get_modes_suite = {
+ .name = "drm_connector_helper_tv_get_modes",
+ .init = drm_probe_helper_test_init,
+ .exit = drm_probe_helper_test_exit,
+ .test_cases = drm_test_connector_helper_tv_get_modes_tests,
+};
+
+kunit_test_suite(drm_test_connector_helper_tv_get_modes_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_LICENSE("GPL");
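The _TV_MODE_TEST() macro above counts its variadic expected-mode list with the usual compound-literal sizeof trick; the same idiom in isolation (standalone C with a hypothetical function type, so it can be compiled and checked on its own):

#include <stdio.h>

typedef int (*fn_t)(void);

static int a(void) { return 1; }
static int b(void) { return 2; }

/* sizeof applied to a compound-literal array, divided by the element size,
 * yields the number of variadic arguments at compile time.
 */
#define COUNT_FNS(...) \
	(sizeof((fn_t[]){ __VA_ARGS__ }) / sizeof(fn_t))

int main(void)
{
	printf("%zu\n", COUNT_FNS(a, b));	/* prints 2 */
	return 0;
}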
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index ad93acc9abd2..165365b515e1 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -1858,8 +1858,8 @@ static const struct {
{ DRM_FORMAT_XBGR4444, 0x21, },
{ DRM_FORMAT_RGBX4444, 0x22, },
- { DRM_FORMAT_ARGB1555, 0x25, },
- { DRM_FORMAT_ABGR1555, 0x26, },
+ { DRM_FORMAT_XRGB1555, 0x25, },
+ { DRM_FORMAT_XBGR1555, 0x26, },
{ DRM_FORMAT_XRGB8888, 0x27, },
{ DRM_FORMAT_XBGR8888, 0x28, },
@@ -2686,6 +2686,8 @@ int dispc_init(struct tidss_device *tidss)
dev_warn(dev, "cannot set DMA masks to 48-bit\n");
}
+ dma_set_max_seg_size(dev, UINT_MAX);
+
dispc = devm_kzalloc(dev, sizeof(*dispc), GFP_KERNEL);
if (!dispc)
return -ENOMEM;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 80615ecdae0b..4ca426007dc8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -496,7 +496,6 @@ static const struct drm_driver tilcdc_driver = {
* Power management:
*/
-#ifdef CONFIG_PM_SLEEP
static int tilcdc_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
@@ -518,11 +517,9 @@ static int tilcdc_pm_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
return drm_mode_config_helper_resume(ddev);
}
-#endif
-static const struct dev_pm_ops tilcdc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tilcdc_pm_suspend, tilcdc_pm_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(tilcdc_pm_ops,
+ tilcdc_pm_suspend, tilcdc_pm_resume);
/*
* Platform driver:
@@ -597,7 +594,7 @@ static struct platform_driver tilcdc_platform_driver = {
.remove = tilcdc_pdev_remove,
.driver = {
.name = "tilcdc",
- .pm = &tilcdc_pm_ops,
+ .pm = pm_sleep_ptr(&tilcdc_pm_ops),
.of_match_table = tilcdc_of_match,
},
};
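The tilcdc conversion above follows the now-preferred pattern for optional system-sleep support: define the ops unconditionally with DEFINE_SIMPLE_DEV_PM_OPS() and reference them through pm_sleep_ptr(), which evaluates to NULL when CONFIG_PM_SLEEP is disabled while still letting the compiler discard the unused callbacks, so the old #ifdef block becomes unnecessary. A generic sketch with hypothetical driver names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* save hardware state */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* restore hardware state */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops,
				example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		/* NULL when CONFIG_PM_SLEEP=n; the callbacks can then be dropped */
		.pm = pm_sleep_ptr(&example_pm_ops),
	},
};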
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 678c2ef1cae7..cf35b6090503 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -604,7 +604,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+ drm_fbdev_generic_setup(dev, 16);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 130fd07a967d..c5bb683e440c 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -4,6 +4,7 @@
*/
#include <linux/module.h>
+#include <linux/pm.h>
#include <linux/usb.h>
#include <drm/drm_atomic_helper.h>
@@ -718,15 +719,15 @@ static void gm12u320_usb_disconnect(struct usb_interface *interface)
drm_atomic_helper_shutdown(dev);
}
-static __maybe_unused int gm12u320_suspend(struct usb_interface *interface,
- pm_message_t message)
+static int gm12u320_suspend(struct usb_interface *interface,
+ pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
return drm_mode_config_helper_suspend(dev);
}
-static __maybe_unused int gm12u320_resume(struct usb_interface *interface)
+static int gm12u320_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
struct gm12u320_device *gm12u320 = to_gm12u320(dev);
@@ -747,11 +748,9 @@ static struct usb_driver gm12u320_usb_driver = {
.probe = gm12u320_usb_probe,
.disconnect = gm12u320_usb_disconnect,
.id_table = id_table,
-#ifdef CONFIG_PM
- .suspend = gm12u320_suspend,
- .resume = gm12u320_resume,
- .reset_resume = gm12u320_resume,
-#endif
+ .suspend = pm_ptr(gm12u320_suspend),
+ .resume = pm_ptr(gm12u320_resume),
+ .reset_resume = pm_ptr(gm12u320_resume),
};
module_usb_driver(gm12u320_usb_driver);
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 9f634f720817..cdc4486e059b 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -181,10 +181,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = yx240qv29_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(yx240qv29_enable),
};
static const struct drm_display_mode yx350hv15_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index ca0451f79962..bc4384d410fc 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -100,11 +100,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9163_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = yx240qv29_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
- .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(yx240qv29_enable),
};
static const struct drm_display_mode yx240qv29_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 815bab285823..077c6ff5a2e1 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -25,6 +25,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -76,9 +77,9 @@ static inline int ili9225_command(struct mipi_dbi *dbi, u8 cmd, u16 data)
return mipi_dbi_command_buf(dbi, cmd, par, 2);
}
-static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
+static void ili9225_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
+ struct drm_rect *rect)
{
- struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
@@ -86,13 +87,10 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
bool swap = dbi->swap_bytes;
u16 x_start, y_start;
u16 x1, x2, y1, y2;
- int idx, ret = 0;
+ int ret = 0;
bool full;
void *tr;
- if (!drm_dev_enter(fb->dev, &idx))
- return;
-
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -100,11 +98,11 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
- ret = mipi_dbi_buf_copy(dbidev->tx_buf, fb, rect, swap);
+ ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
if (ret)
goto err_msg;
} else {
- tr = dma_obj->vaddr;
+ tr = src->vaddr; /* TODO: Use mapping abstraction properly */
}
switch (dbidev->rotation) {
@@ -155,21 +153,27 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
-
- drm_dev_exit(idx);
}
static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
+ int idx;
if (!pipe->crtc.state->active)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- ili9225_fb_dirty(state->fb, &rect);
+ ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+
+ drm_dev_exit(idx);
}
static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
@@ -177,6 +181,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct device *dev = pipe->crtc.dev->dev;
struct mipi_dbi *dbi = &dbidev->dbi;
@@ -276,7 +281,8 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
- ili9225_fb_dirty(fb, &rect);
+ ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+
out_exit:
drm_dev_exit(idx);
}
@@ -326,9 +332,15 @@ static int ili9225_dbi_command(struct mipi_dbi *dbi, u8 *cmd, u8 *par,
}
static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = ili9225_pipe_enable,
.disable = ili9225_pipe_disable,
.update = ili9225_pipe_update,
+ .begin_fb_access = mipi_dbi_pipe_begin_fb_access,
+ .end_fb_access = mipi_dbi_pipe_end_fb_access,
+ .reset_plane = mipi_dbi_pipe_reset_plane,
+ .duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state,
+ .destroy_plane_state = mipi_dbi_pipe_destroy_plane_state,
};
static const struct drm_display_mode ili9225_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 420f6005a956..47b61c3bf145 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -137,10 +137,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = yx240qv29_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(yx240qv29_enable),
};
static const struct drm_display_mode yx240qv29_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 1bb847466b10..02265c898816 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -43,6 +43,7 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
size_t num)
{
struct spi_device *spi = mipi->spi;
+ unsigned int bpw = 8;
void *data = par;
u32 speed_hz;
int i, ret;
@@ -56,8 +57,6 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
* The displays are Raspberry Pi HATs and connected to the 8-bit only
* SPI controller, so 16-bit command and parameters need byte swapping
* before being transferred as 8-bit on the big endian SPI bus.
- * Pixel data bytes have already been swapped before this function is
- * called.
*/
buf[0] = cpu_to_be16(*cmd);
gpiod_set_value_cansleep(mipi->dc, 0);
@@ -71,12 +70,18 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
for (i = 0; i < num; i++)
buf[i] = cpu_to_be16(par[i]);
num *= 2;
- speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
data = buf;
}
+ /*
+ * Check whether the pixel data bytes need to be swapped or not
+ */
+ if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+ bpw = 16;
+
gpiod_set_value_cansleep(mipi->dc, 1);
- ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, data, num);
free:
kfree(buf);
@@ -150,10 +155,7 @@ static void waveshare_enable(struct drm_simple_display_pipe *pipe,
}
static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = waveshare_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(waveshare_enable),
};
static const struct drm_display_mode waveshare_mode = {
@@ -183,6 +185,8 @@ MODULE_DEVICE_TABLE(of, ili9486_of_match);
static const struct spi_device_id ili9486_id[] = {
{ "ili9486", 0 },
+ { "rpi-lcd-35", 0 },
+ { "piscreen", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, ili9486_id);
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index 47df2b5a3048..01ff43c8ac3f 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -141,10 +141,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = mi0283qt_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(mi0283qt_enable),
};
static const struct drm_display_mode mi0283qt_mode = {
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index dc9e4d71b12a..6e349ca42485 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -754,24 +754,6 @@ static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state)
kfree(ofdrm_crtc_state);
}
-/*
- * Support all formats of OF display and maybe more; in order
- * of preference. The display's update function will do any
- * conversion necessary.
- *
- * TODO: Add blit helpers for remaining formats and uncomment
- * constants.
- */
-static const uint32_t ofdrm_primary_plane_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565,
- //DRM_FORMAT_XRGB1555,
- //DRM_FORMAT_C8,
- /* Big-endian formats below */
- DRM_FORMAT_BGRX8888,
- DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN,
-};
-
static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
@@ -1284,21 +1266,12 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
dev->mode_config.min_height = height;
dev->mode_config.max_height = max_height;
dev->mode_config.funcs = &ofdrm_mode_config_funcs;
- switch (depth) {
- case 32:
- dev->mode_config.preferred_depth = 24;
- break;
- default:
- dev->mode_config.preferred_depth = depth;
- break;
- }
+ dev->mode_config.preferred_depth = format->depth;
dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
/* Primary plane */
nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- ofdrm_primary_plane_formats,
- ARRAY_SIZE(ofdrm_primary_plane_formats),
odev->formats, ARRAY_SIZE(odev->formats));
primary_plane = &odev->primary_plane;
@@ -1379,6 +1352,7 @@ static int ofdrm_probe(struct platform_device *pdev)
{
struct ofdrm_device *odev;
struct drm_device *dev;
+ unsigned int color_mode;
int ret;
odev = ofdrm_device_create(&ofdrm_driver, pdev);
@@ -1390,11 +1364,11 @@ static int ofdrm_probe(struct platform_device *pdev)
if (ret)
return ret;
- /*
- * FIXME: 24-bit color depth does not work reliably with a 32-bpp
- * value. Force the bpp value of the scanout buffer's format.
- */
- drm_fbdev_generic_setup(dev, drm_format_info_bpp(odev->format, 0));
+ color_mode = drm_format_info_bpp(odev->format, 0);
+ if (color_mode == 16)
+ color_mode = odev->format->depth; // can be 15 or 16
+
+ drm_fbdev_generic_setup(dev, color_mode);
return 0;
}
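The probe-time logic above picks the fbdev color mode from the scanout format: the bpp value in general, but for 16-bit formats the format depth, which distinguishes XRGB1555 (depth 15) from RGB565 (depth 16). Restated as a standalone helper, purely illustrative and not part of the patch:

static unsigned int example_fbdev_color_mode(const struct drm_format_info *format)
{
	unsigned int color_mode = drm_format_info_bpp(format, 0);

	/* XRGB1555 and RGB565 share bpp == 16; depth tells them apart. */
	if (color_mode == 16)
		color_mode = format->depth;

	return color_mode;
}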
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index 03a7d569cd56..eb9f13f18a02 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -212,10 +212,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs panel_mipi_dbi_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = panel_mipi_dbi_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(panel_mipi_dbi_enable),
};
DEFINE_DRM_GEM_DMA_FOPS(panel_mipi_dbi_fops);
@@ -297,6 +294,11 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(dbidev->regulator),
"Failed to get regulator 'power'\n");
+ dbidev->io_regulator = devm_regulator_get(dev, "io");
+ if (IS_ERR(dbidev->io_regulator))
+ return dev_err_probe(dev, PTR_ERR(dbidev->io_regulator),
+ "Failed to get regulator 'io'\n");
+
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
return dev_err_probe(dev, PTR_ERR(dbidev->backlight), "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 162eb44dcba8..f658b99c796a 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -446,25 +446,6 @@ static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
* Modesetting
*/
-/*
- * Support all formats of simplefb and maybe more; in order
- * of preference. The display's update function will do any
- * conversion necessary.
- *
- * TODO: Add blit helpers for remaining formats and uncomment
- * constants.
- */
-static const uint32_t simpledrm_primary_plane_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB565,
- //DRM_FORMAT_XRGB1555,
- //DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
-};
-
static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
@@ -739,14 +720,12 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
dev->mode_config.max_width = max_width;
dev->mode_config.min_height = height;
dev->mode_config.max_height = max_height;
- dev->mode_config.preferred_depth = format->cpp[0] * 8;
+ dev->mode_config.preferred_depth = format->depth;
dev->mode_config.funcs = &simpledrm_mode_config_funcs;
/* Primary plane */
nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- simpledrm_primary_plane_formats,
- ARRAY_SIZE(simpledrm_primary_plane_formats),
sdev->formats, ARRAY_SIZE(sdev->formats));
primary_plane = &sdev->primary_plane;
@@ -823,6 +802,7 @@ static int simpledrm_probe(struct platform_device *pdev)
{
struct simpledrm_device *sdev;
struct drm_device *dev;
+ unsigned int color_mode;
int ret;
sdev = simpledrm_device_create(&simpledrm_driver, pdev);
@@ -834,7 +814,11 @@ static int simpledrm_probe(struct platform_device *pdev)
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 0);
+ color_mode = drm_format_info_bpp(sdev->format, 0);
+ if (color_mode == 16)
+ color_mode = sdev->format->depth; // can be 15 or 16
+
+ drm_fbdev_generic_setup(dev, color_mode);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index ce57fa9917e5..3cf4eec16a81 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -92,32 +92,28 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
kfree(buf);
}
-static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
+static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *clip)
{
- struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
- void *src = dma_obj->vaddr;
- int ret = 0;
+ int ret;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
- st7586_xrgb8888_to_gray332(dst, src, fb, clip);
+ st7586_xrgb8888_to_gray332(dst, src->vaddr, fb, clip);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return 0;
}
-static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
+static void st7586_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
+ struct drm_rect *rect)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
struct mipi_dbi *dbi = &dbidev->dbi;
- int start, end, idx, ret = 0;
-
- if (!drm_dev_enter(fb->dev, &idx))
- return;
+ int start, end, ret = 0;
/* 3 pixels per byte, so grow clip to nearest multiple of 3 */
rect->x1 = rounddown(rect->x1, 3);
@@ -125,7 +121,7 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = st7586_buf_copy(dbidev->tx_buf, fb, rect);
+ ret = st7586_buf_copy(dbidev->tx_buf, src, fb, rect);
if (ret)
goto err_msg;
@@ -146,21 +142,27 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
-
- drm_dev_exit(idx);
}
static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
+ int idx;
if (!pipe->crtc.state->active)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- st7586_fb_dirty(state->fb, &rect);
+ st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+
+ drm_dev_exit(idx);
}
static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
@@ -168,6 +170,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct mipi_dbi *dbi = &dbidev->dbi;
struct drm_rect rect = {
@@ -235,7 +238,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
msleep(100);
- st7586_fb_dirty(fb, &rect);
+ st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
out_exit:
@@ -263,9 +266,15 @@ static const u32 st7586_formats[] = {
};
static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = st7586_pipe_enable,
.disable = st7586_pipe_disable,
.update = st7586_pipe_update,
+ .begin_fb_access = mipi_dbi_pipe_begin_fb_access,
+ .end_fb_access = mipi_dbi_pipe_end_fb_access,
+ .reset_plane = mipi_dbi_pipe_reset_plane,
+ .duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state,
+ .destroy_plane_state = mipi_dbi_pipe_destroy_plane_state,
};
static const struct drm_display_mode st7586_mode = {
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 15d9cf283c66..477eb36fbb70 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -133,10 +133,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = st7735r_pipe_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(st7735r_pipe_enable),
};
static const struct st7735r_cfg jd_t18003_t01_cfg = {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c3f4b33136e5..cd266a067773 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -31,8 +31,10 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
@@ -280,14 +282,13 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
ret = 0;
}
- if (ret || unlikely(list_empty(&bo->ddestroy))) {
+ if (ret) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
return ret;
}
- list_del_init(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
@@ -300,47 +301,21 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
}
/*
- * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
- * encountered buffers.
+ * Block for the dma_resv object to become idle, lock the buffer and clean up
+ * the resource and tt object.
*/
-bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
+static void ttm_bo_delayed_delete(struct work_struct *work)
{
- struct list_head removed;
- bool empty;
-
- INIT_LIST_HEAD(&removed);
-
- spin_lock(&bdev->lru_lock);
- while (!list_empty(&bdev->ddestroy)) {
- struct ttm_buffer_object *bo;
-
- bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
- ddestroy);
- list_move_tail(&bo->ddestroy, &removed);
- if (!ttm_bo_get_unless_zero(bo))
- continue;
-
- if (remove_all || bo->base.resv != &bo->base._resv) {
- spin_unlock(&bdev->lru_lock);
- dma_resv_lock(bo->base.resv, NULL);
-
- spin_lock(&bdev->lru_lock);
- ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+ struct ttm_buffer_object *bo;
- } else if (dma_resv_trylock(bo->base.resv)) {
- ttm_bo_cleanup_refs(bo, false, !remove_all, true);
- } else {
- spin_unlock(&bdev->lru_lock);
- }
-
- ttm_bo_put(bo);
- spin_lock(&bdev->lru_lock);
- }
- list_splice_tail(&removed, &bdev->ddestroy);
- empty = list_empty(&bdev->ddestroy);
- spin_unlock(&bdev->lru_lock);
+ bo = container_of(work, typeof(*bo), delayed_delete);
- return empty;
+ dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_cleanup_memtype_use(bo);
+ dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
}
static void ttm_bo_release(struct kref *kref)
@@ -369,69 +344,58 @@ static void ttm_bo_release(struct kref *kref)
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_free(bdev, bo->resource);
- }
- if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
- !dma_resv_trylock(bo->base.resv)) {
- /* The BO is not idle, resurrect it for delayed destroy */
- ttm_bo_flush_all_fences(bo);
- bo->deleted = true;
+ if (!dma_resv_test_signaled(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP) ||
+ !dma_resv_trylock(bo->base.resv)) {
+ /* The BO is not idle, resurrect it for delayed destroy */
+ ttm_bo_flush_all_fences(bo);
+ bo->deleted = true;
- spin_lock(&bo->bdev->lru_lock);
-
- /*
- * Make pinned bos immediately available to
- * shrinkers, now that they are queued for
- * destruction.
- *
- * FIXME: QXL is triggering this. Can be removed when the
- * driver is fixed.
- */
- if (bo->pin_count) {
- bo->pin_count = 0;
- ttm_resource_move_to_lru_tail(bo->resource);
- }
+ spin_lock(&bo->bdev->lru_lock);
- kref_init(&bo->kref);
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&bo->bdev->lru_lock);
+ /*
+ * Make pinned bos immediately available to
+ * shrinkers, now that they are queued for
+ * destruction.
+ *
+ * FIXME: QXL is triggering this. Can be removed when the
+ * driver is fixed.
+ */
+ if (bo->pin_count) {
+ bo->pin_count = 0;
+ ttm_resource_move_to_lru_tail(bo->resource);
+ }
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
- return;
- }
+ kref_init(&bo->kref);
+ spin_unlock(&bo->bdev->lru_lock);
- spin_lock(&bo->bdev->lru_lock);
- list_del(&bo->ddestroy);
- spin_unlock(&bo->bdev->lru_lock);
+ INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
+ queue_work(bdev->wq, &bo->delayed_delete);
+ return;
+ }
- ttm_bo_cleanup_memtype_use(bo);
- dma_resv_unlock(bo->base.resv);
+ ttm_bo_cleanup_memtype_use(bo);
+ dma_resv_unlock(bo->base.resv);
+ }
atomic_dec(&ttm_glob.bo_count);
bo->destroy(bo);
}
+/**
+ * ttm_bo_put
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference a buffer object.
+ */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
-{
- return cancel_delayed_work_sync(&bdev->wq);
-}
-EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
-
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
-{
- if (resched)
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
-}
-EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
-
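With the per-device ddestroy list gone, ttm_bo_lock_delayed_workqueue() and ttm_bo_unlock_delayed_workqueue() have no direct equivalent; drivers that paused delayed destruction around a reset would presumably flush the new per-device workqueue instead, in line with the drain_workqueue() call added to ttm_device_fini() further down. A hypothetical caller-side sketch, not part of this patch:

static void example_quiesce_delayed_delete(struct ttm_device *bdev)
{
	/* Wait for all queued ttm_bo_delayed_delete() work items to finish. */
	flush_workqueue(bdev->wq);
}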
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
struct ttm_resource **mem,
struct ttm_operation_ctx *ctx,
@@ -475,7 +439,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
bdev->funcs->evict_flags(bo, &placement);
if (!placement.num_placement && !placement.num_busy_placement) {
- ret = ttm_bo_wait(bo, true, false);
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;
@@ -512,6 +476,14 @@ out:
return ret;
}
+/**
+ * ttm_bo_eviction_valuable
+ *
+ * @bo: The buffer object to evict
+ * @place: the placement we need to make room for
+ *
+ * Check if it is valuable to evict the BO to make room for the given placement.
+ */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
@@ -771,13 +743,23 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}
-/*
- * Creates space for memory region @mem according to its type.
+/**
+ * ttm_bo_mem_space
*
- * This function first searches for free space in compatible memory types in
- * the priority order defined by the driver. If free space isn't found, then
- * ttm_bo_mem_force_space is attempted in priority order to evict and find
- * space.
+ * @bo: Pointer to a struct ttm_buffer_object, the data of which
+ * we want to allocate space for.
+ * @placement: Proposed new placement for the buffer object.
+ * @mem: A struct ttm_resource.
+ * @ctx: if and how to sleep, lock buffers and alloc memory
+ *
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @placement, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == 1).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
@@ -883,6 +865,21 @@ out:
return ret;
}
+/**
+ * ttm_bo_validate
+ *
+ * @bo: The buffer object.
+ * @placement: Proposed placement for the buffer object.
+ * @ctx: validation parameters.
+ *
+ * Changes placement and caching policy of the buffer object
+ * according to the proposed placement.
+ * Returns
+ * -EINVAL on invalid proposed placement.
+ * -ENOMEM on out-of-memory condition.
+ * -EBUSY if no_wait is true and buffer busy.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
@@ -960,7 +957,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
int ret;
kref_init(&bo->kref);
- INIT_LIST_HEAD(&bo->ddestroy);
bo->bdev = bdev;
bo->type = type;
bo->page_alignment = alignment;
@@ -1076,6 +1072,11 @@ EXPORT_SYMBOL(ttm_bo_init_validate);
* buffer object vm functions.
*/
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: The buffer object whose virtual mappings are to be torn down.
+ */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
struct ttm_device *bdev = bo->bdev;
@@ -1085,6 +1086,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
+/**
+ * ttm_bo_wait - wait for buffer idle.
+ *
+ * @bo: The buffer object.
+ * @interruptible: Use interruptible wait.
+ * @no_wait: Return immediately if buffer is busy.
+ *
+ * This function must be called with the bo::mutex held, and makes
+ * sure any previous rendering to the buffer is completed.
+ * Note: It might be necessary to block validations before the
+ * wait by reserving the buffer.
+ * Returns -EBUSY if no_wait is true and the buffer is busy.
+ * Returns -ERESTARTSYS if interrupted by a signal.
+ */
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait)
{
@@ -1109,6 +1124,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_wait);
+int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+ return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+}
+EXPORT_SYMBOL(ttm_bo_wait_ctx);
+
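ttm_bo_wait_ctx() simply forwards the interruptible/no_wait_gpu settings of a ttm_operation_ctx, as the two converted call sites in this patch do. A minimal, hypothetical usage sketch:

static int example_wait_idle(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};

	/* Equivalent to ttm_bo_wait(bo, true, false) before this change. */
	return ttm_bo_wait_ctx(bo, &ctx);
}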
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
@@ -1169,7 +1190,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
/*
* Make sure BO is idle.
*/
- ret = ttm_bo_wait(bo, false, false);
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ba3aa0a0fc43..fee7c20775c0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -29,18 +29,11 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
#include <drm/drm_cache.h>
-#include <drm/drm_vma_manager.h>
-#include <linux/iosys-map.h>
-#include <linux/io.h>
-#include <linux/highmem.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/dma-resv.h>
struct ttm_transfer_obj {
struct ttm_buffer_object base;
@@ -128,6 +121,23 @@ void ttm_move_memcpy(bool clear,
}
EXPORT_SYMBOL(ttm_move_memcpy);
+/**
+ * ttm_bo_move_memcpy
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @ctx: operation context; controls interruptible sleeps and GPU waits.
+ * @dst_mem: struct ttm_resource indicating where to move.
+ *
+ * Fallback move function for a mappable buffer object in mappable memory.
+ * The function will, if successful,
+ * free any old aperture space, and set (@dst_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
struct ttm_resource *dst_mem)
@@ -230,7 +240,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
*/
atomic_inc(&ttm_glob.bo_count);
- INIT_LIST_HEAD(&fbo->base.ddestroy);
drm_vma_node_reset(&fbo->base.base.vma_node);
kref_init(&fbo->base.kref);
@@ -267,6 +276,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
return 0;
}
+/**
+ * ttm_io_prot
+ *
+ * @bo: ttm buffer object
+ * @res: ttm resource object
+ * @tmp: Page protection flag for a normal, cached mapping.
+ *
+ * Utility function that returns the pgprot_t that should be used for
+ * setting up a PTE with the caching model indicated by @res.
+ */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
pgprot_t tmp)
{
@@ -348,6 +367,22 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
return (!map->virtual) ? -ENOMEM : 0;
}
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
@@ -375,6 +410,13 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_kmap);
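As the new kernel-doc above notes, the mapping set up by ttm_bo_kmap() is accessed through ttm_kmap_obj_virtual(). A hypothetical usage sketch:

static int example_touch_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *vaddr;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;

	vaddr = ttm_kmap_obj_virtual(&map, &is_iomem);
	/* ... access vaddr, honouring is_iomem for I/O memory ... */

	ttm_bo_kunmap(&map);
	return 0;
}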
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
if (!map->virtual)
@@ -400,6 +442,20 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
}
EXPORT_SYMBOL(ttm_bo_kunmap);
+/**
+ * ttm_bo_vmap
+ *
+ * @bo: The buffer object.
+ * @map: pointer to a struct iosys_map representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap or vmap to the
+ * data in the buffer object. The parameter @map returns the virtual
+ * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
struct ttm_resource *mem = bo->resource;
@@ -461,6 +517,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
}
EXPORT_SYMBOL(ttm_bo_vmap);
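The vmap variant documented above returns the mapping as a struct iosys_map, to be released with ttm_bo_vunmap(). A hypothetical usage sketch:

static int example_clear_bo(struct ttm_buffer_object *bo)
{
	struct iosys_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);
	if (ret)
		return ret;

	iosys_map_memset(&map, 0, 0, bo->base.size);
	ttm_bo_vunmap(bo, &map);
	return 0;
}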
+/**
+ * ttm_bo_vunmap
+ *
+ * @bo: The buffer object.
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_vmap().
+ */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
struct ttm_resource *mem = bo->resource;
@@ -554,6 +618,22 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
ttm_resource_free(bo, &bo->resource);
}
+/**
+ * ttm_bo_move_accel_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @fence: A fence object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @pipeline: evictions are to be pipelined.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Accelerated move function to be called when an accelerated move
+ * has been scheduled. The function will create a new temporary buffer object
+ * representing the old placement, and put the sync object on both buffer
+ * objects. After that the newly created buffer object is unref'd to be
+ * destroyed when the move is complete. This will help pipeline
+ * buffer moves.
+ */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct dma_fence *fence,
bool evict,
@@ -582,6 +662,15 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+/**
+ * ttm_bo_move_sync_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
+ * by the caller to be idle. Typically used after memcpy buffer moves.
+ */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 5a3e4b891377..3ecda6db24b8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,17 +31,12 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/drm_vma_manager.h>
+#include <drm/ttm/ttm_tt.h>
+
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
-#include <linux/mm.h>
-#include <linux/pfn_t.h>
-#include <linux/rbtree.h>
-#include <linux/module.h>
-#include <linux/uaccess.h>
-#include <linux/mem_encrypt.h>
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
@@ -446,6 +441,14 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.access = ttm_bo_vm_access,
};
+/**
+ * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
+ *
+ * @vma: vma as input from the fbdev mmap method.
+ * @bo: The bo backing the address space.
+ *
+ * Maps a buffer object.
+ */
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
/* Enforce no COW since would have really strange behavior with it. */
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index e7147e304637..c7a1862f322a 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -29,10 +29,10 @@
#include <linux/mm.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_bo_api.h>
#include "ttm_module.h"
@@ -175,16 +175,6 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
}
EXPORT_SYMBOL(ttm_device_swapout);
-static void ttm_device_delayed_workqueue(struct work_struct *work)
-{
- struct ttm_device *bdev =
- container_of(work, struct ttm_device, wq.work);
-
- if (!ttm_bo_delayed_delete(bdev, false))
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
-}
-
/**
* ttm_device_init
*
@@ -215,15 +205,19 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
if (ret)
return ret;
+ bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16);
+ if (!bdev->wq) {
+ ttm_global_release();
+ return -ENOMEM;
+ }
+
bdev->funcs = funcs;
ttm_sys_man_init(bdev);
ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
- INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
spin_lock_init(&bdev->lru_lock);
- INIT_LIST_HEAD(&bdev->ddestroy);
INIT_LIST_HEAD(&bdev->pinned);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
@@ -247,10 +241,8 @@ void ttm_device_fini(struct ttm_device *bdev)
list_del(&bdev->device_list);
mutex_unlock(&ttm_global_mutex);
- cancel_delayed_work_sync(&bdev->wq);
-
- if (ttm_bo_delayed_delete(bdev, true))
- pr_debug("Delayed destroy list was clean\n");
+ drain_workqueue(bdev->wq);
+ destroy_workqueue(bdev->wq);
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index dbee34a058df..f1c60fa80c2d 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -27,11 +27,7 @@
**************************************************************************/
#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/module.h>
+#include <drm/ttm/ttm_bo.h>
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
struct ttm_validate_buffer *entry)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 9f6764bf3b15..aa116a7bbae3 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
@@ -41,8 +42,8 @@
#endif
#include <drm/ttm/ttm_pool.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_bo.h>
#include "ttm_module.h"
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 0a8bc0b7f380..ae11d07eb63a 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -32,7 +32,7 @@
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 328391bb1d87..b8a826a24fb2 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -26,8 +26,9 @@
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
-#include <drm/ttm/ttm_bo_driver.h>
/**
* ttm_lru_bulk_move_init - initialize a bulk move structure
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d505603930a7..ab725d9d14a6 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -36,7 +36,8 @@
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_tt.h>
#include "ttm_module.h"
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index efbde124c296..330669f51fa7 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -79,8 +79,8 @@ static const struct v3d_reg_def v3d_csd_reg_defs[] = {
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
int i, core;
@@ -126,8 +126,8 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
u32 ident0, ident1, ident2, ident3, cores;
int core;
@@ -188,8 +188,8 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
mutex_lock(&v3d->bo_lock);
@@ -204,8 +204,8 @@ static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
static int v3d_measure_clock(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
uint32_t cycles;
int core = 0;
@@ -236,7 +236,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
return 0;
}
-static const struct drm_info_list v3d_debugfs_list[] = {
+static const struct drm_debugfs_info v3d_debugfs_list[] = {
{"v3d_ident", v3d_v3d_debugfs_ident, 0},
{"v3d_regs", v3d_v3d_debugfs_regs, 0},
{"measure_clock", v3d_measure_clock, 0},
@@ -246,7 +246,5 @@ static const struct drm_info_list v3d_debugfs_list[] = {
void
v3d_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(v3d_debugfs_list,
- ARRAY_SIZE(v3d_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, v3d_debugfs_list, ARRAY_SIZE(v3d_debugfs_list));
}
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 96af1cb5202a..5da1806f3969 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -299,10 +299,6 @@ v3d_lookup_bos(struct drm_device *dev,
u64 bo_handles,
u32 bo_count)
{
- u32 *handles;
- int ret = 0;
- int i;
-
job->bo_count = bo_count;
if (!job->bo_count) {
@@ -313,48 +309,9 @@ v3d_lookup_bos(struct drm_device *dev,
return -EINVAL;
}
- job->bo = kvmalloc_array(job->bo_count,
- sizeof(struct drm_gem_dma_object *),
- GFP_KERNEL | __GFP_ZERO);
- if (!job->bo) {
- DRM_DEBUG("Failed to allocate validated BO pointers\n");
- return -ENOMEM;
- }
-
- handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
- if (!handles) {
- ret = -ENOMEM;
- DRM_DEBUG("Failed to allocate incoming GEM handles\n");
- goto fail;
- }
-
- if (copy_from_user(handles,
- (void __user *)(uintptr_t)bo_handles,
- job->bo_count * sizeof(u32))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy in GEM handles\n");
- goto fail;
- }
-
- spin_lock(&file_priv->table_lock);
- for (i = 0; i < job->bo_count; i++) {
- struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
- handles[i]);
- if (!bo) {
- DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
- i, handles[i]);
- ret = -ENOENT;
- spin_unlock(&file_priv->table_lock);
- goto fail;
- }
- drm_gem_object_get(bo);
- job->bo[i] = bo;
- }
- spin_unlock(&file_priv->table_lock);
-
-fail:
- kvfree(handles);
- return ret;
+ return drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)bo_handles,
+ job->bo_count, &job->bo);
}
static void
@@ -363,11 +320,11 @@ v3d_job_free(struct kref *ref)
struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
int i;
- for (i = 0; i < job->bo_count; i++) {
- if (job->bo[i])
+ if (job->bo) {
+ for (i = 0; i < job->bo_count; i++)
drm_gem_object_put(job->bo[i]);
+ kvfree(job->bo);
}
- kvfree(job->bo);
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
@@ -904,7 +861,6 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
job->args = *args;
- spin_lock(&file_priv->table_lock);
for (job->base.bo_count = 0;
job->base.bo_count < ARRAY_SIZE(args->bo_handles);
job->base.bo_count++) {
@@ -913,20 +869,16 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
if (!args->bo_handles[job->base.bo_count])
break;
- bo = idr_find(&file_priv->object_idr,
- args->bo_handles[job->base.bo_count]);
+ bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
if (!bo) {
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
job->base.bo_count,
args->bo_handles[job->base.bo_count]);
ret = -ENOENT;
- spin_unlock(&file_priv->table_lock);
goto fail;
}
- drm_gem_object_get(bo);
job->base.bo[job->base.bo_count] = bo;
}
- spin_unlock(&file_priv->table_lock);
ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
if (ret)
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index b450f449a3ab..12fee99dbfe8 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -102,7 +102,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
vbox_hw_fini(vbox);
}
-#ifdef CONFIG_PM_SLEEP
static int vbox_pm_suspend(struct device *dev)
{
struct vbox_private *vbox = dev_get_drvdata(dev);
@@ -160,16 +159,13 @@ static const struct dev_pm_ops vbox_pm_ops = {
.poweroff = vbox_pm_poweroff,
.restore = vbox_pm_resume,
};
-#endif
static struct pci_driver vbox_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = vbox_pci_probe,
.remove = vbox_pci_remove,
-#ifdef CONFIG_PM_SLEEP
- .driver.pm = &vbox_pm_ops,
-#endif
+ .driver.pm = pm_sleep_ptr(&vbox_pm_ops),
};
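The CONFIG_PM_SLEEP #ifdefs can be dropped because pm_sleep_ptr() yields the pointer only when sleep support is built in and NULL otherwise, while keeping the callbacks visible to the compiler so unreferenced ones are discarded. Conceptually, in simplified form (see <linux/pm.h> for the real definition):

#define pm_sleep_ptr(_ptr) (IS_ENABLED(CONFIG_PM_SLEEP) ? (_ptr) : NULL)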
DEFINE_DRM_GEM_FOPS(vbox_fops);
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 246305d17a52..f423941c028d 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -34,3 +34,19 @@ config DRM_VC4_HDMI_CEC
help
Choose this option if you have a Broadcom VC4 GPU
and want to use CEC.
+
+config DRM_VC4_KUNIT_TEST
+ bool "KUnit tests for VC4" if !KUNIT_ALL_TESTS
+ depends on DRM_VC4 && KUNIT
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for the VC4 DRM/KMS driver. This option is
+ not useful for distributions or general kernels, but only for kernel
+ developers working on the VC4 driver.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index d0163e18e9ca..c41f89a15a55 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -25,6 +25,13 @@ vc4-y := \
vc4_validate.o \
vc4_validate_shaders.o
+vc4-$(CONFIG_DRM_VC4_KUNIT_TEST) += \
+ tests/vc4_mock.o \
+ tests/vc4_mock_crtc.o \
+ tests/vc4_mock_output.o \
+ tests/vc4_mock_plane.o \
+ tests/vc4_test_pv_muxing.o
+
vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
obj-$(CONFIG_DRM_VC4) += vc4.o
diff --git a/drivers/gpu/drm/vc4/tests/.kunitconfig b/drivers/gpu/drm/vc4/tests/.kunitconfig
new file mode 100644
index 000000000000..b503e9036c7f
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/.kunitconfig
@@ -0,0 +1,13 @@
+CONFIG_ARCH_BCM=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_VC4=y
+CONFIG_DRM_VC4_KUNIT_TEST=y
+CONFIG_MAILBOX=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SOUND=y
+CONFIG_COMMON_CLK=y
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c
new file mode 100644
index 000000000000..a4bed26af32f
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/test.h>
+
+#include "vc4_mock.h"
+
+struct vc4_mock_output_desc {
+ enum vc4_encoder_type vc4_encoder_type;
+ unsigned int encoder_type;
+ unsigned int connector_type;
+};
+
+#define VC4_MOCK_OUTPUT_DESC(_vc4_type, _etype, _ctype) \
+ { \
+ .vc4_encoder_type = _vc4_type, \
+ .encoder_type = _etype, \
+ .connector_type = _ctype, \
+ }
+
+struct vc4_mock_pipe_desc {
+ const struct vc4_crtc_data *data;
+ const struct vc4_mock_output_desc *outputs;
+ unsigned int noutputs;
+};
+
+#define VC4_MOCK_CRTC_DESC(_data, ...) \
+ { \
+ .data = _data, \
+ .outputs = (struct vc4_mock_output_desc[]) { __VA_ARGS__ }, \
+ .noutputs = sizeof((struct vc4_mock_output_desc[]) { __VA_ARGS__ }) / \
+ sizeof(struct vc4_mock_output_desc), \
+ }
+
+#define VC4_MOCK_PIXELVALVE_DESC(_data, ...) \
+ VC4_MOCK_CRTC_DESC(&(_data)->base, __VA_ARGS__)
+
+struct vc4_mock_desc {
+ const struct vc4_mock_pipe_desc *pipes;
+ unsigned int npipes;
+};
+
+#define VC4_MOCK_DESC(...) \
+ { \
+ .pipes = (struct vc4_mock_pipe_desc[]) { __VA_ARGS__ }, \
+ .npipes = sizeof((struct vc4_mock_pipe_desc[]) { __VA_ARGS__ }) / \
+ sizeof(struct vc4_mock_pipe_desc), \
+ }
+
+static const struct vc4_mock_desc vc4_mock =
+ VC4_MOCK_DESC(
+ VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
+ DRM_MODE_ENCODER_VIRTUAL,
+ DRM_MODE_CONNECTOR_WRITEBACK)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv0_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI0,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_CONNECTOR_DSI),
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DPI,
+ DRM_MODE_ENCODER_DPI,
+ DRM_MODE_CONNECTOR_DPI)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv1_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI1,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_CONNECTOR_DSI)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv2_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_HDMI0,
+ DRM_MODE_ENCODER_TMDS,
+ DRM_MODE_CONNECTOR_HDMIA),
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_VEC,
+ DRM_MODE_ENCODER_TVDAC,
+ DRM_MODE_CONNECTOR_Composite)),
+);
+
+static const struct vc4_mock_desc vc5_mock =
+ VC4_MOCK_DESC(
+ VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
+ DRM_MODE_ENCODER_VIRTUAL,
+ DRM_MODE_CONNECTOR_WRITEBACK)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv0_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI0,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_CONNECTOR_DSI),
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DPI,
+ DRM_MODE_ENCODER_DPI,
+ DRM_MODE_CONNECTOR_DPI)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv1_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI1,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_CONNECTOR_DSI)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv2_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_HDMI0,
+ DRM_MODE_ENCODER_TMDS,
+ DRM_MODE_CONNECTOR_HDMIA)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv3_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_VEC,
+ DRM_MODE_ENCODER_TVDAC,
+ DRM_MODE_CONNECTOR_Composite)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv4_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_HDMI1,
+ DRM_MODE_ENCODER_TMDS,
+ DRM_MODE_CONNECTOR_HDMIA)),
+);
+
+static int __build_one_pipe(struct kunit *test, struct drm_device *drm,
+ const struct vc4_mock_pipe_desc *pipe)
+{
+ struct vc4_dummy_plane *dummy_plane;
+ struct drm_plane *plane;
+ struct vc4_dummy_crtc *dummy_crtc;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ dummy_plane = vc4_dummy_plane(test, drm, DRM_PLANE_TYPE_PRIMARY);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_plane);
+
+ plane = &dummy_plane->plane.base;
+ dummy_crtc = vc4_mock_pv(test, drm, plane, pipe->data);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_crtc);
+
+ crtc = &dummy_crtc->crtc.base;
+ for (i = 0; i < pipe->noutputs; i++) {
+ const struct vc4_mock_output_desc *mock_output = &pipe->outputs[i];
+ struct vc4_dummy_output *dummy_output;
+
+ dummy_output = vc4_dummy_output(test, drm, crtc,
+ mock_output->vc4_encoder_type,
+ mock_output->encoder_type,
+ mock_output->connector_type);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
+ }
+
+ return 0;
+}
+
+static int __build_mock(struct kunit *test, struct drm_device *drm,
+ const struct vc4_mock_desc *mock)
+{
+ unsigned int i;
+
+ for (i = 0; i < mock->npipes; i++) {
+ const struct vc4_mock_pipe_desc *pipe = &mock->pipes[i];
+ int ret;
+
+ ret = __build_one_pipe(test, drm, pipe);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ }
+
+ return 0;
+}
+
+static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
+{
+ struct drm_device *drm;
+ const struct drm_driver *drv = is_vc5 ? &vc5_drm_driver : &vc4_drm_driver;
+ const struct vc4_mock_desc *desc = is_vc5 ? &vc5_mock : &vc4_mock;
+ struct vc4_dev *vc4;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ vc4 = drm_kunit_helper_alloc_drm_device_with_driver(test, dev,
+ struct vc4_dev, base,
+ drv);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+
+ vc4->dev = dev;
+ vc4->is_vc5 = is_vc5;
+
+ vc4->hvs = __vc4_hvs_alloc(vc4, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4->hvs);
+
+ drm = &vc4->base;
+ ret = __build_mock(test, drm, desc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = vc4_kms_load(drm);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_dev_register(drm, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return vc4;
+}
+
+struct vc4_dev *vc4_mock_device(struct kunit *test)
+{
+ return __mock_device(test, false);
+}
+
+struct vc4_dev *vc5_mock_device(struct kunit *test)
+{
+ return __mock_device(test, true);
+}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.h b/drivers/gpu/drm/vc4/tests/vc4_mock.h
new file mode 100644
index 000000000000..db8e9a141ef8
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef VC4_MOCK_H_
+#define VC4_MOCK_H_
+
+#include "../vc4_drv.h"
+
+static inline
+struct drm_crtc *vc4_find_crtc_for_encoder(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_encoder *encoder)
+{
+ struct drm_crtc *crtc;
+
+ KUNIT_ASSERT_EQ(test, hweight32(encoder->possible_crtcs), 1);
+
+ drm_for_each_crtc(crtc, drm)
+ if (encoder->possible_crtcs & drm_crtc_mask(crtc))
+ return crtc;
+
+ return NULL;
+}
+
+struct vc4_dummy_plane {
+ struct vc4_plane plane;
+};
+
+struct vc4_dummy_plane *vc4_dummy_plane(struct kunit *test,
+ struct drm_device *drm,
+ enum drm_plane_type type);
+
+struct vc4_dummy_crtc {
+ struct vc4_crtc crtc;
+};
+
+struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_plane *plane,
+ const struct vc4_crtc_data *data);
+
+struct vc4_dummy_output {
+ struct vc4_encoder encoder;
+ struct drm_connector connector;
+};
+
+struct vc4_dummy_output *vc4_dummy_output(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ enum vc4_encoder_type vc4_encoder_type,
+ unsigned int kms_encoder_type,
+ unsigned int connector_type);
+
+struct vc4_dev *vc4_mock_device(struct kunit *test);
+struct vc4_dev *vc5_mock_device(struct kunit *test);
+
+int vc4_mock_atomic_add_output(struct kunit *test,
+ struct drm_atomic_state *state,
+ enum vc4_encoder_type type);
+int vc4_mock_atomic_del_output(struct kunit *test,
+ struct drm_atomic_state *state,
+ enum vc4_encoder_type type);
+
+#endif // VC4_MOCK_H_
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
new file mode 100644
index 000000000000..5d12d7beef0e
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#include <kunit/test.h>
+
+#include "vc4_mock.h"
+
+static const struct drm_crtc_helper_funcs vc4_dummy_crtc_helper_funcs = {
+ .atomic_check = vc4_crtc_atomic_check,
+};
+
+static const struct drm_crtc_funcs vc4_dummy_crtc_funcs = {
+ .atomic_destroy_state = vc4_crtc_destroy_state,
+ .atomic_duplicate_state = vc4_crtc_duplicate_state,
+ .reset = vc4_crtc_reset,
+};
+
+struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_plane *plane,
+ const struct vc4_crtc_data *data)
+{
+ struct vc4_dummy_crtc *dummy_crtc;
+ struct vc4_crtc *vc4_crtc;
+ int ret;
+
+ dummy_crtc = kunit_kzalloc(test, sizeof(*dummy_crtc), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, dummy_crtc);
+
+ vc4_crtc = &dummy_crtc->crtc;
+ ret = __vc4_crtc_init(drm, NULL,
+ vc4_crtc, data, plane,
+ &vc4_dummy_crtc_funcs,
+ &vc4_dummy_crtc_helper_funcs,
+ false);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return dummy_crtc;
+}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
new file mode 100644
index 000000000000..8d33be828d9a
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#include <kunit/test.h>
+
+#include "vc4_mock.h"
+
+static const struct drm_connector_helper_funcs vc4_dummy_connector_helper_funcs = {
+};
+
+static const struct drm_connector_funcs vc4_dummy_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+struct vc4_dummy_output *vc4_dummy_output(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ enum vc4_encoder_type vc4_encoder_type,
+ unsigned int kms_encoder_type,
+ unsigned int connector_type)
+{
+ struct vc4_dummy_output *dummy_output;
+ struct drm_connector *conn;
+ struct drm_encoder *enc;
+ int ret;
+
+ dummy_output = kunit_kzalloc(test, sizeof(*dummy_output), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
+ dummy_output->encoder.type = vc4_encoder_type;
+
+ enc = &dummy_output->encoder.base;
+ ret = drmm_encoder_init(drm, enc,
+ NULL,
+ kms_encoder_type,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ enc->possible_crtcs = drm_crtc_mask(crtc);
+
+ conn = &dummy_output->connector;
+ ret = drmm_connector_init(drm, conn,
+ &vc4_dummy_connector_funcs,
+ connector_type,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_connector_helper_add(conn, &vc4_dummy_connector_helper_funcs);
+ drm_connector_attach_encoder(conn, enc);
+
+ return dummy_output;
+}
+
+static const struct drm_display_mode default_mode = {
+ DRM_SIMPLE_MODE(640, 480, 64, 48)
+};
+
+int vc4_mock_atomic_add_output(struct kunit *test,
+ struct drm_atomic_state *state,
+ enum vc4_encoder_type type)
+{
+ struct drm_device *drm = state->dev;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct vc4_dummy_output *output;
+ struct drm_connector *conn;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+ int ret;
+
+ encoder = vc4_find_encoder_by_type(drm, type);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+
+ crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+
+ output = container_of(encoder, struct vc4_dummy_output, encoder.base);
+ conn = &output->connector;
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ crtc_state->active = true;
+
+ return 0;
+}
+
+int vc4_mock_atomic_del_output(struct kunit *test,
+ struct drm_atomic_state *state,
+ enum vc4_encoder_type type)
+{
+ struct drm_device *drm = state->dev;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct vc4_dummy_output *output;
+ struct drm_connector *conn;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+ int ret;
+
+ encoder = vc4_find_encoder_by_type(drm, type);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+
+ crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ crtc_state->active = false;
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ output = container_of(encoder, struct vc4_dummy_output, encoder.base);
+ conn = &output->connector;
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_plane.c b/drivers/gpu/drm/vc4/tests/vc4_mock_plane.c
new file mode 100644
index 000000000000..62b18f5f41db
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_plane.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane.h>
+
+#include <kunit/test.h>
+
+#include "vc4_mock.h"
+
+static const struct drm_plane_helper_funcs vc4_dummy_plane_helper_funcs = {
+};
+
+static const struct drm_plane_funcs vc4_dummy_plane_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .reset = drm_atomic_helper_plane_reset,
+};
+
+static const uint32_t vc4_dummy_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+struct vc4_dummy_plane *vc4_dummy_plane(struct kunit *test,
+ struct drm_device *drm,
+ enum drm_plane_type type)
+{
+ struct vc4_dummy_plane *dummy_plane;
+ struct drm_plane *plane;
+
+ dummy_plane = drmm_universal_plane_alloc(drm,
+ struct vc4_dummy_plane, plane.base,
+ 0,
+ &vc4_dummy_plane_funcs,
+ vc4_dummy_plane_formats,
+ ARRAY_SIZE(vc4_dummy_plane_formats),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_plane);
+
+ plane = &dummy_plane->plane.base;
+ drm_plane_helper_add(plane, &vc4_dummy_plane_helper_funcs);
+
+ return dummy_plane;
+}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
new file mode 100644
index 000000000000..ae0bd0f81698
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -0,0 +1,1039 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane.h>
+
+#include <kunit/test.h>
+
+#include "../vc4_drv.h"
+
+#include "vc4_mock.h"
+
+struct pv_muxing_priv {
+ struct vc4_dev *vc4;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+};
+
+static bool check_fifo_conflict(struct kunit *test,
+ const struct drm_atomic_state *state)
+{
+ struct vc4_hvs_state *hvs_state;
+ unsigned int used_fifos = 0;
+ unsigned int i;
+
+ hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hvs_state);
+
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ if (!hvs_state->fifo_state[i].in_use)
+ continue;
+
+ KUNIT_EXPECT_FALSE(test, used_fifos & BIT(i));
+ used_fifos |= BIT(i);
+ }
+
+ return true;
+}
+
+struct encoder_constraint {
+ enum vc4_encoder_type type;
+ unsigned int *channels;
+ size_t nchannels;
+};
+
+#define ENCODER_CONSTRAINT(_type, ...) \
+ { \
+ .type = _type, \
+ .channels = (unsigned int[]) { __VA_ARGS__ }, \
+ .nchannels = sizeof((unsigned int[]) { __VA_ARGS__ }) / \
+ sizeof(unsigned int), \
+ }
+
+static bool __check_encoder_constraints(const struct encoder_constraint *constraints,
+ size_t nconstraints,
+ enum vc4_encoder_type type,
+ unsigned int channel)
+{
+ unsigned int i;
+
+ for (i = 0; i < nconstraints; i++) {
+ const struct encoder_constraint *constraint = &constraints[i];
+ unsigned int j;
+
+ if (constraint->type != type)
+ continue;
+
+ for (j = 0; j < constraint->nchannels; j++) {
+ unsigned int _channel = constraint->channels[j];
+
+ if (channel != _channel)
+ continue;
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static const struct encoder_constraint vc4_encoder_constraints[] = {
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DPI, 0),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 1),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 2),
+};
+
+static const struct encoder_constraint vc5_encoder_constraints[] = {
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DPI, 0),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 0, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 0, 1, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 0, 1, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI1, 0, 1, 2),
+};
+
+static bool check_vc4_encoder_constraints(enum vc4_encoder_type type, unsigned int channel)
+{
+ return __check_encoder_constraints(vc4_encoder_constraints,
+ ARRAY_SIZE(vc4_encoder_constraints),
+ type, channel);
+}
+
+static bool check_vc5_encoder_constraints(enum vc4_encoder_type type, unsigned int channel)
+{
+ return __check_encoder_constraints(vc5_encoder_constraints,
+ ARRAY_SIZE(vc5_encoder_constraints),
+ type, channel);
+}
+
+static struct vc4_crtc_state *
+get_vc4_crtc_state_for_encoder(struct kunit *test,
+ const struct drm_atomic_state *state,
+ enum vc4_encoder_type type)
+{
+ struct drm_device *drm = state->dev;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ encoder = vc4_find_encoder_by_type(drm, type);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+
+ crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state)
+ return NULL;
+
+ return to_vc4_crtc_state(new_crtc_state);
+}
+
+static bool check_channel_for_encoder(struct kunit *test,
+ const struct drm_atomic_state *state,
+ enum vc4_encoder_type type,
+ bool (*check_fn)(enum vc4_encoder_type type, unsigned int channel))
+{
+ struct vc4_crtc_state *new_vc4_crtc_state;
+ struct vc4_hvs_state *new_hvs_state;
+ unsigned int channel;
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state, type);
+ KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);
+
+ channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_EXPECT_NE(test, channel, VC4_HVS_CHANNEL_DISABLED);
+
+ KUNIT_EXPECT_TRUE(test, new_hvs_state->fifo_state[channel].in_use);
+
+ KUNIT_EXPECT_TRUE(test, check_fn(type, channel));
+
+ return true;
+}
+
+struct pv_muxing_param {
+ const char *name;
+ struct vc4_dev *(*mock_fn)(struct kunit *test);
+ bool (*check_fn)(enum vc4_encoder_type type, unsigned int channel);
+ enum vc4_encoder_type *encoders;
+ size_t nencoders;
+};
+
+static void vc4_test_pv_muxing_desc(const struct pv_muxing_param *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+#define PV_MUXING_TEST(_name, _mock_fn, _check_fn, ...) \
+ { \
+ .name = _name, \
+ .mock_fn = &_mock_fn, \
+ .check_fn = &_check_fn, \
+ .encoders = (enum vc4_encoder_type[]) { __VA_ARGS__ }, \
+ .nencoders = sizeof((enum vc4_encoder_type[]) { __VA_ARGS__ }) / \
+ sizeof(enum vc4_encoder_type), \
+ }
+
+#define VC4_PV_MUXING_TEST(_name, ...) \
+ PV_MUXING_TEST(_name, vc4_mock_device, check_vc4_encoder_constraints, __VA_ARGS__)
+
+#define VC5_PV_MUXING_TEST(_name, ...) \
+ PV_MUXING_TEST(_name, vc5_mock_device, check_vc5_encoder_constraints, __VA_ARGS__)
+
+static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
+ VC4_PV_MUXING_TEST("1 output: DSI0",
+ VC4_ENCODER_TYPE_DSI0),
+ VC4_PV_MUXING_TEST("1 output: DPI",
+ VC4_ENCODER_TYPE_DPI),
+ VC4_PV_MUXING_TEST("1 output: HDMI0",
+ VC4_ENCODER_TYPE_HDMI0),
+ VC4_PV_MUXING_TEST("1 output: VEC",
+ VC4_ENCODER_TYPE_VEC),
+ VC4_PV_MUXING_TEST("1 output: DSI1",
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("1 output: TXP",
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("2 outputs: DSI0, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC4_PV_MUXING_TEST("2 outputs: DSI0, VEC",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC),
+ VC4_PV_MUXING_TEST("2 outputs: DSI0, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("2 outputs: DSI0, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("2 outputs: DPI, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC4_PV_MUXING_TEST("2 outputs: DPI, VEC",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC),
+ VC4_PV_MUXING_TEST("2 outputs: DPI, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("2 outputs: DPI, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("2 outputs: HDMI0, DSI1",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("2 outputs: HDMI0, TXP",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("2 outputs: VEC, DSI1",
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("2 outputs: VEC, TXP",
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP),
+};
+
+KUNIT_ARRAY_PARAM(vc4_test_pv_muxing,
+ vc4_test_pv_muxing_params,
+ vc4_test_pv_muxing_desc);
+
+static const struct pv_muxing_param vc4_test_pv_muxing_invalid_params[] = {
+ VC4_PV_MUXING_TEST("DPI/DSI0 Conflict",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI0),
+ VC4_PV_MUXING_TEST("TXP/DSI1 Conflict",
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("HDMI0/VEC Conflict",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_VEC),
+ VC4_PV_MUXING_TEST("More than 3 outputs: DSI0, HDMI0, DSI1, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("More than 3 outputs: DPI, HDMI0, DSI1, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_TXP),
+};
+
+KUNIT_ARRAY_PARAM(vc4_test_pv_muxing_invalid,
+ vc4_test_pv_muxing_invalid_params,
+ vc4_test_pv_muxing_desc);
+
+static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
+ VC5_PV_MUXING_TEST("1 output: DPI",
+ VC4_ENCODER_TYPE_DPI),
+ VC5_PV_MUXING_TEST("1 output: DSI0",
+ VC4_ENCODER_TYPE_DSI0),
+ VC5_PV_MUXING_TEST("1 output: DSI1",
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("1 output: HDMI0",
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("1 output: HDMI1",
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("1 output: VEC",
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, VEC",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, VEC",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("2 outputs: DSI1, VEC",
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: DSI1, TXP",
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("2 outputs: DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("2 outputs: DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("2 outputs: HDMI0, VEC",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: HDMI0, TXP",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("2 outputs: HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("2 outputs: HDMI1, VEC",
+ VC4_ENCODER_TYPE_HDMI1,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: HDMI1, TXP",
+ VC4_ENCODER_TYPE_HDMI1,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("2 outputs: TXP, VEC",
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+};
+
+KUNIT_ARRAY_PARAM(vc5_test_pv_muxing,
+ vc5_test_pv_muxing_params,
+ vc4_test_pv_muxing_desc);
+
+static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
+ VC5_PV_MUXING_TEST("DPI/DSI0 Conflict",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: VEC, TXP, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+};
+
+KUNIT_ARRAY_PARAM(vc5_test_pv_muxing_invalid,
+ vc5_test_pv_muxing_invalid_params,
+ vc4_test_pv_muxing_desc);
+
+static void drm_vc4_test_pv_muxing(struct kunit *test)
+{
+ const struct pv_muxing_param *params = test->param_value;
+ const struct pv_muxing_priv *priv = test->priv;
+ struct drm_atomic_state *state = priv->state;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < params->nencoders; i++) {
+ enum vc4_encoder_type enc_type = params->encoders[i];
+
+ ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ }
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test,
+ check_fifo_conflict(test, state));
+
+ for (i = 0; i < params->nencoders; i++) {
+ enum vc4_encoder_type enc_type = params->encoders[i];
+
+ KUNIT_EXPECT_TRUE(test, check_channel_for_encoder(test, state, enc_type,
+ params->check_fn));
+ }
+}
+
+static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
+{
+ const struct pv_muxing_param *params = test->param_value;
+ const struct pv_muxing_priv *priv = test->priv;
+ struct drm_atomic_state *state = priv->state;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < params->nencoders; i++) {
+ enum vc4_encoder_type enc_type = params->encoders[i];
+
+ ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ }
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_EXPECT_LT(test, ret, 0);
+}
+
+static int vc4_pv_muxing_test_init(struct kunit *test)
+{
+ const struct pv_muxing_param *params = test->param_value;
+ struct drm_atomic_state *state;
+ struct pv_muxing_priv *priv;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+ test->priv = priv;
+
+ vc4 = params->mock_fn(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+ priv->vc4 = vc4;
+
+ drm_modeset_acquire_init(&priv->ctx, 0);
+
+ drm = &vc4->base;
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &priv->ctx;
+
+ priv->state = state;
+
+ return 0;
+}
+
+static void vc4_pv_muxing_test_exit(struct kunit *test)
+{
+ struct pv_muxing_priv *priv = test->priv;
+ struct vc4_dev *vc4 = priv->vc4;
+ struct drm_device *drm = &vc4->base;
+ struct drm_atomic_state *state = priv->state;
+
+ drm_atomic_state_put(state);
+ drm_modeset_drop_locks(&priv->ctx);
+ drm_modeset_acquire_fini(&priv->ctx);
+ drm_dev_unregister(drm);
+ drm_kunit_helper_free_device(test, vc4->dev);
+}
+
+static struct kunit_case vc4_pv_muxing_tests[] = {
+ KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing,
+ vc4_test_pv_muxing_gen_params),
+ KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing_invalid,
+ vc4_test_pv_muxing_invalid_gen_params),
+ {}
+};
+
+static struct kunit_suite vc4_pv_muxing_test_suite = {
+ .name = "vc4-pv-muxing-combinations",
+ .init = vc4_pv_muxing_test_init,
+ .exit = vc4_pv_muxing_test_exit,
+ .test_cases = vc4_pv_muxing_tests,
+};
+
+static struct kunit_case vc5_pv_muxing_tests[] = {
+ KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing,
+ vc5_test_pv_muxing_gen_params),
+ KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing_invalid,
+ vc5_test_pv_muxing_invalid_gen_params),
+ {}
+};
+
+static struct kunit_suite vc5_pv_muxing_test_suite = {
+ .name = "vc5-pv-muxing-combinations",
+ .init = vc4_pv_muxing_test_init,
+ .exit = vc4_pv_muxing_test_exit,
+ .test_cases = vc5_pv_muxing_tests,
+};
+
+/* See
+ * https://lore.kernel.org/all/3e113525-aa89-b1e2-56b7-ca55bd41d057@samsung.com/
+ * and
+ * https://lore.kernel.org/dri-devel/20200917121623.42023-1-maxime@cerno.tech/
+ */
+static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct vc4_crtc_state *new_vc4_crtc_state;
+ struct vc4_hvs_state *new_hvs_state;
+ unsigned int hdmi0_channel;
+ unsigned int hdmi1_channel;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
+ int ret;
+
+ vc4 = vc5_mock_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &vc4->base;
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);
+
+ hdmi0_channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_ASSERT_NE(test, hdmi0_channel, VC4_HVS_CHANNEL_DISABLED);
+ KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi0_channel].in_use);
+
+ ret = drm_atomic_helper_swap_state(state, false);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_atomic_state_put(state);
+
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI1);
+ KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);
+
+ hdmi1_channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_ASSERT_NE(test, hdmi1_channel, VC4_HVS_CHANNEL_DISABLED);
+ KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);
+
+ KUNIT_EXPECT_NE(test, hdmi0_channel, hdmi1_channel);
+
+ drm_atomic_state_put(state);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_dev_unregister(drm);
+ drm_kunit_helper_free_device(test, vc4->dev);
+}
+
+static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct vc4_crtc_state *new_vc4_crtc_state;
+ struct vc4_hvs_state *new_hvs_state;
+ unsigned int old_hdmi0_channel;
+ unsigned int old_hdmi1_channel;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
+ int ret;
+
+ vc4 = vc5_mock_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &vc4->base;
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);
+
+ old_hdmi0_channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_ASSERT_NE(test, old_hdmi0_channel, VC4_HVS_CHANNEL_DISABLED);
+ KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[old_hdmi0_channel].in_use);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI1);
+ KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);
+
+ old_hdmi1_channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_ASSERT_NE(test, old_hdmi1_channel, VC4_HVS_CHANNEL_DISABLED);
+ KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[old_hdmi1_channel].in_use);
+
+ ret = drm_atomic_helper_swap_state(state, false);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_atomic_state_put(state);
+
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI1);
+
+ if (new_vc4_crtc_state) {
+ unsigned int hdmi1_channel;
+
+ hdmi1_channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_ASSERT_NE(test, hdmi1_channel, VC4_HVS_CHANNEL_DISABLED);
+ KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);
+
+ KUNIT_EXPECT_EQ(test, old_hdmi1_channel, hdmi1_channel);
+ }
+
+ drm_atomic_state_put(state);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_dev_unregister(drm);
+ drm_kunit_helper_free_device(test, vc4->dev);
+}
+
+static void
+drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct vc4_crtc_state *new_vc4_crtc_state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
+ int ret;
+
+ vc4 = vc5_mock_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &vc4->base;
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_helper_swap_state(state, false);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_atomic_state_put(state);
+
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_EXPECT_NULL(test, new_vc4_crtc_state);
+
+ drm_atomic_state_put(state);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_dev_unregister(drm);
+ drm_kunit_helper_free_device(test, vc4->dev);
+}
+
+static struct kunit_case vc5_pv_muxing_bugs_tests[] = {
+ KUNIT_CASE(drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable),
+ KUNIT_CASE(drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state),
+ KUNIT_CASE(drm_test_vc5_pv_muxing_bugs_stable_fifo),
+ {}
+};
+
+static struct kunit_suite vc5_pv_muxing_bugs_test_suite = {
+ .name = "vc5-pv-muxing-bugs",
+ .test_cases = vc5_pv_muxing_bugs_tests,
+};
+
+kunit_test_suites(
+ &vc4_pv_muxing_test_suite,
+ &vc5_pv_muxing_test_suite,
+ &vc5_pv_muxing_bugs_test_suite
+);
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 43d9b3a6a352..c2b7573bd92b 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -69,8 +69,8 @@ static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_printer p = drm_seq_file_printer(m);
@@ -993,15 +993,11 @@ int vc4_bo_debugfs_init(struct drm_minor *minor)
{
struct drm_device *drm = minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(drm);
- int ret;
if (!vc4->v3d)
return -ENODEV;
- ret = vc4_debugfs_add_file(minor, "bo_stats",
- vc4_bo_stats_debugfs, NULL);
- if (ret)
- return ret;
+ drm_debugfs_add_file(drm, "bo_stats", vc4_bo_stats_debugfs, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0108613e79d5..cdc0559221f0 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -50,8 +50,17 @@
#define HVS_FIFO_LATENCY_PIX 6
-#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
-#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
+#define CRTC_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, vc4_crtc->regs + (offset)); \
+ } while (0)
+
+#define CRTC_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(vc4_crtc->regs + (offset)); \
+ })
static const struct debugfs_reg32 crtc_regs[] = {
VC4_REG32(PV_CONTROL),
@@ -326,8 +335,14 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
bool is_dsi1 = vc4_encoder->type == VC4_ENCODER_TYPE_DSI1;
+ bool is_vec = vc4_encoder->type == VC4_ENCODER_TYPE_VEC;
u32 format = is_dsi1 ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
u8 ppc = pv_data->pixels_per_clock;
+
+ u16 vert_bp = mode->crtc_vtotal - mode->crtc_vsync_end;
+ u16 vert_sync = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ u16 vert_fp = mode->crtc_vsync_start - mode->crtc_vdisplay;
+
bool debug_dump_regs = false;
int idx;
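As a worked example of the vertical timing split introduced above (standard CEA-861 1080p60 values, given purely for illustration):

	/*
	 * crtc_vdisplay = 1080, crtc_vsync_start = 1084,
	 * crtc_vsync_end = 1089, crtc_vtotal = 1125
	 *
	 * vert_fp   = 1084 - 1080 = 4
	 * vert_sync = 1089 - 1084 = 5
	 * vert_bp   = 1125 - 1089 = 36
	 */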
@@ -355,49 +370,60 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
VC4_SET_FIELD(mode->hdisplay * pixel_rep / ppc,
PV_HORZB_HACTIVE));
- CRTC_WRITE(PV_VERTA,
- VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
- interlace,
- PV_VERTA_VBP) |
- VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
- PV_VERTA_VSYNC));
- CRTC_WRITE(PV_VERTB,
- VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
- PV_VERTB_VFP) |
- VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
-
if (interlace) {
+ bool odd_field_first = false;
+ u32 field_delay = mode->htotal * pixel_rep / (2 * ppc);
+ u16 vert_bp_even = vert_bp;
+ u16 vert_fp_even = vert_fp;
+
+ if (is_vec) {
+ /* VEC (composite output) */
+ ++field_delay;
+ if (mode->htotal == 858) {
+ /* 525-line mode (NTSC or PAL-M) */
+ odd_field_first = true;
+ }
+ }
+
+ if (odd_field_first)
+ ++vert_fp_even;
+ else
+ ++vert_bp;
+
CRTC_WRITE(PV_VERTA_EVEN,
- VC4_SET_FIELD(mode->crtc_vtotal -
- mode->crtc_vsync_end,
- PV_VERTA_VBP) |
- VC4_SET_FIELD(mode->crtc_vsync_end -
- mode->crtc_vsync_start,
- PV_VERTA_VSYNC));
+ VC4_SET_FIELD(vert_bp_even, PV_VERTA_VBP) |
+ VC4_SET_FIELD(vert_sync, PV_VERTA_VSYNC));
CRTC_WRITE(PV_VERTB_EVEN,
- VC4_SET_FIELD(mode->crtc_vsync_start -
- mode->crtc_vdisplay,
- PV_VERTB_VFP) |
+ VC4_SET_FIELD(vert_fp_even, PV_VERTB_VFP) |
VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
- /* We set up first field even mode for HDMI. VEC's
- * NTSC mode would want first field odd instead, once
- * we support it (to do so, set ODD_FIRST and put the
- * delay in VSYNCD_EVEN instead).
+ /* We set up first field even mode for HDMI and VEC's PAL.
+ * For NTSC, we need first field odd.
*/
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
(is_dsi ? PV_VCONTROL_DSI : 0) |
PV_VCONTROL_INTERLACE |
- VC4_SET_FIELD(mode->htotal * pixel_rep / (2 * ppc),
- PV_VCONTROL_ODD_DELAY));
- CRTC_WRITE(PV_VSYNCD_EVEN, 0);
+ (odd_field_first
+ ? PV_VCONTROL_ODD_FIRST
+ : VC4_SET_FIELD(field_delay,
+ PV_VCONTROL_ODD_DELAY)));
+ CRTC_WRITE(PV_VSYNCD_EVEN,
+ (odd_field_first ? field_delay : 0));
} else {
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
(is_dsi ? PV_VCONTROL_DSI : 0));
+ CRTC_WRITE(PV_VSYNCD_EVEN, 0);
}
+ CRTC_WRITE(PV_VERTA,
+ VC4_SET_FIELD(vert_bp, PV_VERTA_VBP) |
+ VC4_SET_FIELD(vert_sync, PV_VERTA_VSYNC));
+ CRTC_WRITE(PV_VERTB,
+ VC4_SET_FIELD(vert_fp, PV_VERTB_VFP) |
+ VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
+
if (is_dsi)
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
@@ -486,21 +512,6 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
return 0;
}
-static struct drm_encoder *vc4_crtc_get_encoder_by_type(struct drm_crtc *crtc,
- enum vc4_encoder_type type)
-{
- struct drm_encoder *encoder;
-
- drm_for_each_encoder(encoder, crtc->dev) {
- struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
-
- if (vc4_encoder->type == type)
- return encoder;
- }
-
- return NULL;
-}
-
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
{
struct drm_device *drm = crtc->dev;
@@ -536,7 +547,7 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
encoder_type = pv_data->encoder_types[encoder_sel];
- encoder = vc4_crtc_get_encoder_by_type(crtc, encoder_type);
+ encoder = vc4_find_encoder_by_type(drm, encoder_type);
if (WARN_ON(!encoder))
return 0;
@@ -690,8 +701,8 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
}
}
-static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+int vc4_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
@@ -1096,12 +1107,9 @@ int vc4_crtc_late_register(struct drm_crtc *crtc)
struct drm_device *drm = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
- int ret;
- ret = vc4_debugfs_add_regset32(drm->primary, crtc_data->debugfs_name,
- &vc4_crtc->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, crtc_data->debugfs_name,
+ &vc4_crtc->regset);
return 0;
}
@@ -1131,8 +1139,9 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.get_scanout_position = vc4_crtc_get_scanout_position,
};
-static const struct vc4_pv_data bcm2835_pv0_data = {
+const struct vc4_pv_data bcm2835_pv0_data = {
.base = {
+ .name = "pixelvalve-0",
.debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
@@ -1145,8 +1154,9 @@ static const struct vc4_pv_data bcm2835_pv0_data = {
},
};
-static const struct vc4_pv_data bcm2835_pv1_data = {
+const struct vc4_pv_data bcm2835_pv1_data = {
.base = {
+ .name = "pixelvalve-1",
.debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
@@ -1159,8 +1169,9 @@ static const struct vc4_pv_data bcm2835_pv1_data = {
},
};
-static const struct vc4_pv_data bcm2835_pv2_data = {
+const struct vc4_pv_data bcm2835_pv2_data = {
.base = {
+ .name = "pixelvalve-2",
.debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
@@ -1173,8 +1184,9 @@ static const struct vc4_pv_data bcm2835_pv2_data = {
},
};
-static const struct vc4_pv_data bcm2711_pv0_data = {
+const struct vc4_pv_data bcm2711_pv0_data = {
.base = {
+ .name = "pixelvalve-0",
.debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
@@ -1187,8 +1199,9 @@ static const struct vc4_pv_data bcm2711_pv0_data = {
},
};
-static const struct vc4_pv_data bcm2711_pv1_data = {
+const struct vc4_pv_data bcm2711_pv1_data = {
.base = {
+ .name = "pixelvalve-1",
.debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 3,
@@ -1201,8 +1214,9 @@ static const struct vc4_pv_data bcm2711_pv1_data = {
},
};
-static const struct vc4_pv_data bcm2711_pv2_data = {
+const struct vc4_pv_data bcm2711_pv2_data = {
.base = {
+ .name = "pixelvalve-2",
.debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 4,
@@ -1214,8 +1228,9 @@ static const struct vc4_pv_data bcm2711_pv2_data = {
},
};
-static const struct vc4_pv_data bcm2711_pv3_data = {
+const struct vc4_pv_data bcm2711_pv3_data = {
.base = {
+ .name = "pixelvalve-3",
.debugfs_name = "crtc3_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
@@ -1227,8 +1242,9 @@ static const struct vc4_pv_data bcm2711_pv3_data = {
},
};
-static const struct vc4_pv_data bcm2711_pv4_data = {
+const struct vc4_pv_data bcm2711_pv4_data = {
.base = {
+ .name = "pixelvalve-4",
.debugfs_name = "crtc4_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 5,
@@ -1278,31 +1294,44 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
}
}
-int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
- const struct drm_crtc_funcs *crtc_funcs,
- const struct drm_crtc_helper_funcs *crtc_helper_funcs)
+/**
+ * __vc4_crtc_init - Initializes a CRTC
+ * @drm: DRM Device
+ * @pdev: CRTC Platform Device
+ * @vc4_crtc: CRTC Object to Initialize
+ * @data: Configuration data associated with this CRTC
+ * @primary_plane: Primary plane for CRTC
+ * @crtc_funcs: Callbacks for the new CRTC
+ * @crtc_helper_funcs: Helper Callbacks for the new CRTC
+ * @feeds_txp: Is this CRTC connected to the TXP?
+ *
+ * Initializes our private CRTC structure. This function is mostly
+ * relevant for KUnit testing; all other users should use
+ * vc4_crtc_init() instead.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure.
+ */
+int __vc4_crtc_init(struct drm_device *drm,
+ struct platform_device *pdev,
+ struct vc4_crtc *vc4_crtc,
+ const struct vc4_crtc_data *data,
+ struct drm_plane *primary_plane,
+ const struct drm_crtc_funcs *crtc_funcs,
+ const struct drm_crtc_helper_funcs *crtc_helper_funcs,
+ bool feeds_txp)
{
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct drm_crtc *crtc = &vc4_crtc->base;
- struct drm_plane *primary_plane;
unsigned int i;
int ret;
- /* For now, we create just the primary and the legacy cursor
- * planes. We should be able to stack more planes on easily,
- * but to do that we would need to compute the bandwidth
- * requirement of the plane configuration, and reject ones
- * that will take too much.
- */
- primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY, 0);
- if (IS_ERR(primary_plane)) {
- dev_err(drm->dev, "failed to construct primary plane\n");
- return PTR_ERR(primary_plane);
- }
-
+ vc4_crtc->data = data;
+ vc4_crtc->pdev = pdev;
+ vc4_crtc->feeds_txp = feeds_txp;
spin_lock_init(&vc4_crtc->irq_lock);
ret = drmm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
- crtc_funcs, NULL);
+ crtc_funcs, data->name);
if (ret)
return ret;
@@ -1328,6 +1357,31 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
return 0;
}
+int vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
+ struct vc4_crtc *vc4_crtc,
+ const struct vc4_crtc_data *data,
+ const struct drm_crtc_funcs *crtc_funcs,
+ const struct drm_crtc_helper_funcs *crtc_helper_funcs,
+ bool feeds_txp)
+{
+ struct drm_plane *primary_plane;
+
+ /* For now, we create just the primary and the legacy cursor
+ * planes. We should be able to stack more planes on easily,
+ * but to do that we would need to compute the bandwidth
+ * requirement of the plane configuration, and reject ones
+ * that will take too much.
+ */
+ primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY, 0);
+ if (IS_ERR(primary_plane)) {
+ dev_err(drm->dev, "failed to construct primary plane\n");
+ return PTR_ERR(primary_plane);
+ }
+
+ return __vc4_crtc_init(drm, pdev, vc4_crtc, data, primary_plane,
+ crtc_funcs, crtc_helper_funcs, feeds_txp);
+}
+
static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1345,8 +1399,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
pv_data = of_device_get_match_data(dev);
if (!pv_data)
return -ENODEV;
- vc4_crtc->data = &pv_data->base;
- vc4_crtc->pdev = pdev;
vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vc4_crtc->regs))
@@ -1356,8 +1408,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
vc4_crtc->regset.regs = crtc_regs;
vc4_crtc->regset.nregs = ARRAY_SIZE(crtc_regs);
- ret = vc4_crtc_init(drm, vc4_crtc,
- &vc4_crtc_funcs, &vc4_crtc_helper_funcs);
+ ret = vc4_crtc_init(drm, pdev, vc4_crtc, &pv_data->base,
+ &vc4_crtc_funcs, &vc4_crtc_helper_funcs,
+ false);
if (ret)
return ret;
vc4_set_crtc_possible_masks(drm, crtc);
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 19cda4f91a82..fac624a663ea 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -34,9 +34,9 @@ vc4_debugfs_init(struct drm_minor *minor)
static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
- struct debugfs_regset32 *regset = node->info_ent->data;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *drm = entry->dev;
+ struct debugfs_regset32 *regset = entry->file.data;
struct drm_printer p = drm_seq_file_printer(m);
int idx;
@@ -50,31 +50,9 @@ static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
return 0;
}
-int vc4_debugfs_add_file(struct drm_minor *minor,
- const char *name,
- int (*show)(struct seq_file*, void*),
- void *data)
+void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *name,
+ struct debugfs_regset32 *regset)
{
- struct drm_device *dev = minor->dev;
- struct dentry *root = minor->debugfs_root;
- struct drm_info_list *file;
-
- file = drmm_kzalloc(dev, sizeof(*file), GFP_KERNEL);
- if (!file)
- return -ENOMEM;
-
- file->name = name;
- file->show = show;
- file->data = data;
-
- drm_debugfs_create_files(file, 1, root, minor);
-
- return 0;
-}
-
-int vc4_debugfs_add_regset32(struct drm_minor *minor,
- const char *name,
- struct debugfs_regset32 *regset)
-{
- return vc4_debugfs_add_file(minor, name, vc4_debugfs_regset32, regset);
+ drm_debugfs_add_file(drm, name, vc4_debugfs_regset32, regset);
}
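The conversion to the drm_debugfs_entry-based API follows the same shape across the driver; a minimal converted show callback might look like this (illustrative sketch, the "foo" names are hypothetical):

	static int foo_debugfs(struct seq_file *m, void *unused)
	{
		struct drm_debugfs_entry *entry = m->private;
		struct drm_device *dev = entry->dev;
		struct drm_printer p = drm_seq_file_printer(m);

		drm_printf(&p, "example output\n");

		return 0;
	}

	/* Registered against the device itself, no drm_minor needed: */
	drm_debugfs_add_file(dev, "foo", foo_debugfs, NULL);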
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 1f8f44b7b5a5..f518d6e59ed6 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -103,8 +103,17 @@ to_vc4_dpi(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dpi, encoder.base);
}
-#define DPI_READ(offset) readl(dpi->regs + (offset))
-#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
+#define DPI_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(dpi->regs + (offset)); \
+ })
+
+#define DPI_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, dpi->regs + (offset)); \
+ } while (0)
static const struct debugfs_reg32 dpi_regs[] = {
VC4_REG32(DPI_C),
@@ -150,8 +159,8 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
}
drm_connector_list_iter_end(&conn_iter);
- /* Default to 24bit if no connector or format found. */
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT);
+ /* Default to 18bit if no connector or format found. */
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1, DPI_FORMAT);
if (connector) {
if (connector->display_info.num_bus_formats) {
@@ -170,16 +179,26 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR,
DPI_ORDER);
break;
+ case MEDIA_BUS_FMT_BGR666_1X24_CPADHI:
+ dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
+ fallthrough;
case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
DPI_FORMAT);
break;
+ case MEDIA_BUS_FMT_BGR666_1X18:
+ dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
+ fallthrough;
case MEDIA_BUS_FMT_RGB666_1X18:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1,
DPI_FORMAT);
break;
case MEDIA_BUS_FMT_RGB565_1X16:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3,
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_1,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X24_CPADHI:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_2,
DPI_FORMAT);
break;
default:
@@ -248,11 +267,8 @@ static int vc4_dpi_late_register(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
struct vc4_dpi *dpi = to_vc4_dpi(encoder);
- int ret;
- ret = vc4_debugfs_add_regset32(drm->primary, "dpi_regs", &dpi->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, "dpi_regs", &dpi->regset);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5990d8f8c363..0ccaee57fe9a 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -196,7 +196,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_PERFMON_GET_VALUES, vc4_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
};
-static const struct drm_driver vc4_drm_driver = {
+const struct drm_driver vc4_drm_driver = {
.driver_features = (DRIVER_MODESET |
DRIVER_ATOMIC |
DRIVER_GEM |
@@ -225,7 +225,7 @@ static const struct drm_driver vc4_drm_driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
-static const struct drm_driver vc5_drm_driver = {
+const struct drm_driver vc5_drm_driver = {
.driver_features = (DRIVER_MODESET |
DRIVER_ATOMIC |
DRIVER_GEM),
@@ -320,7 +320,6 @@ static int vc4_drm_bind(struct device *dev)
drm = &vc4->base;
platform_set_drvdata(pdev, drm);
- INIT_LIST_HEAD(&vc4->debugfs_list);
if (!is_vc5) {
ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 515228682e8e..95069bb16821 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -19,11 +19,16 @@
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
+#include <kunit/test-bug.h>
+
#include "uapi/drm/vc4_drm.h"
struct drm_device;
struct drm_gem_object;
+extern const struct drm_driver vc4_drm_driver;
+extern const struct drm_driver vc5_drm_driver;
+
/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
* this.
*/
@@ -221,11 +226,6 @@ struct vc4_dev {
struct drm_private_obj hvs_channels;
struct drm_private_obj load_tracker;
- /* List of vc4_debugfs_info_entry for adding to debugfs once
- * the minor is available (after drm_dev_register()).
- */
- struct list_head debugfs_list;
-
/* Mutex for binner bo allocation. */
struct mutex bin_bo_lock;
/* Reference count for our binner bo. */
@@ -233,7 +233,7 @@ struct vc4_dev {
};
static inline struct vc4_dev *
-to_vc4_dev(struct drm_device *dev)
+to_vc4_dev(const struct drm_device *dev)
{
return container_of(dev, struct vc4_dev, base);
}
@@ -286,7 +286,7 @@ struct vc4_bo {
};
static inline struct vc4_bo *
-to_vc4_bo(struct drm_gem_object *bo)
+to_vc4_bo(const struct drm_gem_object *bo)
{
return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}
@@ -299,7 +299,7 @@ struct vc4_fence {
};
static inline struct vc4_fence *
-to_vc4_fence(struct dma_fence *fence)
+to_vc4_fence(const struct dma_fence *fence)
{
return container_of(fence, struct vc4_fence, base);
}
@@ -355,12 +355,35 @@ struct vc4_hvs {
bool vc5_hdmi_enable_4096by2160;
};
+#define HVS_NUM_CHANNELS 3
+
+struct vc4_hvs_state {
+ struct drm_private_state base;
+ unsigned long core_clock_rate;
+
+ struct {
+ unsigned in_use: 1;
+ unsigned long fifo_load;
+ struct drm_crtc_commit *pending_commit;
+ } fifo_state[HVS_NUM_CHANNELS];
+};
+
+static inline struct vc4_hvs_state *
+to_vc4_hvs_state(const struct drm_private_state *priv)
+{
+ return container_of(priv, struct vc4_hvs_state, base);
+}
+
+struct vc4_hvs_state *vc4_hvs_get_global_state(struct drm_atomic_state *state);
+struct vc4_hvs_state *vc4_hvs_get_old_global_state(const struct drm_atomic_state *state);
+struct vc4_hvs_state *vc4_hvs_get_new_global_state(const struct drm_atomic_state *state);
+
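For reference, a consumer of these accessors would be expected to follow the same pattern as the tests above — a sketch only, assuming failures are reported as error pointers as the KUNIT_ASSERT_NOT_ERR_OR_NULL checks suggest:

	struct vc4_hvs_state *hvs_state;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (IS_ERR(hvs_state))
		return PTR_ERR(hvs_state);

	if (hvs_state->fifo_state[channel].in_use)
		return -EBUSY;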
struct vc4_plane {
struct drm_plane base;
};
static inline struct vc4_plane *
-to_vc4_plane(struct drm_plane *plane)
+to_vc4_plane(const struct drm_plane *plane)
{
return container_of(plane, struct vc4_plane, base);
}
@@ -436,7 +459,7 @@ struct vc4_plane_state {
};
static inline struct vc4_plane_state *
-to_vc4_plane_state(struct drm_plane_state *state)
+to_vc4_plane_state(const struct drm_plane_state *state)
{
return container_of(state, struct vc4_plane_state, base);
}
@@ -450,6 +473,7 @@ enum vc4_encoder_type {
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_SMI,
VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
};
struct vc4_encoder {
@@ -466,12 +490,30 @@ struct vc4_encoder {
};
static inline struct vc4_encoder *
-to_vc4_encoder(struct drm_encoder *encoder)
+to_vc4_encoder(const struct drm_encoder *encoder)
{
return container_of(encoder, struct vc4_encoder, base);
}
+static inline
+struct drm_encoder *vc4_find_encoder_by_type(struct drm_device *drm,
+ enum vc4_encoder_type type)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, drm) {
+ struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+
+ if (vc4_encoder->type == type)
+ return encoder;
+ }
+
+ return NULL;
+}
+
struct vc4_crtc_data {
+ const char *name;
+
const char *debugfs_name;
/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
@@ -481,6 +523,8 @@ struct vc4_crtc_data {
int hvs_output;
};
+extern const struct vc4_crtc_data vc4_txp_crtc_data;
+
struct vc4_pv_data {
struct vc4_crtc_data base;
@@ -493,6 +537,15 @@ struct vc4_pv_data {
enum vc4_encoder_type encoder_types[4];
};
+extern const struct vc4_pv_data bcm2835_pv0_data;
+extern const struct vc4_pv_data bcm2835_pv1_data;
+extern const struct vc4_pv_data bcm2835_pv2_data;
+extern const struct vc4_pv_data bcm2711_pv0_data;
+extern const struct vc4_pv_data bcm2711_pv1_data;
+extern const struct vc4_pv_data bcm2711_pv2_data;
+extern const struct vc4_pv_data bcm2711_pv3_data;
+extern const struct vc4_pv_data bcm2711_pv4_data;
+
struct vc4_crtc {
struct drm_crtc base;
struct platform_device *pdev;
@@ -539,7 +592,7 @@ struct vc4_crtc {
};
static inline struct vc4_crtc *
-to_vc4_crtc(struct drm_crtc *crtc)
+to_vc4_crtc(const struct drm_crtc *crtc)
{
return container_of(crtc, struct vc4_crtc, base);
}
@@ -584,15 +637,34 @@ struct vc4_crtc_state {
#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)
static inline struct vc4_crtc_state *
-to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
+to_vc4_crtc_state(const struct drm_crtc_state *crtc_state)
{
return container_of(crtc_state, struct vc4_crtc_state, base);
}
-#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
-#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
-#define HVS_READ(offset) readl(hvs->regs + offset)
-#define HVS_WRITE(offset, val) writel(val, hvs->regs + offset)
+#define V3D_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(vc4->v3d->regs + (offset)); \
+ })
+
+#define V3D_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, vc4->v3d->regs + (offset)); \
+ } while (0)
+
+#define HVS_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(hvs->regs + (offset)); \
+ })
+
+#define HVS_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, hvs->regs + (offset)); \
+ } while (0)
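The guard relies on kunit_fail_current_test() from <kunit/test-bug.h>, which only fires while a KUnit test is actually running and compiles out entirely without CONFIG_KUNIT. The same pattern can wrap any MMIO accessor; a minimal sketch (hypothetical helper, not part of this patch):

	#include <kunit/test-bug.h>
	#include <linux/io.h>

	/* Fail the currently running KUnit test (if any) before doing the
	 * real MMIO read; outside of a test this is just readl().
	 */
	static inline u32 guarded_readl(void __iomem *base, u32 offset)
	{
		kunit_fail_current_test("Accessing a register in a unit test!\n");
		return readl(base + offset);
	}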
#define VC4_REG32(reg) { .name = #reg, .offset = reg }
@@ -862,14 +934,24 @@ int vc4_bo_debugfs_init(struct drm_minor *minor);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
-int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
+int __vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
+ struct vc4_crtc *vc4_crtc, const struct vc4_crtc_data *data,
+ struct drm_plane *primary_plane,
+ const struct drm_crtc_funcs *crtc_funcs,
+ const struct drm_crtc_helper_funcs *crtc_helper_funcs,
+ bool feeds_txp);
+int vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
+ struct vc4_crtc *vc4_crtc, const struct vc4_crtc_data *data,
const struct drm_crtc_funcs *crtc_funcs,
- const struct drm_crtc_helper_funcs *crtc_helper_funcs);
+ const struct drm_crtc_helper_funcs *crtc_helper_funcs,
+ bool feeds_txp);
int vc4_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags,
struct drm_modeset_acquire_ctx *ctx);
+int vc4_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc);
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
@@ -884,28 +966,15 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
-int vc4_debugfs_add_file(struct drm_minor *minor,
- const char *filename,
- int (*show)(struct seq_file*, void*),
- void *data);
-int vc4_debugfs_add_regset32(struct drm_minor *minor,
- const char *filename,
- struct debugfs_regset32 *regset);
+void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *filename,
+ struct debugfs_regset32 *regset);
#else
-static inline int vc4_debugfs_add_file(struct drm_minor *minor,
- const char *filename,
- int (*show)(struct seq_file*, void*),
- void *data)
-{
- return 0;
-}
-static inline int vc4_debugfs_add_regset32(struct drm_minor *minor,
- const char *filename,
- struct debugfs_regset32 *regset)
-{
- return 0;
-}
+static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *filename,
+ struct debugfs_regset32 *regset)
+{}
#endif
/* vc4_drv.c */
@@ -959,6 +1028,7 @@ void vc4_irq_reset(struct drm_device *dev);
/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
+struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev);
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output);
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 878e05d79e81..2a71321b9222 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -24,7 +24,6 @@
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -556,8 +555,8 @@ struct vc4_dsi {
struct platform_device *pdev;
- struct drm_bridge *bridge;
- struct list_head bridge_chain;
+ struct drm_bridge *out_bridge;
+ struct drm_bridge bridge;
void __iomem *regs;
@@ -609,6 +608,12 @@ to_vc4_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dsi, encoder.base);
}
+static inline struct vc4_dsi *
+bridge_to_vc4_dsi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct vc4_dsi, bridge);
+}
+
static inline void
dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
{
@@ -617,6 +622,8 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
dma_cookie_t cookie;
int ret;
+ kunit_fail_current_test("Accessing a register in a unit test!\n");
+
/* DSI0 should be able to write normally. */
if (!chan) {
writel(val, dsi->regs + offset);
@@ -645,7 +652,12 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
DRM_ERROR("Failed to wait for DMA: %d\n", ret);
}
-#define DSI_READ(offset) readl(dsi->regs + (offset))
+#define DSI_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(dsi->regs + (offset)); \
+ })
+
#define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
#define DSI_PORT_READ(offset) \
DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset)
@@ -790,26 +802,22 @@ dsi_esc_timing(u32 ns)
return DIV_ROUND_UP(ns, ESC_TIME_NS);
}
-static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
+static void vc4_dsi_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
{
- struct vc4_dsi *dsi = to_vc4_dsi(encoder);
- struct device *dev = &dsi->pdev->dev;
- struct drm_bridge *iter;
-
- list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
- if (iter->funcs->disable)
- iter->funcs->disable(iter);
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
+ u32 disp0_ctrl;
- if (iter == dsi->bridge)
- break;
- }
-
- vc4_dsi_ulps(dsi, true);
+ disp0_ctrl = DSI_PORT_READ(DISP0_CTRL);
+ disp0_ctrl &= ~DSI_DISP0_ENABLE;
+ DSI_PORT_WRITE(DISP0_CTRL, disp0_ctrl);
+}
- list_for_each_entry_from(iter, &dsi->bridge_chain, chain_node) {
- if (iter->funcs->post_disable)
- iter->funcs->post_disable(iter);
- }
+static void vc4_dsi_bridge_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
+ struct device *dev = &dsi->pdev->dev;
clk_disable_unprepare(dsi->pll_phy_clock);
clk_disable_unprepare(dsi->escape_clock);
@@ -831,11 +839,11 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
* higher-than-expected clock rate to the panel, but that's what the
* firmware does too.
*/
-static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool vc4_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct vc4_dsi *dsi = to_vc4_dsi(encoder);
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
struct clk *phy_parent = clk_get_parent(dsi->pll_phy_clock);
unsigned long parent_rate = clk_get_rate(phy_parent);
unsigned long pixel_clock_hz = mode->clock * 1000;
@@ -867,18 +875,22 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
+static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct vc4_dsi *dsi = to_vc4_dsi(encoder);
+ struct drm_atomic_state *state = old_state->base.state;
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
+ const struct drm_crtc_state *crtc_state;
struct device *dev = &dsi->pdev->dev;
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
bool debug_dump_regs = false;
- struct drm_bridge *iter;
unsigned long hs_clock;
+ struct drm_crtc *crtc;
u32 ui_ns;
/* Minimum LP state duration in escape clock cycles. */
u32 lpx = dsi_esc_timing(60);
- unsigned long pixel_clock_hz = mode->clock * 1000;
+ unsigned long pixel_clock_hz;
unsigned long dsip_clock;
unsigned long phy_clock;
int ret;
@@ -895,6 +907,18 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
drm_print_regset32(&p, &dsi->regset);
}
+ /*
+ * Retrieve the CRTC adjusted mode. This requires a little dance to go
+ * from the bridge to the encoder, to the connector and to the CRTC.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ mode = &crtc_state->adjusted_mode;
+
+ pixel_clock_hz = mode->clock * 1000;
+
/* Round up the clk_set_rate() request slightly, since
* PLLD_DSI1 is an integer divider and its rate selection will
* never round up.
@@ -1106,11 +1130,6 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
vc4_dsi_ulps(dsi, false);
- list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
- if (iter->funcs->pre_enable)
- iter->funcs->pre_enable(iter);
- }
-
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
DSI_PORT_WRITE(DISP0_CTRL,
VC4_SET_FIELD(dsi->divider,
@@ -1118,18 +1137,23 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
DSI_DISP0_LP_STOP_CTRL) |
- DSI_DISP0_ST_END |
- DSI_DISP0_ENABLE);
+ DSI_DISP0_ST_END);
} else {
DSI_PORT_WRITE(DISP0_CTRL,
- DSI_DISP0_COMMAND_MODE |
- DSI_DISP0_ENABLE);
+ DSI_DISP0_COMMAND_MODE);
}
+}
- list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
- if (iter->funcs->enable)
- iter->funcs->enable(iter);
- }
+static void vc4_dsi_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
+ bool debug_dump_regs = false;
+ u32 disp0_ctrl;
+
+ disp0_ctrl = DSI_PORT_READ(DISP0_CTRL);
+ disp0_ctrl |= DSI_DISP0_ENABLE;
+ DSI_PORT_WRITE(DISP0_CTRL, disp0_ctrl);
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
@@ -1138,6 +1162,16 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
}
}
+static int vc4_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
+
+ /* Attach the panel or bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, dsi->out_bridge,
+ &dsi->bridge, flags);
+}
+
static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
@@ -1314,6 +1348,7 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct vc4_dsi *dsi = host_to_dsi(host);
+ int ret;
dsi->lanes = device->lanes;
dsi->channel = device->channel;
@@ -1348,7 +1383,15 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
- return component_add(&dsi->pdev->dev, &vc4_dsi_ops);
+ drm_bridge_add(&dsi->bridge);
+
+ ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops);
+ if (ret) {
+ drm_bridge_remove(&dsi->bridge);
+ return ret;
+ }
+
+ return 0;
}
static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
@@ -1357,6 +1400,7 @@ static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
struct vc4_dsi *dsi = host_to_dsi(host);
component_del(&dsi->pdev->dev, &vc4_dsi_ops);
+ drm_bridge_remove(&dsi->bridge);
return 0;
}
@@ -1366,22 +1410,24 @@ static const struct mipi_dsi_host_ops vc4_dsi_host_ops = {
.transfer = vc4_dsi_host_transfer,
};
-static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
- .disable = vc4_dsi_encoder_disable,
- .enable = vc4_dsi_encoder_enable,
- .mode_fixup = vc4_dsi_encoder_mode_fixup,
+static const struct drm_bridge_funcs vc4_dsi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = vc4_dsi_bridge_pre_enable,
+ .atomic_enable = vc4_dsi_bridge_enable,
+ .atomic_disable = vc4_dsi_bridge_disable,
+ .atomic_post_disable = vc4_dsi_bridge_post_disable,
+ .attach = vc4_dsi_bridge_attach,
+ .mode_fixup = vc4_dsi_bridge_mode_fixup,
};
static int vc4_dsi_late_register(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
struct vc4_dsi *dsi = to_vc4_dsi(encoder);
- int ret;
- ret = vc4_debugfs_add_regset32(drm->primary, dsi->variant->debugfs_name,
- &dsi->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
return 0;
}
@@ -1617,7 +1663,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
dsi->variant = of_device_get_match_data(dev);
- INIT_LIST_HEAD(&dsi->bridge_chain);
dsi->encoder.type = dsi->variant->port ?
VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
@@ -1723,9 +1768,9 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- dsi->bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
- if (IS_ERR(dsi->bridge))
- return PTR_ERR(dsi->bridge);
+ dsi->out_bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
+ if (IS_ERR(dsi->out_bridge))
+ return PTR_ERR(dsi->out_bridge);
/* The esc clock rate is supposed to always be 100 MHz. */
ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
@@ -1745,41 +1790,19 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- drm_encoder_helper_add(encoder, &vc4_dsi_encoder_helper_funcs);
-
ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
- ret = drm_bridge_attach(encoder, dsi->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, &dsi->bridge, NULL, 0);
if (ret)
return ret;
- /* Disable the atomic helper calls into the bridge. We
- * manually call the bridge pre_enable / enable / etc. calls
- * from our driver, since we need to sequence them within the
- * encoder's enable/disable paths.
- */
- list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
return 0;
}
-static void vc4_dsi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct vc4_dsi *dsi = dev_get_drvdata(dev);
- struct drm_encoder *encoder = &dsi->encoder.base;
-
- /*
- * Restore the bridge_chain so the bridge detach procedure can happen
- * normally.
- */
- list_splice_init(&dsi->bridge_chain, &encoder->bridge_chain);
-}
-
static const struct component_ops vc4_dsi_ops = {
.bind = vc4_dsi_bind,
- .unbind = vc4_dsi_unbind,
};
static int vc4_dsi_dev_probe(struct platform_device *pdev)
@@ -1793,7 +1816,11 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
dev_set_drvdata(dev, dsi);
kref_init(&dsi->kref);
+
dsi->pdev = pdev;
+ dsi->bridge.funcs = &vc4_dsi_bridge_funcs;
+ dsi->bridge.of_node = dev->of_node;
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
dsi->dsi_host.ops = &vc4_dsi_host_ops;
dsi->dsi_host.dev = dev;
mipi_dsi_host_register(&dsi->dsi_host);
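A note on the "little dance" comment in vc4_dsi_bridge_pre_enable() above: once the encoder hooks are replaced by bridge hooks, the adjusted mode is no longer reachable through encoder->crtc->state, so it has to be recovered from the atomic state by walking bridge -> encoder -> connector -> CRTC. The sketch below illustrates that traversal with the standard DRM atomic accessors; it is not the vc4 code itself, and example_bridge_get_adjusted_mode() is a made-up name.

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>

/* Sketch: recover the CRTC adjusted mode from a bridge atomic callback. */
static const struct drm_display_mode *
example_bridge_get_adjusted_mode(struct drm_bridge *bridge,
                                 struct drm_atomic_state *state)
{
        struct drm_connector *connector;
        struct drm_connector_state *conn_state;
        struct drm_crtc_state *crtc_state;

        /* Which connector is driven by this bridge's encoder? */
        connector = drm_atomic_get_new_connector_for_encoder(state,
                                                             bridge->encoder);
        if (!connector)
                return NULL;

        /* From the connector state, find the CRTC feeding it. */
        conn_state = drm_atomic_get_new_connector_state(state, connector);
        if (!conn_state || !conn_state->crtc)
                return NULL;

        /* The CRTC state holds the adjusted mode. */
        crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
        if (!crtc_state)
                return NULL;

        return &crtc_state->adjusted_mode;
}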
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 12a00d644b61..14628864487a 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -160,8 +160,8 @@ static bool vc4_hdmi_is_full_range_rgb(struct vc4_hdmi *vc4_hdmi,
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct vc4_hdmi *vc4_hdmi = node->info_ent->data;
+ struct drm_debugfs_entry *entry = m->private;
+ struct vc4_hdmi *vc4_hdmi = entry->file.data;
struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_printer p = drm_seq_file_printer(m);
int idx;
@@ -402,6 +402,7 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
{
struct drm_connector *connector = &vc4_hdmi->connector;
struct edid *edid;
+ int ret;
/*
* NOTE: This function should really be called with
@@ -430,7 +431,15 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
kfree(edid);
- vc4_hdmi_reset_link(connector, ctx);
+ for (;;) {
+ ret = vc4_hdmi_reset_link(connector, ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(ctx);
+ continue;
+ }
+
+ break;
+ }
}
static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
@@ -1298,11 +1307,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL));
u32 vertb = (VC4_SET_FIELD(mode->htotal >> (2 - pixel_rep),
VC5_HDMI_VERTB_VSPO) |
- VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
+ VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
+ interlaced,
VC4_HDMI_VERTB_VBP));
u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal -
- mode->crtc_vsync_end - interlaced,
+ mode->crtc_vsync_end,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
unsigned char gcp;
@@ -1995,13 +2005,9 @@ static int vc4_hdmi_late_register(struct drm_encoder *encoder)
struct drm_device *drm = encoder->dev;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
- int ret;
- ret = vc4_debugfs_add_file(drm->primary, variant->debugfs_name,
- vc4_hdmi_debugfs_regs,
- vc4_hdmi);
- if (ret)
- return ret;
+ drm_debugfs_add_file(drm, variant->debugfs_name,
+ vc4_hdmi_debugfs_regs, vc4_hdmi);
return 0;
}
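The hotplug-handling hunk above now retries vc4_hdmi_reset_link() whenever it returns -EDEADLK, which is the contention signal of the drm_modeset_acquire_ctx locking scheme: drop the contended locks with drm_modeset_backoff() and run the operation again. A minimal, generic sketch of that pattern follows; example_update() and example_retry_on_deadlock() are hypothetical stand-ins, not vc4 functions.

#include <linux/errno.h>
#include <drm/drm_connector.h>
#include <drm/drm_modeset_lock.h>

/* Hypothetical operation that takes modeset locks through @ctx. */
static int example_update(struct drm_connector *connector,
                          struct drm_modeset_acquire_ctx *ctx)
{
        return 0;
}

static void example_retry_on_deadlock(struct drm_connector *connector,
                                      struct drm_modeset_acquire_ctx *ctx)
{
        int ret;

        for (;;) {
                ret = example_update(connector, ctx);
                if (ret == -EDEADLK) {
                        /* Drop the contended locks and restart. */
                        drm_modeset_backoff(ctx);
                        continue;
                }

                break;
        }
}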
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 48db438550b1..b04b2fc8d831 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -456,6 +456,8 @@ static inline u32 vc4_hdmi_read(struct vc4_hdmi *hdmi,
WARN_ON(pm_runtime_status_suspended(&hdmi->pdev->dev));
+ kunit_fail_current_test("Accessing an HDMI register in a unit test!\n");
+
if (reg >= variant->num_registers) {
dev_warn(&hdmi->pdev->dev,
"Invalid register ID %u\n", reg);
@@ -486,6 +488,8 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi,
WARN_ON(pm_runtime_status_suspended(&hdmi->pdev->dev));
+ kunit_fail_current_test("Accessing an HDMI register in a unit test!\n");
+
if (reg >= variant->num_registers) {
dev_warn(&hdmi->pdev->dev,
"Invalid register ID %u\n", reg);
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index c4453a5ae163..4da66ef96783 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -93,8 +93,8 @@ void vc4_hvs_dump_state(struct vc4_hvs *hvs)
static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_printer p = drm_seq_file_printer(m);
@@ -105,8 +105,8 @@ static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
struct drm_printer p = drm_seq_file_printer(m);
@@ -370,28 +370,30 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
* mode.
*/
dispctrl = SCALER_DISPCTRLX_ENABLE;
+ dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
- if (!vc4->is_vc5)
+ if (!vc4->is_vc5) {
dispctrl |= VC4_SET_FIELD(mode->hdisplay,
SCALER_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay,
SCALER_DISPCTRLX_HEIGHT) |
(oneshot ? SCALER_DISPCTRLX_ONESHOT : 0);
- else
+ dispbkgndx |= SCALER_DISPBKGND_AUTOHS;
+ } else {
dispctrl |= VC4_SET_FIELD(mode->hdisplay,
SCALER5_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay,
SCALER5_DISPCTRLX_HEIGHT) |
(oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0);
+ dispbkgndx &= ~SCALER5_DISPBKGND_BCK2BCK;
+ }
HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl);
- dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;
HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
- SCALER_DISPBKGND_AUTOHS |
((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
(interlace ? SCALER_DISPBKGND_INTERLACE : 0));
@@ -568,6 +570,8 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
bool enable_bg_fill = false;
u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
u32 __iomem *dlist_next = dlist_start;
+ unsigned int zpos = 0;
+ bool found = false;
int idx;
if (!drm_dev_enter(dev, &idx)) {
@@ -575,29 +579,43 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
return;
}
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ return;
+
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
vc4_hvs_dump_state(hvs);
}
/* Copy all the active planes' dlist contents to the hardware dlist. */
- drm_atomic_crtc_for_each_plane(plane, crtc) {
- /* Is this the first active plane? */
- if (dlist_next == dlist_start) {
- /* We need to enable background fill when a plane
- * could be alpha blending from the background, i.e.
- * where no other plane is underneath. It suffices to
- * consider the first active plane here since we set
- * needs_bg_fill such that either the first plane
- * already needs it or all planes on top blend from
- * the first or a lower plane.
- */
- vc4_plane_state = to_vc4_plane_state(plane->state);
- enable_bg_fill = vc4_plane_state->needs_bg_fill;
+ do {
+ found = false;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (plane->state->normalized_zpos != zpos)
+ continue;
+
+ /* Is this the first active plane? */
+ if (dlist_next == dlist_start) {
+ /* We need to enable background fill when a plane
+ * could be alpha blending from the background, i.e.
+ * where no other plane is underneath. It suffices to
+ * consider the first active plane here since we set
+ * needs_bg_fill such that either the first plane
+ * already needs it or all planes on top blend from
+ * the first or a lower plane.
+ */
+ vc4_plane_state = to_vc4_plane_state(plane->state);
+ enable_bg_fill = vc4_plane_state->needs_bg_fill;
+ }
+
+ dlist_next += vc4_plane_write_dlist(plane, dlist_next);
+
+ found = true;
}
- dlist_next += vc4_plane_write_dlist(plane, dlist_next);
- }
+ zpos++;
+ } while (found);
writel(SCALER_CTL0_END, dlist_next);
dlist_next++;
@@ -658,7 +676,8 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
return;
dispctrl = HVS_READ(SCALER_DISPCTRL);
- dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
+ dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel));
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
@@ -675,7 +694,8 @@ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
return;
dispctrl = HVS_READ(SCALER_DISPCTRL);
- dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
+ dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel));
HVS_WRITE(SCALER_DISPSTAT,
SCALER_DISPSTAT_EUFLOW(channel));
@@ -701,6 +721,7 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
int channel;
u32 control;
u32 status;
+ u32 dspeislur;
/*
* NOTE: We don't need to protect the register access using
@@ -717,9 +738,11 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
control = HVS_READ(SCALER_DISPCTRL);
for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
+ dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel);
/* Interrupt masking is not always honored, so check it here. */
if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
- control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
+ control & dspeislur) {
vc4_hvs_mask_underrun(hvs, channel);
vc4_hvs_report_underrun(dev);
@@ -740,7 +763,6 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor)
struct drm_device *drm = minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = vc4->hvs;
- int ret;
if (!vc4->hvs)
return -ENODEV;
@@ -750,24 +772,55 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor)
minor->debugfs_root,
&vc4->load_tracker_enabled);
- ret = vc4_debugfs_add_file(minor, "hvs_dlists",
- vc4_hvs_debugfs_dlist, NULL);
- if (ret)
- return ret;
+ drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL);
- ret = vc4_debugfs_add_file(minor, "hvs_underrun",
- vc4_hvs_debugfs_underrun, NULL);
- if (ret)
- return ret;
+ drm_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun, NULL);
- ret = vc4_debugfs_add_regset32(minor, "hvs_regs",
- &hvs->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
return 0;
}
+struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev)
+{
+ struct drm_device *drm = &vc4->base;
+ struct vc4_hvs *hvs;
+
+ hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
+ if (!hvs)
+ return ERR_PTR(-ENOMEM);
+
+ hvs->vc4 = vc4;
+ hvs->pdev = pdev;
+
+ spin_lock_init(&hvs->mm_lock);
+
+ /* Set up the HVS display list memory manager. We never
+ * overwrite the setup from the bootloader (just 128b out of
+ * our 16K), since we don't want to scramble the screen when
+ * transitioning from the firmware's boot setup to runtime.
+ */
+ drm_mm_init(&hvs->dlist_mm,
+ HVS_BOOTLOADER_DLIST_END,
+ (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
+
+ /* Set up the HVS LBM memory manager. We could have some more
+ * complicated data structure that allowed reuse of LBM areas
+ * between planes when they don't overlap on the screen, but
+ * for now we just allocate globally.
+ */
+ if (!vc4->is_vc5)
+ /* 48k words of 2x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
+ else
+ /* 60k words of 4x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
+
+ vc4->hvs = hvs;
+
+ return hvs;
+}
+
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -776,13 +829,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
struct vc4_hvs *hvs = NULL;
int ret;
u32 dispctrl;
- u32 reg;
+ u32 reg, top;
- hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
- if (!hvs)
- return -ENOMEM;
- hvs->vc4 = vc4;
- hvs->pdev = pdev;
+ hvs = __vc4_hvs_alloc(vc4, NULL);
+ if (IS_ERR(hvs))
+ return PTR_ERR(hvs);
hvs->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(hvs->regs))
@@ -835,29 +886,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
else
hvs->dlist = hvs->regs + SCALER5_DLIST_START;
- spin_lock_init(&hvs->mm_lock);
-
- /* Set up the HVS display list memory manager. We never
- * overwrite the setup from the bootloader (just 128b out of
- * our 16K), since we don't want to scramble the screen when
- * transitioning from the firmware's boot setup to runtime.
- */
- drm_mm_init(&hvs->dlist_mm,
- HVS_BOOTLOADER_DLIST_END,
- (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
-
- /* Set up the HVS LBM memory manager. We could have some more
- * complicated data structure that allowed reuse of LBM areas
- * between planes when they don't overlap on the screen, but
- * for now we just allocate globally.
- */
- if (!vc4->is_vc5)
- /* 48k words of 2x12-bit pixels */
- drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
- else
- /* 60k words of 4x12-bit pixels */
- drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
-
/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.
*/
@@ -867,8 +895,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- vc4->hvs = hvs;
-
reg = HVS_READ(SCALER_DISPECTRL);
reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
HVS_WRITE(SCALER_DISPECTRL,
@@ -896,22 +922,102 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
SCALER_DISPCTRL_DISPEIRQ(1) |
SCALER_DISPCTRL_DISPEIRQ(2);
- dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
- SCALER_DISPCTRL_SLVWREIRQ |
- SCALER_DISPCTRL_SLVRDEIRQ |
- SCALER_DISPCTRL_DSPEIEOF(0) |
- SCALER_DISPCTRL_DSPEIEOF(1) |
- SCALER_DISPCTRL_DSPEIEOF(2) |
- SCALER_DISPCTRL_DSPEIEOLN(0) |
- SCALER_DISPCTRL_DSPEIEOLN(1) |
- SCALER_DISPCTRL_DSPEIEOLN(2) |
- SCALER_DISPCTRL_DSPEISLUR(0) |
- SCALER_DISPCTRL_DSPEISLUR(1) |
- SCALER_DISPCTRL_DSPEISLUR(2) |
- SCALER_DISPCTRL_SCLEIRQ);
+ if (!vc4->is_vc5)
+ dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
+ SCALER_DISPCTRL_SLVWREIRQ |
+ SCALER_DISPCTRL_SLVRDEIRQ |
+ SCALER_DISPCTRL_DSPEIEOF(0) |
+ SCALER_DISPCTRL_DSPEIEOF(1) |
+ SCALER_DISPCTRL_DSPEIEOF(2) |
+ SCALER_DISPCTRL_DSPEIEOLN(0) |
+ SCALER_DISPCTRL_DSPEIEOLN(1) |
+ SCALER_DISPCTRL_DSPEIEOLN(2) |
+ SCALER_DISPCTRL_DSPEISLUR(0) |
+ SCALER_DISPCTRL_DSPEISLUR(1) |
+ SCALER_DISPCTRL_DSPEISLUR(2) |
+ SCALER_DISPCTRL_SCLEIRQ);
+ else
+ dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
+ SCALER5_DISPCTRL_SLVEIRQ |
+ SCALER5_DISPCTRL_DSPEIEOF(0) |
+ SCALER5_DISPCTRL_DSPEIEOF(1) |
+ SCALER5_DISPCTRL_DSPEIEOF(2) |
+ SCALER5_DISPCTRL_DSPEIEOLN(0) |
+ SCALER5_DISPCTRL_DSPEIEOLN(1) |
+ SCALER5_DISPCTRL_DSPEIEOLN(2) |
+ SCALER5_DISPCTRL_DSPEISLUR(0) |
+ SCALER5_DISPCTRL_DSPEISLUR(1) |
+ SCALER5_DISPCTRL_DSPEISLUR(2) |
+ SCALER_DISPCTRL_SCLEIRQ);
+
+
+ /* Set AXI panic mode.
+ * VC4 panics when fewer than 2 lines are in the FIFO.
+ * VC5 panics when fewer than 1 line is in the FIFO.
+ */
+ dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK |
+ SCALER_DISPCTRL_PANIC1_MASK |
+ SCALER_DISPCTRL_PANIC2_MASK);
+ dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0);
+ dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
+ dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+ /* Recompute Composite Output Buffer (COB) allocations for the displays
+ */
+ if (!vc4->is_vc5) {
+ /* The COB is 20736 pixels, or just over 10 lines at 2048 wide.
+ * The bottom 2048 pixels are full 32bpp RGBA (intended for the
+ * TXP composing RGBA to memory), whilst the remainder are only
+ * 24bpp RGB.
+ *
+ * Assign 3 lines to channels 1 & 2, and just over 4 lines to
+ * channel 0.
+ */
+ #define VC4_COB_SIZE 20736
+ #define VC4_COB_LINE_WIDTH 2048
+ #define VC4_COB_NUM_LINES 3
+ reg = 0;
+ top = VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
+ reg |= (top - 1) << 16;
+ HVS_WRITE(SCALER_DISPBASE2, reg);
+ reg = top;
+ top += VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
+ reg |= (top - 1) << 16;
+ HVS_WRITE(SCALER_DISPBASE1, reg);
+ reg = top;
+ top = VC4_COB_SIZE;
+ reg |= (top - 1) << 16;
+ HVS_WRITE(SCALER_DISPBASE0, reg);
+ } else {
+ /* The COB is 44416 pixels, or 10.8 lines at 4096 wide.
+ * The bottom 4096 pixels are full RGBA (intended for the TXP
+ * composing RGBA to memory), whilst the remainder are only
+ * RGB. Addressing is always pixel wide.
+ *
+ * Assign 3 lines of 4096 to channels 1 & 2, and just over 4
+ * lines to channel 0.
+ */
+ #define VC5_COB_SIZE 44416
+ #define VC5_COB_LINE_WIDTH 4096
+ #define VC5_COB_NUM_LINES 3
+ reg = 0;
+ top = VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
+ reg |= top << 16;
+ HVS_WRITE(SCALER_DISPBASE2, reg);
+ top += 16;
+ reg = top;
+ top += VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
+ reg |= top << 16;
+ HVS_WRITE(SCALER_DISPBASE1, reg);
+ top += 16;
+ reg = top;
+ top = VC5_COB_SIZE;
+ reg |= top << 16;
+ HVS_WRITE(SCALER_DISPBASE0, reg);
+ }
+
ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
if (ret)
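For the Composite Output Buffer split above, each SCALER_DISPBASEn register packs a channel's allocation as the base address in the low half-word and the top address in the high half-word (written as top - 1 on VC4). Using the VC4 numbers quoted in the comment (20736 pixels total, 2048-pixel lines, 3 lines each for channels 2 and 1, the remainder for channel 0), the register values can be reproduced with the stand-alone arithmetic sketch below; it only re-runs the calculation and is not driver code.

#include <stdio.h>

/* VC4 (HVS4) COB geometry, taken from the comment above. */
#define VC4_COB_SIZE            20736
#define VC4_COB_LINE_WIDTH      2048
#define VC4_COB_NUM_LINES       3

int main(void)
{
        unsigned int base = 0, top, reg;

        /* Channel 2: the first 3 lines. */
        top = VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
        reg = base | ((top - 1) << 16);
        printf("DISPBASE2 = 0x%08x (base %u, top %u)\n", reg, base, top);

        /* Channel 1: the next 3 lines. */
        base = top;
        top += VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
        reg = base | ((top - 1) << 16);
        printf("DISPBASE1 = 0x%08x (base %u, top %u)\n", reg, base, top);

        /* Channel 0: whatever is left, just over 4 lines. */
        base = top;
        top = VC4_COB_SIZE;
        reg = base | ((top - 1) << 16);
        printf("DISPBASE0 = 0x%08x (base %u, top %u)\n", reg, base, top);

        return 0;
}

The VC5 branch in the hunk follows the same scheme but writes the top address directly rather than top - 1 and leaves a 16-pixel gap between channels.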
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 8fbeecdf2ec4..a7e3d47c50f4 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk.h>
+#include <linux/sort.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -24,8 +25,6 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-#define HVS_NUM_CHANNELS 3
-
struct vc4_ctm_state {
struct drm_private_state base;
struct drm_color_ctm *ctm;
@@ -38,23 +37,6 @@ to_vc4_ctm_state(const struct drm_private_state *priv)
return container_of(priv, struct vc4_ctm_state, base);
}
-struct vc4_hvs_state {
- struct drm_private_state base;
- unsigned long core_clock_rate;
-
- struct {
- unsigned in_use: 1;
- unsigned long fifo_load;
- struct drm_crtc_commit *pending_commit;
- } fifo_state[HVS_NUM_CHANNELS];
-};
-
-static struct vc4_hvs_state *
-to_vc4_hvs_state(const struct drm_private_state *priv)
-{
- return container_of(priv, struct vc4_hvs_state, base);
-}
-
struct vc4_load_tracker_state {
struct drm_private_state base;
u64 hvs_load;
@@ -190,8 +172,8 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
-static struct vc4_hvs_state *
-vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
+struct vc4_hvs_state *
+vc4_hvs_get_new_global_state(const struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
struct drm_private_state *priv_state;
@@ -203,8 +185,8 @@ vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
return to_vc4_hvs_state(priv_state);
}
-static struct vc4_hvs_state *
-vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
+struct vc4_hvs_state *
+vc4_hvs_get_old_global_state(const struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
struct drm_private_state *priv_state;
@@ -216,7 +198,7 @@ vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
return to_vc4_hvs_state(priv_state);
}
-static struct vc4_hvs_state *
+struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
@@ -776,6 +758,20 @@ static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}
+static int cmp_vc4_crtc_hvs_output(const void *a, const void *b)
+{
+ const struct vc4_crtc *crtc_a =
+ to_vc4_crtc(*(const struct drm_crtc **)a);
+ const struct vc4_crtc_data *data_a =
+ vc4_crtc_to_vc4_crtc_data(crtc_a);
+ const struct vc4_crtc *crtc_b =
+ to_vc4_crtc(*(const struct drm_crtc **)b);
+ const struct vc4_crtc_data *data_b =
+ vc4_crtc_to_vc4_crtc_data(crtc_b);
+
+ return data_a->hvs_output - data_b->hvs_output;
+}
+
/*
* The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
* the TXP (and therefore all the CRTCs found on that platform).
@@ -810,10 +806,11 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct vc4_hvs_state *hvs_new_state;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_crtc **sorted_crtcs;
struct drm_crtc *crtc;
unsigned int unassigned_channels = 0;
unsigned int i;
+ int ret;
hvs_new_state = vc4_hvs_get_global_state(state);
if (IS_ERR(hvs_new_state))
@@ -823,15 +820,59 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
if (!hvs_new_state->fifo_state[i].in_use)
unassigned_channels |= BIT(i);
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct vc4_crtc_state *old_vc4_crtc_state =
- to_vc4_crtc_state(old_crtc_state);
- struct vc4_crtc_state *new_vc4_crtc_state =
- to_vc4_crtc_state(new_crtc_state);
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ /*
+ * The problem we have to solve here is that we have up to 7
+ * encoders, connected to up to 6 CRTCs.
+ *
+ * Those CRTCs, depending on the instance, can be routed to 1, 2
+ * or 3 HVS FIFOs, and we need to set the muxing between FIFOs and
+ * outputs in the HVS accordingly.
+ *
+ * It would be pretty hard to come up with an algorithm that
+ * would generically solve this. However, the current routing
+ * trees we support allow us to simplify a bit the problem.
+ *
+ * Indeed, with the currently supported layouts, if we try to
+ * assign the FIFOs in ascending CRTC index order, we can't
+ * fall into the situation where an earlier CRTC that had
+ * multiple routes is assigned one that was the only option for
+ * a later CRTC.
+ *
+ * If the layout changes and doesn't give us that in the future,
+ * we will need to have something smarter, but it works so far.
+ */
+ sorted_crtcs = kmalloc_array(dev->num_crtcs, sizeof(*sorted_crtcs), GFP_KERNEL);
+ if (!sorted_crtcs)
+ return -ENOMEM;
+
+ i = 0;
+ drm_for_each_crtc(crtc, dev)
+ sorted_crtcs[i++] = crtc;
+
+ sort(sorted_crtcs, i, sizeof(*sorted_crtcs), cmp_vc4_crtc_hvs_output, NULL);
+
+ for (i = 0; i < dev->num_crtcs; i++) {
+ struct vc4_crtc_state *old_vc4_crtc_state, *new_vc4_crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct vc4_crtc *vc4_crtc;
unsigned int matching_channels;
unsigned int channel;
+ crtc = sorted_crtcs[i];
+ if (!crtc)
+ continue;
+ vc4_crtc = to_vc4_crtc(crtc);
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ if (!old_crtc_state)
+ continue;
+ old_vc4_crtc_state = to_vc4_crtc_state(old_crtc_state);
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state)
+ continue;
+ new_vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
+
drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);
/* Nothing to do here, let's skip it */
@@ -860,33 +901,11 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
continue;
}
- /*
- * The problem we have to solve here is that we have
- * up to 7 encoders, connected to up to 6 CRTCs.
- *
- * Those CRTCs, depending on the instance, can be
- * routed to 1, 2 or 3 HVS FIFOs, and we need to set
- * the change the muxing between FIFOs and outputs in
- * the HVS accordingly.
- *
- * It would be pretty hard to come up with an
- * algorithm that would generically solve
- * this. However, the current routing trees we support
- * allow us to simplify a bit the problem.
- *
- * Indeed, with the current supported layouts, if we
- * try to assign in the ascending crtc index order the
- * FIFOs, we can't fall into the situation where an
- * earlier CRTC that had multiple routes is assigned
- * one that was the only option for a later CRTC.
- *
- * If the layout changes and doesn't give us that in
- * the future, we will need to have something smarter,
- * but it works so far.
- */
matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
- if (!matching_channels)
- return -EINVAL;
+ if (!matching_channels) {
+ ret = -EINVAL;
+ goto err_free_crtc_array;
+ }
channel = ffs(matching_channels) - 1;
@@ -896,7 +915,12 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
hvs_new_state->fifo_state[channel].in_use = true;
}
+ kfree(sorted_crtcs);
return 0;
+
+err_free_crtc_array:
+ kfree(sorted_crtcs);
+ return ret;
}
static int
@@ -1050,6 +1074,7 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
+ dev->mode_config.normalize_zpos = true;
ret = vc4_ctm_obj_init(vc4);
if (ret)
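The comment block added to vc4_pv_muxing_atomic_check() reduces to a simple greedy pass: order the CRTCs by their fixed hvs_output, then give each one the lowest-numbered FIFO it can still use (ffs of the intersection with the unassigned mask). The stand-alone sketch below illustrates why that ordering avoids starving a later CRTC with fewer routing options; the channel masks are made-up examples (the last one mirrors the TXP, which can only use FIFO 2), and none of this is the driver code itself.

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* Example per-CRTC masks of usable HVS FIFOs, sorted by hvs_output. */
static const unsigned int hvs_available_channels[] = {
        0x7,    /* can use FIFO 0, 1 or 2 */
        0x7,    /* can use FIFO 0, 1 or 2 */
        0x4,    /* TXP-like CRTC: FIFO 2 only */
};

int main(void)
{
        unsigned int unassigned = 0x7;  /* all three FIFOs free */
        unsigned int i;

        for (i = 0; i < 3; i++) {
                unsigned int matching = unassigned & hvs_available_channels[i];
                int channel;

                if (!matching) {
                        printf("crtc %u: no FIFO left -> -EINVAL\n", i);
                        return 1;
                }

                /* Pick the lowest available FIFO, as the driver does. */
                channel = ffs(matching) - 1;
                unassigned &= ~(1U << channel);
                printf("crtc %u -> FIFO %d\n", i, channel);
        }

        return 0;
}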
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 8b92a45a3c89..dee525bacd4b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -65,78 +65,176 @@ static const struct hvs_format {
.drm = DRM_FORMAT_RGB565,
.hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XRGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XRGB,
},
{
.drm = DRM_FORMAT_BGR565,
.hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XBGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XBGR,
},
{
.drm = DRM_FORMAT_ARGB1555,
.hvs = HVS_PIXEL_FORMAT_RGBA5551,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_XRGB1555,
.hvs = HVS_PIXEL_FORMAT_RGBA5551,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_RGB888,
.hvs = HVS_PIXEL_FORMAT_RGB888,
.pixel_order = HVS_PIXEL_ORDER_XRGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XRGB,
},
{
.drm = DRM_FORMAT_BGR888,
.hvs = HVS_PIXEL_FORMAT_RGB888,
.pixel_order = HVS_PIXEL_ORDER_XBGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XBGR,
},
{
.drm = DRM_FORMAT_YUV422,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_YVU422,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_YUV420,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_YVU420,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_NV12,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_NV21,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_NV16,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_NV61,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_P030,
.hvs = HVS_PIXEL_FORMAT_YCBCR_10BIT,
- .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
+ .hvs5_only = true,
+ },
+ {
+ .drm = DRM_FORMAT_XRGB2101010,
+ .hvs = HVS_PIXEL_FORMAT_RGBA1010102,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ .hvs5_only = true,
+ },
+ {
+ .drm = DRM_FORMAT_ARGB2101010,
+ .hvs = HVS_PIXEL_FORMAT_RGBA1010102,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ .hvs5_only = true,
+ },
+ {
+ .drm = DRM_FORMAT_ABGR2101010,
+ .hvs = HVS_PIXEL_FORMAT_RGBA1010102,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
.hvs5_only = true,
},
+ {
+ .drm = DRM_FORMAT_XBGR2101010,
+ .hvs = HVS_PIXEL_FORMAT_RGBA1010102,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
+ .hvs5_only = true,
+ },
+ {
+ .drm = DRM_FORMAT_RGB332,
+ .hvs = HVS_PIXEL_FORMAT_RGB332,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ },
+ {
+ .drm = DRM_FORMAT_BGR233,
+ .hvs = HVS_PIXEL_FORMAT_RGB332,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
+ },
+ {
+ .drm = DRM_FORMAT_XRGB4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ },
+ {
+ .drm = DRM_FORMAT_ARGB4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ },
+ {
+ .drm = DRM_FORMAT_XBGR4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
+ },
+ {
+ .drm = DRM_FORMAT_ABGR4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
+ },
+ {
+ .drm = DRM_FORMAT_BGRX4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_RGBA,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_BGRA,
+ },
+ {
+ .drm = DRM_FORMAT_BGRA4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_RGBA,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_BGRA,
+ },
+ {
+ .drm = DRM_FORMAT_RGBX4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_BGRA,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_RGBA,
+ },
+ {
+ .drm = DRM_FORMAT_RGBA4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_BGRA,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_RGBA,
+ },
};
static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
@@ -1001,15 +1099,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
} else {
- u32 hvs_pixel_order = format->pixel_order;
-
- if (format->pixel_order_hvs5)
- hvs_pixel_order = format->pixel_order_hvs5;
-
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
- (hvs_pixel_order << SCALER_CTL0_ORDER_SHIFT) |
+ (format->pixel_order_hvs5 << SCALER_CTL0_ORDER_SHIFT) |
(hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
(vc4_state->is_unity ?
@@ -1488,6 +1581,16 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
@@ -1568,9 +1671,14 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_create_zpos_immutable_property(plane, 0);
+
return plane;
}
+#define VC4_NUM_OVERLAY_PLANES 16
+
int vc4_plane_create_additional_planes(struct drm_device *drm)
{
struct drm_plane *cursor_plane;
@@ -1586,24 +1694,35 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
* modest number of planes to expose, that should hopefully
* still cover any sane usecase.
*/
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < VC4_NUM_OVERLAY_PLANES; i++) {
struct drm_plane *plane =
vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY,
GENMASK(drm->mode_config.num_crtc - 1, 0));
if (IS_ERR(plane))
continue;
+
+ /* Create zpos property. Max of all the overlays + 1 primary +
+ * 1 cursor plane on a crtc.
+ */
+ drm_plane_create_zpos_property(plane, i + 1, 1,
+ VC4_NUM_OVERLAY_PLANES + 1);
}
drm_for_each_crtc(crtc, drm) {
/* Set up the legacy cursor after overlay initialization,
- * since we overlay planes on the CRTC in the order they were
- * initialized.
+ * since the zpos fallback is that planes are rendered by plane
+ * ID order, and that then puts the cursor on top.
*/
cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR,
drm_crtc_mask(crtc));
if (!IS_ERR(cursor_plane)) {
crtc->cursor = cursor_plane;
+
+ drm_plane_create_zpos_property(cursor_plane,
+ VC4_NUM_OVERLAY_PLANES + 1,
+ 1,
+ VC4_NUM_OVERLAY_PLANES + 1);
}
}
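Taken together with the normalize_zpos flag enabled in vc4_kms.c and the zpos-ordered dlist walk in vc4_hvs_atomic_flush(), the calls above pin the blending order: the primary plane sits at an immutable zpos of 0, the 16 overlays default to 1..16, and the cursor defaults to 17, with every mutable plane allowed anywhere in 1..17. A condensed sketch of just the property setup follows; example_create_zpos_properties() and EXAMPLE_NUM_OVERLAYS are made-up names, not vc4 code.

#include <drm/drm_blend.h>
#include <drm/drm_plane.h>

#define EXAMPLE_NUM_OVERLAYS    16

static void example_create_zpos_properties(struct drm_plane *primary,
                                           struct drm_plane **overlays,
                                           struct drm_plane *cursor)
{
        unsigned int i;

        /* Primary is always at the bottom and cannot be moved. */
        drm_plane_create_zpos_immutable_property(primary, 0);

        /* Overlays default to 1..16 but may be reordered by userspace. */
        for (i = 0; i < EXAMPLE_NUM_OVERLAYS; i++)
                drm_plane_create_zpos_property(overlays[i], i + 1, 1,
                                               EXAMPLE_NUM_OVERLAYS + 1);

        /* Cursor defaults to the top slot. */
        drm_plane_create_zpos_property(cursor, EXAMPLE_NUM_OVERLAYS + 1, 1,
                                       EXAMPLE_NUM_OVERLAYS + 1);
}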
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index f0290fad991d..f3763bd600f6 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -220,6 +220,12 @@
#define SCALER_DISPCTRL 0x00000000
/* Global register for clock gating the HVS */
# define SCALER_DISPCTRL_ENABLE BIT(31)
+# define SCALER_DISPCTRL_PANIC0_MASK VC4_MASK(25, 24)
+# define SCALER_DISPCTRL_PANIC0_SHIFT 24
+# define SCALER_DISPCTRL_PANIC1_MASK VC4_MASK(27, 26)
+# define SCALER_DISPCTRL_PANIC1_SHIFT 26
+# define SCALER_DISPCTRL_PANIC2_MASK VC4_MASK(29, 28)
+# define SCALER_DISPCTRL_PANIC2_SHIFT 28
# define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18)
# define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18
@@ -228,15 +234,21 @@
* always enabled.
*/
# define SCALER_DISPCTRL_DSPEISLUR(x) BIT(13 + (x))
+# define SCALER5_DISPCTRL_DSPEISLUR(x) BIT(9 + ((x) * 4))
/* Enables Display 0 end-of-line-N contribution to
* SCALER_DISPSTAT_IRQDISP0
*/
# define SCALER_DISPCTRL_DSPEIEOLN(x) BIT(8 + ((x) * 2))
+# define SCALER5_DISPCTRL_DSPEIEOLN(x) BIT(8 + ((x) * 4))
/* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */
# define SCALER_DISPCTRL_DSPEIEOF(x) BIT(7 + ((x) * 2))
+# define SCALER5_DISPCTRL_DSPEIEOF(x) BIT(7 + ((x) * 4))
-# define SCALER_DISPCTRL_SLVRDEIRQ BIT(6)
-# define SCALER_DISPCTRL_SLVWREIRQ BIT(5)
+# define SCALER5_DISPCTRL_DSPEIVST(x) BIT(6 + ((x) * 4))
+
+# define SCALER_DISPCTRL_SLVRDEIRQ BIT(6) /* HVS4 only */
+# define SCALER_DISPCTRL_SLVWREIRQ BIT(5) /* HVS4 only */
+# define SCALER5_DISPCTRL_SLVEIRQ BIT(5)
# define SCALER_DISPCTRL_DMAEIRQ BIT(4)
/* Enables interrupt generation on the enabled EOF/EOLN/EISLUR
* bits and short frames.
@@ -360,6 +372,7 @@
#define SCALER_DISPBKGND0 0x00000044
# define SCALER_DISPBKGND_AUTOHS BIT(31)
+# define SCALER5_DISPBKGND_BCK2BCK BIT(31)
# define SCALER_DISPBKGND_INTERLACE BIT(30)
# define SCALER_DISPBKGND_GAMMA BIT(29)
# define SCALER_DISPBKGND_TESTMODE_MASK VC4_MASK(28, 25)
@@ -835,16 +848,19 @@ enum hvs_pixel_format {
/* Note: the LSB is the rightmost character shown. Only valid for
* HVS_PIXEL_FORMAT_RGB8888, not RGB888.
*/
+/* For modes 332, 4444, 555, 5551, 6666, 8888, 10:10:10:2 */
#define HVS_PIXEL_ORDER_RGBA 0
#define HVS_PIXEL_ORDER_BGRA 1
#define HVS_PIXEL_ORDER_ARGB 2
#define HVS_PIXEL_ORDER_ABGR 3
+/* For modes 666 and 888 (4 & 5) */
#define HVS_PIXEL_ORDER_XBRG 0
#define HVS_PIXEL_ORDER_XRBG 1
#define HVS_PIXEL_ORDER_XRGB 2
#define HVS_PIXEL_ORDER_XBGR 3
+/* For YCbCr modes (8-12, and 17) */
#define HVS_PIXEL_ORDER_XYCBCR 0
#define HVS_PIXEL_ORDER_XYCRCB 1
#define HVS_PIXEL_ORDER_YXCBCR 2
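The split between the SCALER_DISPCTRL_* and SCALER5_DISPCTRL_* definitions above is why the HVS code earlier has to key off vc4->is_vc5: HVS4 keeps the per-channel SLUR enables in a consecutive run starting at bit 13 and spaces EOF/EOLN two bits apart, while HVS5 packs four enable bits per channel starting at bit 6. The small stand-alone snippet below only re-evaluates the BIT() arithmetic from the definitions to make the two layouts explicit.

#include <stdio.h>

#define BIT(n)  (1U << (n))

/* Same bit arithmetic as the definitions above. */
#define SCALER_DISPCTRL_DSPEISLUR(x)    BIT(13 + (x))           /* HVS4 */
#define SCALER5_DISPCTRL_DSPEISLUR(x)   BIT(9 + ((x) * 4))      /* HVS5 */

int main(void)
{
        int channel;

        for (channel = 0; channel < 3; channel++)
                printf("channel %d: HVS4 SLUR 0x%08x, HVS5 SLUR 0x%08x\n",
                       channel,
                       SCALER_DISPCTRL_DSPEISLUR(channel),
                       SCALER5_DISPCTRL_DSPEISLUR(channel));

        return 0;
}

Note that bit 13 is the channel 0 enable on HVS4 but the channel 1 enable on HVS5, so masking with the wrong macro would silently target the wrong display channel.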
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index bd181b5a7b52..ef5cab2a3aa9 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -145,14 +145,24 @@
/* Number of lines received and committed to memory. */
#define TXP_PROGRESS 0x10
-#define TXP_READ(offset) readl(txp->regs + (offset))
-#define TXP_WRITE(offset, val) writel(val, txp->regs + (offset))
+#define TXP_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(txp->regs + (offset)); \
+ })
+
+#define TXP_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, txp->regs + (offset)); \
+ } while (0)
struct vc4_txp {
struct vc4_crtc base;
struct platform_device *pdev;
+ struct vc4_encoder encoder;
struct drm_writeback_connector connector;
void __iomem *regs;
@@ -160,7 +170,7 @@ struct vc4_txp {
static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
{
- return container_of(encoder, struct vc4_txp, connector.encoder);
+ return container_of(encoder, struct vc4_txp, encoder.base);
}
static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn)
@@ -478,7 +488,8 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static const struct vc4_crtc_data vc4_txp_crtc_data = {
+const struct vc4_crtc_data vc4_txp_crtc_data = {
+ .name = "txp",
.debugfs_name = "txp_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
@@ -488,10 +499,10 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_encoder *vc4_encoder;
+ struct drm_encoder *encoder;
struct vc4_crtc *vc4_crtc;
struct vc4_txp *txp;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
int ret, irq;
irq = platform_get_irq(pdev, 0);
@@ -501,39 +512,42 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
txp = drmm_kzalloc(drm, sizeof(*txp), GFP_KERNEL);
if (!txp)
return -ENOMEM;
- vc4_crtc = &txp->base;
- crtc = &vc4_crtc->base;
-
- vc4_crtc->pdev = pdev;
- vc4_crtc->data = &vc4_txp_crtc_data;
- vc4_crtc->feeds_txp = true;
txp->pdev = pdev;
-
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
return PTR_ERR(txp->regs);
+
+ vc4_crtc = &txp->base;
vc4_crtc->regset.base = txp->regs;
vc4_crtc->regset.regs = txp_regs;
vc4_crtc->regset.nregs = ARRAY_SIZE(txp_regs);
- drm_connector_helper_add(&txp->connector.base,
- &vc4_txp_connector_helper_funcs);
- ret = drm_writeback_connector_init(drm, &txp->connector,
- &vc4_txp_connector_funcs,
- &vc4_txp_encoder_helper_funcs,
- drm_fmts, ARRAY_SIZE(drm_fmts),
- 0);
+ ret = vc4_crtc_init(drm, pdev, vc4_crtc, &vc4_txp_crtc_data,
+ &vc4_txp_crtc_funcs, &vc4_txp_crtc_helper_funcs, true);
if (ret)
return ret;
- ret = vc4_crtc_init(drm, vc4_crtc,
- &vc4_txp_crtc_funcs, &vc4_txp_crtc_helper_funcs);
+ vc4_encoder = &txp->encoder;
+ txp->encoder.type = VC4_ENCODER_TYPE_TXP;
+
+ encoder = &vc4_encoder->base;
+ encoder->possible_crtcs = drm_crtc_mask(&vc4_crtc->base);
+
+ drm_encoder_helper_add(encoder, &vc4_txp_encoder_helper_funcs);
+
+ ret = drmm_encoder_init(drm, encoder, NULL, DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
return ret;
- encoder = &txp->connector.encoder;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
+ drm_connector_helper_add(&txp->connector.base,
+ &vc4_txp_connector_helper_funcs);
+ ret = drm_writeback_connector_init_with_encoder(drm, &txp->connector,
+ encoder,
+ &vc4_txp_connector_funcs,
+ drm_fmts, ARRAY_SIZE(drm_fmts));
+ if (ret)
+ return ret;
ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
dev_name(dev), txp);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 56abb0d6bc39..29a664c8bf44 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -96,8 +96,8 @@ static const struct debugfs_reg32 v3d_regs[] = {
static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret = vc4_v3d_pm_get(vc4);
@@ -404,19 +404,13 @@ int vc4_v3d_debugfs_init(struct drm_minor *minor)
struct drm_device *drm = minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_v3d *v3d = vc4->v3d;
- int ret;
if (!vc4->v3d)
return -ENODEV;
- ret = vc4_debugfs_add_file(minor, "v3d_ident",
- vc4_v3d_debugfs_ident, NULL);
- if (ret)
- return ret;
+ drm_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL);
- ret = vc4_debugfs_add_regset32(minor, "v3d_regs", &v3d->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 92c07e31d632..a3782d05cd66 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -46,6 +46,7 @@
#define VEC_CONFIG0_YDEL(x) ((x) << 26)
#define VEC_CONFIG0_CDEL_MASK GENMASK(25, 24)
#define VEC_CONFIG0_CDEL(x) ((x) << 24)
+#define VEC_CONFIG0_SECAM_STD BIT(21)
#define VEC_CONFIG0_PBPR_FIL BIT(18)
#define VEC_CONFIG0_CHROMA_GAIN_MASK GENMASK(17, 16)
#define VEC_CONFIG0_CHROMA_GAIN_UNITY (0 << 16)
@@ -76,6 +77,27 @@
#define VEC_SOFT_RESET 0x10c
#define VEC_CLMP0_START 0x144
#define VEC_CLMP0_END 0x148
+
+/*
+ * These set the color subcarrier frequency
+ * if VEC_CONFIG1_CUSTOM_FREQ is enabled.
+ *
+ * VEC_FREQ1_0 contains the most significant 16-bit half-word,
+ * VEC_FREQ3_2 contains the least significant 16-bit half-word.
+ * 0x80000000 seems to be equivalent to the pixel clock
+ * (which itself is the VEC clock divided by 8).
+ *
+ * Reference values (with the default pixel clock of 13.5 MHz):
+ *
+ * NTSC (3579545.[45] Hz) - 0x21F07C1F
+ * PAL (4433618.75 Hz) - 0x2A098ACB
+ * PAL-M (3575611.[888111] Hz) - 0x21E6EFE3
+ * PAL-N (3582056.25 Hz) - 0x21F69446
+ *
+ * NOTE: For SECAM, it is used as the Dr center frequency,
+ * regardless of whether VEC_CONFIG1_CUSTOM_FREQ is enabled or not;
+ * that is specified as 4406250 Hz, which corresponds to 0x29C71C72.
+ */
#define VEC_FREQ3_2 0x180
#define VEC_FREQ1_0 0x184
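The subcarrier comment above can be made concrete: 0x80000000 is 2^31, so if that value corresponds to the pixel clock (13.5 MHz by default), the 32-bit frequency word is simply f_subcarrier / f_pixel scaled by 2^31, with the top half going to VEC_FREQ1_0 and the bottom half to VEC_FREQ3_2. The stand-alone check below reproduces the reference values listed in the comment under that assumption; vec_freq_word() is a made-up helper, not driver code, and the repeating decimals are written as exact fractions.

#include <math.h>
#include <stdio.h>

#define VEC_PIXEL_CLOCK_HZ      13500000.0

/* Assumed relation: FREQ word = f_subcarrier / f_pixel * 2^31. */
static unsigned int vec_freq_word(double subcarrier_hz)
{
        return (unsigned int)llround(subcarrier_hz / VEC_PIXEL_CLOCK_HZ *
                                     2147483648.0);
}

int main(void)
{
        printf("NTSC  : 0x%08X (expect 0x21F07C1F)\n",
               vec_freq_word(39375000.0 / 11.0));       /* 3579545.45... Hz */
        printf("PAL   : 0x%08X (expect 0x2A098ACB)\n",
               vec_freq_word(4433618.75));
        printf("PAL-N : 0x%08X (expect 0x21F69446)\n",
               vec_freq_word(3582056.25));
        printf("SECAM : 0x%08X (expect 0x29C71C72)\n",
               vec_freq_word(4406250.0));

        return 0;
}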
@@ -118,6 +140,14 @@
#define VEC_INTERRUPT_CONTROL 0x190
#define VEC_INTERRUPT_STATUS 0x194
+
+/*
+ * Db center frequency for SECAM; the clock for this is the same as for
+ * VEC_FREQ3_2/VEC_FREQ1_0, which is used for Dr center frequency.
+ *
+ * This is specified as 4250000 Hz, which corresponds to 0x284BDA13.
+ * That is also the default value, so no need to set it explicitly.
+ */
#define VEC_FCW_SECAM_B 0x198
#define VEC_SECAM_GAIN_VAL 0x19c
@@ -172,11 +202,22 @@ struct vc4_vec {
struct clk *clock;
+ struct drm_property *legacy_tv_mode_property;
+
struct debugfs_regset32 regset;
};
-#define VEC_READ(offset) readl(vec->regs + (offset))
-#define VEC_WRITE(offset, val) writel(val, vec->regs + (offset))
+#define VEC_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(vec->regs + (offset)); \
+ })
+
+#define VEC_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, vec->regs + (offset)); \
+ } while (0)
static inline struct vc4_vec *
encoder_to_vc4_vec(struct drm_encoder *encoder)
@@ -184,15 +225,26 @@ encoder_to_vc4_vec(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_vec, encoder.base);
}
+static inline struct vc4_vec *
+connector_to_vc4_vec(struct drm_connector *connector)
+{
+ return container_of(connector, struct vc4_vec, connector);
+}
+
enum vc4_vec_tv_mode_id {
VC4_VEC_TV_MODE_NTSC,
VC4_VEC_TV_MODE_NTSC_J,
VC4_VEC_TV_MODE_PAL,
VC4_VEC_TV_MODE_PAL_M,
+ VC4_VEC_TV_MODE_NTSC_443,
+ VC4_VEC_TV_MODE_PAL_60,
+ VC4_VEC_TV_MODE_PAL_N,
+ VC4_VEC_TV_MODE_SECAM,
};
struct vc4_vec_tv_mode {
- const struct drm_display_mode *mode;
+ unsigned int mode;
+ u16 expected_htotal;
u32 config0;
u32 config1;
u32 custom_freq;
@@ -225,41 +277,86 @@ static const struct debugfs_reg32 vec_regs[] = {
VC4_REG32(VEC_DAC_MISC),
};
-static const struct drm_display_mode ntsc_mode = {
- DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500,
- 720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0,
- 480, 480 + 7, 480 + 7 + 6, 525, 0,
- DRM_MODE_FLAG_INTERLACE)
-};
-
-static const struct drm_display_mode pal_mode = {
- DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500,
- 720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0,
- 576, 576 + 4, 576 + 4 + 6, 625, 0,
- DRM_MODE_FLAG_INTERLACE)
-};
-
static const struct vc4_vec_tv_mode vc4_vec_tv_modes[] = {
- [VC4_VEC_TV_MODE_NTSC] = {
- .mode = &ntsc_mode,
+ {
+ .mode = DRM_MODE_TV_MODE_NTSC,
+ .expected_htotal = 858,
.config0 = VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN,
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
- [VC4_VEC_TV_MODE_NTSC_J] = {
- .mode = &ntsc_mode,
+ {
+ .mode = DRM_MODE_TV_MODE_NTSC_443,
+ .expected_htotal = 858,
+ .config0 = VEC_CONFIG0_NTSC_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ,
+ .custom_freq = 0x2a098acb,
+ },
+ {
+ .mode = DRM_MODE_TV_MODE_NTSC_J,
+ .expected_htotal = 858,
.config0 = VEC_CONFIG0_NTSC_STD,
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
- [VC4_VEC_TV_MODE_PAL] = {
- .mode = &pal_mode,
+ {
+ .mode = DRM_MODE_TV_MODE_PAL,
+ .expected_htotal = 864,
.config0 = VEC_CONFIG0_PAL_BDGHI_STD,
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
- [VC4_VEC_TV_MODE_PAL_M] = {
- .mode = &ntsc_mode,
+ {
+ /* PAL-60 */
+ .mode = DRM_MODE_TV_MODE_PAL,
+ .expected_htotal = 858,
+ .config0 = VEC_CONFIG0_PAL_M_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ,
+ .custom_freq = 0x2a098acb,
+ },
+ {
+ .mode = DRM_MODE_TV_MODE_PAL_M,
+ .expected_htotal = 858,
.config0 = VEC_CONFIG0_PAL_M_STD,
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
+ {
+ .mode = DRM_MODE_TV_MODE_PAL_N,
+ .expected_htotal = 864,
+ .config0 = VEC_CONFIG0_PAL_N_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
+ },
+ {
+ .mode = DRM_MODE_TV_MODE_SECAM,
+ .expected_htotal = 864,
+ .config0 = VEC_CONFIG0_SECAM_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
+ .custom_freq = 0x29c71c72,
+ },
+};
+
+static inline const struct vc4_vec_tv_mode *
+vc4_vec_tv_mode_lookup(unsigned int mode, u16 htotal)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc4_vec_tv_modes); i++) {
+ const struct vc4_vec_tv_mode *tv_mode = &vc4_vec_tv_modes[i];
+
+ if (tv_mode->mode == mode &&
+ tv_mode->expected_htotal == htotal)
+ return tv_mode;
+ }
+
+ return NULL;
+}
+
+static const struct drm_prop_enum_list legacy_tv_mode_names[] = {
+ { VC4_VEC_TV_MODE_NTSC, "NTSC", },
+ { VC4_VEC_TV_MODE_NTSC_443, "NTSC-443", },
+ { VC4_VEC_TV_MODE_NTSC_J, "NTSC-J", },
+ { VC4_VEC_TV_MODE_PAL, "PAL", },
+ { VC4_VEC_TV_MODE_PAL_60, "PAL-60", },
+ { VC4_VEC_TV_MODE_PAL_M, "PAL-M", },
+ { VC4_VEC_TV_MODE_PAL_N, "PAL-N", },
+ { VC4_VEC_TV_MODE_SECAM, "SECAM", },
};
static enum drm_connector_status
@@ -268,38 +365,126 @@ vc4_vec_connector_detect(struct drm_connector *connector, bool force)
return connector_status_unknown;
}
-static int vc4_vec_connector_get_modes(struct drm_connector *connector)
+static void vc4_vec_connector_reset(struct drm_connector *connector)
{
- struct drm_connector_state *state = connector->state;
- struct drm_display_mode *mode;
+ drm_atomic_helper_connector_reset(connector);
+ drm_atomic_helper_connector_tv_reset(connector);
+}
- mode = drm_mode_duplicate(connector->dev,
- vc4_vec_tv_modes[state->tv.mode].mode);
- if (!mode) {
- DRM_ERROR("Failed to create a new display mode\n");
- return -ENOMEM;
+static int
+vc4_vec_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct vc4_vec *vec = connector_to_vc4_vec(connector);
+
+ if (property != vec->legacy_tv_mode_property)
+ return -EINVAL;
+
+ switch (val) {
+ case VC4_VEC_TV_MODE_NTSC:
+ state->tv.mode = DRM_MODE_TV_MODE_NTSC;
+ break;
+
+ case VC4_VEC_TV_MODE_NTSC_443:
+ state->tv.mode = DRM_MODE_TV_MODE_NTSC_443;
+ break;
+
+ case VC4_VEC_TV_MODE_NTSC_J:
+ state->tv.mode = DRM_MODE_TV_MODE_NTSC_J;
+ break;
+
+ case VC4_VEC_TV_MODE_PAL:
+ case VC4_VEC_TV_MODE_PAL_60:
+ state->tv.mode = DRM_MODE_TV_MODE_PAL;
+ break;
+
+ case VC4_VEC_TV_MODE_PAL_M:
+ state->tv.mode = DRM_MODE_TV_MODE_PAL_M;
+ break;
+
+ case VC4_VEC_TV_MODE_PAL_N:
+ state->tv.mode = DRM_MODE_TV_MODE_PAL_N;
+ break;
+
+ case VC4_VEC_TV_MODE_SECAM:
+ state->tv.mode = DRM_MODE_TV_MODE_SECAM;
+ break;
+
+ default:
+ return -EINVAL;
}
- drm_mode_probed_add(connector, mode);
+ return 0;
+}
+
+static int
+vc4_vec_connector_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct vc4_vec *vec = connector_to_vc4_vec(connector);
+
+ if (property != vec->legacy_tv_mode_property)
+ return -EINVAL;
+
+ switch (state->tv.mode) {
+ case DRM_MODE_TV_MODE_NTSC:
+ *val = VC4_VEC_TV_MODE_NTSC;
+ break;
+
+ case DRM_MODE_TV_MODE_NTSC_443:
+ *val = VC4_VEC_TV_MODE_NTSC_443;
+ break;
+
+ case DRM_MODE_TV_MODE_NTSC_J:
+ *val = VC4_VEC_TV_MODE_NTSC_J;
+ break;
- return 1;
+ case DRM_MODE_TV_MODE_PAL:
+ *val = VC4_VEC_TV_MODE_PAL;
+ break;
+
+ case DRM_MODE_TV_MODE_PAL_M:
+ *val = VC4_VEC_TV_MODE_PAL_M;
+ break;
+
+ case DRM_MODE_TV_MODE_PAL_N:
+ *val = VC4_VEC_TV_MODE_PAL_N;
+ break;
+
+ case DRM_MODE_TV_MODE_SECAM:
+ *val = VC4_VEC_TV_MODE_SECAM;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
static const struct drm_connector_funcs vc4_vec_connector_funcs = {
.detect = vc4_vec_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
+ .reset = vc4_vec_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_get_property = vc4_vec_connector_get_property,
+ .atomic_set_property = vc4_vec_connector_set_property,
};
static const struct drm_connector_helper_funcs vc4_vec_connector_helper_funcs = {
- .get_modes = vc4_vec_connector_get_modes,
+ .atomic_check = drm_atomic_helper_connector_tv_check,
+ .get_modes = drm_connector_helper_tv_get_modes,
};
static int vc4_vec_connector_init(struct drm_device *dev, struct vc4_vec *vec)
{
struct drm_connector *connector = &vec->connector;
+ struct drm_property *prop;
int ret;
connector->interlace_allowed = true;
@@ -313,7 +498,16 @@ static int vc4_vec_connector_init(struct drm_device *dev, struct vc4_vec *vec)
drm_object_attach_property(&connector->base,
dev->mode_config.tv_mode_property,
- VC4_VEC_TV_MODE_NTSC);
+ DRM_MODE_TV_MODE_NTSC);
+
+ prop = drm_property_create_enum(dev, 0, "mode",
+ legacy_tv_mode_names,
+ ARRAY_SIZE(legacy_tv_mode_names));
+ if (!prop)
+ return -ENOMEM;
+ vec->legacy_tv_mode_property = prop;
+
+ drm_object_attach_property(&connector->base, prop, VC4_VEC_TV_MODE_NTSC);
drm_connector_attach_encoder(connector, &vec->encoder.base);
@@ -360,14 +554,20 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
struct drm_connector *connector = &vec->connector;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
- const struct vc4_vec_tv_mode *tv_mode =
- &vc4_vec_tv_modes[conn_state->tv.mode];
+ struct drm_display_mode *adjusted_mode =
+ &encoder->crtc->state->adjusted_mode;
+ const struct vc4_vec_tv_mode *tv_mode;
int idx, ret;
if (!drm_dev_enter(drm, &idx))
return;
- ret = pm_runtime_get_sync(&vec->pdev->dev);
+ tv_mode = vc4_vec_tv_mode_lookup(conn_state->tv.mode,
+ adjusted_mode->htotal);
+ if (!tv_mode)
+ goto err_dev_exit;
+
+ ret = pm_runtime_resume_and_get(&vec->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
goto err_dev_exit;
@@ -413,7 +613,9 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
VEC_WRITE(VEC_CLMP0_START, 0xac);
VEC_WRITE(VEC_CLMP0_END, 0xec);
VEC_WRITE(VEC_CONFIG2,
- VEC_CONFIG2_UV_DIG_DIS | VEC_CONFIG2_RGB_DIG_DIS);
+ VEC_CONFIG2_UV_DIG_DIS |
+ VEC_CONFIG2_RGB_DIG_DIS |
+ ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 0 : VEC_CONFIG2_PROG_SCAN));
VEC_WRITE(VEC_CONFIG3, VEC_CONFIG3_HORIZ_LEN_STD);
VEC_WRITE(VEC_DAC_CONFIG, vec->variant->dac_config);
@@ -447,13 +649,61 @@ static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- const struct vc4_vec_tv_mode *vec_mode;
+ const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ const struct vc4_vec_tv_mode *tv_mode;
+
+ tv_mode = vc4_vec_tv_mode_lookup(conn_state->tv.mode, mode->htotal);
+ if (!tv_mode)
+ return -EINVAL;
+
+ if (mode->crtc_hdisplay % 4)
+ return -EINVAL;
+
+ if (!(mode->crtc_hsync_end - mode->crtc_hsync_start))
+ return -EINVAL;
+
+ switch (mode->htotal) {
+ /* NTSC */
+ case 858:
+ if (mode->crtc_vtotal > 262)
+ return -EINVAL;
+
+ if (mode->crtc_vdisplay < 1 || mode->crtc_vdisplay > 253)
+ return -EINVAL;
+
+ if (!(mode->crtc_vsync_start - mode->crtc_vdisplay))
+ return -EINVAL;
+
+ if ((mode->crtc_vsync_end - mode->crtc_vsync_start) != 3)
+ return -EINVAL;
+
+ if ((mode->crtc_vtotal - mode->crtc_vsync_end) < 4)
+ return -EINVAL;
- vec_mode = &vc4_vec_tv_modes[conn_state->tv.mode];
+ break;
- if (conn_state->crtc &&
- !drm_mode_equal(vec_mode->mode, &crtc_state->adjusted_mode))
+ /* PAL/SECAM */
+ case 864:
+ if (mode->crtc_vtotal > 312)
+ return -EINVAL;
+
+ if (mode->crtc_vdisplay < 1 || mode->crtc_vdisplay > 305)
+ return -EINVAL;
+
+ if (!(mode->crtc_vsync_start - mode->crtc_vdisplay))
+ return -EINVAL;
+
+ if ((mode->crtc_vsync_end - mode->crtc_vsync_start) != 3)
+ return -EINVAL;
+
+ if ((mode->crtc_vtotal - mode->crtc_vsync_end) < 2)
+ return -EINVAL;
+
+ break;
+
+ default:
return -EINVAL;
+ }
return 0;
}
@@ -468,12 +718,8 @@ static int vc4_vec_late_register(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
- int ret;
- ret = vc4_debugfs_add_regset32(drm->primary, "vec_regs",
- &vec->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, "vec_regs", &vec->regset);
return 0;
}
@@ -500,13 +746,6 @@ static const struct of_device_id vc4_vec_dt_match[] = {
{ /* sentinel */ },
};
-static const char * const tv_mode_names[] = {
- [VC4_VEC_TV_MODE_NTSC] = "NTSC",
- [VC4_VEC_TV_MODE_NTSC_J] = "NTSC-J",
- [VC4_VEC_TV_MODE_PAL] = "PAL",
- [VC4_VEC_TV_MODE_PAL_M] = "PAL-M",
-};
-
static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -514,8 +753,14 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
struct vc4_vec *vec;
int ret;
- ret = drm_mode_create_tv_properties(drm, ARRAY_SIZE(tv_mode_names),
- tv_mode_names);
+ ret = drm_mode_create_tv_properties(drm,
+ BIT(DRM_MODE_TV_MODE_NTSC) |
+ BIT(DRM_MODE_TV_MODE_NTSC_443) |
+ BIT(DRM_MODE_TV_MODE_NTSC_J) |
+ BIT(DRM_MODE_TV_MODE_PAL) |
+ BIT(DRM_MODE_TV_MODE_PAL_M) |
+ BIT(DRM_MODE_TV_MODE_PAL_N) |
+ BIT(DRM_MODE_TV_MODE_SECAM));
if (ret)
return ret;
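The lookup introduced above keys on both the requested TV mode and the mode's htotal, which is how the single DRM_MODE_TV_MODE_PAL value can still select either the regular PAL or the PAL-60 register set. A small illustration of how the table resolves (not part of the patch, purely illustrative):

	/* 625-line timing (htotal 864) picks the plain PAL entry ... */
	tv_mode = vc4_vec_tv_mode_lookup(DRM_MODE_TV_MODE_PAL, 864);

	/*
	 * ... while 525-line timing (htotal 858) picks the PAL-60 entry,
	 * which carries VEC_CONFIG1_CUSTOM_FREQ and the custom subcarrier.
	 */
	tv_mode = vc4_vec_tv_mode_lookup(DRM_MODE_TV_MODE_PAL, 858);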
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index b7a64c7dcc2c..af6ffb696086 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -165,6 +165,8 @@ struct virtio_gpu_vbuffer {
struct virtio_gpu_object_array *objs;
struct list_head list;
+
+ uint32_t seqno;
};
struct virtio_gpu_output {
@@ -194,6 +196,7 @@ struct virtio_gpu_queue {
spinlock_t qlock;
wait_queue_head_t ack_queue;
struct work_struct dequeue_work;
+ uint32_t seqno;
};
struct virtio_gpu_drv_capset {
diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h
index 711ecc2bd241..031bc77689d5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_trace.h
+++ b/drivers/gpu/drm/virtio/virtgpu_trace.h
@@ -9,40 +9,44 @@
#define TRACE_INCLUDE_FILE virtgpu_trace
DECLARE_EVENT_CLASS(virtio_gpu_cmd,
- TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
- TP_ARGS(vq, hdr),
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+ TP_ARGS(vq, hdr, seqno),
TP_STRUCT__entry(
__field(int, dev)
__field(unsigned int, vq)
- __field(const char *, name)
+ __string(name, vq->name)
__field(u32, type)
__field(u32, flags)
__field(u64, fence_id)
__field(u32, ctx_id)
+ __field(u32, num_free)
+ __field(u32, seqno)
),
TP_fast_assign(
__entry->dev = vq->vdev->index;
__entry->vq = vq->index;
- __entry->name = vq->name;
+ __assign_str(name, vq->name);
__entry->type = le32_to_cpu(hdr->type);
__entry->flags = le32_to_cpu(hdr->flags);
__entry->fence_id = le64_to_cpu(hdr->fence_id);
__entry->ctx_id = le32_to_cpu(hdr->ctx_id);
+ __entry->num_free = vq->num_free;
+ __entry->seqno = seqno;
),
- TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u",
- __entry->dev, __entry->vq, __entry->name,
+ TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u num_free=%u seqno=%u",
+ __entry->dev, __entry->vq, __get_str(name),
__entry->type, __entry->flags, __entry->fence_id,
- __entry->ctx_id)
+ __entry->ctx_id, __entry->num_free, __entry->seqno)
);
DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_queue,
- TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
- TP_ARGS(vq, hdr)
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+ TP_ARGS(vq, hdr, seqno)
);
DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_response,
- TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
- TP_ARGS(vq, hdr)
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+ TP_ARGS(vq, hdr, seqno)
);
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 9ff8660b50ad..a04a9b20896d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -215,7 +215,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
list_for_each_entry(entry, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
- trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+ trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
@@ -261,6 +261,10 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
spin_unlock(&vgdev->cursorq.qlock);
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ struct virtio_gpu_ctrl_hdr *resp =
+ (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+
+ trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
list_del(&entry->list);
free_vbuf(vgdev, entry);
}
@@ -353,7 +357,8 @@ again:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
WARN_ON(ret);
- trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+ vbuf->seqno = ++vgdev->ctrlq.seqno;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
atomic_inc(&vgdev->pending_commands);
@@ -465,8 +470,10 @@ retry:
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
+ vbuf->seqno = ++vgdev->cursorq.seqno;
trace_virtio_gpu_cmd_queue(vq,
- virtio_gpu_vbuf_ctrl_hdr(vbuf));
+ virtio_gpu_vbuf_ctrl_hdr(vbuf),
+ vbuf->seqno);
notify = virtqueue_kick_prepare(vq);
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 293dbca50c31..6d3a2d57d992 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -57,7 +57,8 @@ static void vkms_release(struct drm_device *dev)
{
struct vkms_device *vkms = drm_device_to_vkms_device(dev);
- destroy_workqueue(vkms->output.composer_workq);
+ if (vkms->output.composer_workq)
+ destroy_workqueue(vkms->output.composer_workq);
}
static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
@@ -91,8 +92,8 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
static int vkms_config_show(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback);
@@ -102,24 +103,16 @@ static int vkms_config_show(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list vkms_config_debugfs_list[] = {
+static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
{ "vkms_config", vkms_config_show, 0 },
};
-static void vkms_config_debugfs_init(struct drm_minor *minor)
-{
- drm_debugfs_create_files(vkms_config_debugfs_list, ARRAY_SIZE(vkms_config_debugfs_list),
- minor->debugfs_root, minor);
-}
-
static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.release = vkms_release,
.fops = &vkms_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .debugfs_init = vkms_config_debugfs_init,
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -201,6 +194,9 @@ static int vkms_create(struct vkms_config *config)
if (ret)
goto out_devres;
+ drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
+ ARRAY_SIZE(vkms_config_debugfs_list));
+
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
goto out_devres;
@@ -218,6 +214,7 @@ out_unregister:
static int __init vkms_init(void)
{
+ int ret;
struct vkms_config *config;
config = kmalloc(sizeof(*config), GFP_KERNEL);
@@ -230,7 +227,11 @@ static int __init vkms_init(void)
config->writeback = enable_writeback;
config->overlay = enable_overlay;
- return vkms_create(config);
+ ret = vkms_create(config);
+ if (ret)
+ kfree(config);
+
+ return ret;
}
static void vkms_destroy(struct vkms_config *config)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 0a67b8073f7e..4a248567efb2 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -12,8 +12,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_writeback.h>
-#define XRES_MIN 20
-#define YRES_MIN 20
+#define XRES_MIN 10
+#define YRES_MIN 10
#define XRES_DEF 1024
#define YRES_DEF 768
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index c3a845220e10..b3f8a115cc23 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -160,10 +160,44 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
return 0;
}
+static int vkms_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_shadow_plane_state *shadow_plane_state;
+ struct drm_framebuffer *fb = state->fb;
+ int ret;
+
+ if (!fb)
+ return 0;
+
+ shadow_plane_state = to_drm_shadow_plane_state(state);
+
+ ret = drm_gem_plane_helper_prepare_fb(plane, state);
+ if (ret)
+ return ret;
+
+ return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
+}
+
+static void vkms_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_shadow_plane_state *shadow_plane_state;
+ struct drm_framebuffer *fb = state->fb;
+
+ if (!fb)
+ return;
+
+ shadow_plane_state = to_drm_shadow_plane_state(state);
+
+ drm_gem_fb_vunmap(fb, shadow_plane_state->map);
+}
+
static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
.atomic_update = vkms_plane_atomic_update,
.atomic_check = vkms_plane_atomic_check,
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .prepare_fb = vkms_prepare_fb,
+ .cleanup_fb = vkms_cleanup_fb,
};
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 3c06df2a5474..2b843ff4b437 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -28,7 +28,7 @@
#include <linux/dmapool.h>
#include <linux/pci.h>
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include "vmwgfx_drv.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index bd02cb0e6837..9ad28346aff7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -40,7 +40,6 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_module.h>
#include <drm/drm_sysfs.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b062b020b378..4b612fc9758c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -37,8 +37,10 @@
#include <drm/drm_file.h>
#include <drm/drm_rect.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo.h>
#include "ttm_object.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index a5379f6fb5ab..43cec8e37e4d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -29,7 +29,7 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index c482e5298e11..20158a92acc7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -25,7 +25,6 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_bo_driver.h>
#include "vmwgfx_drv.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index abd5e3323ebf..ceb4d3d3b965 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -29,7 +29,6 @@
*/
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
index d3007bf1b8f5..ee7964cbdaca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
@@ -26,7 +26,6 @@
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 4e3938e62c08..856a352a72a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -26,7 +26,6 @@
**************************************************************************/
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
static const struct ttm_place vram_placement_flags = {
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 118318513e2d..c35eac1116f5 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1165,6 +1165,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
pdev = platform_device_alloc(reg->name, id++);
if (!pdev) {
ret = -ENOMEM;
+ of_node_put(of_node);
goto err_register;
}
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index bb0abbe4491c..4424f53a8a0a 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -953,7 +953,7 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
goto err_free_dhchap_secret;
if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
- return ret;
+ return 0;
ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
sizeof(*chap), GFP_KERNEL);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 95c488ea91c3..7be562a4e1aa 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1074,6 +1074,18 @@ static u32 nvme_known_admin_effects(u8 opcode)
return 0;
}
+static u32 nvme_known_nvm_effects(u8 opcode)
+{
+ switch (opcode) {
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ case nvme_cmd_write_uncor:
+ return NVME_CMD_EFFECTS_LBCC;
+ default:
+ return 0;
+ }
+}
+
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
u32 effects = 0;
@@ -1081,16 +1093,24 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
if (ns) {
if (ns->head->effects)
effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+ if (ns->head->ids.csi == NVME_CAP_CSS_NVM)
+ effects |= nvme_known_nvm_effects(opcode);
if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
dev_warn_once(ctrl->device,
- "IO command:%02x has unhandled effects:%08x\n",
+ "IO command:%02x has unusual effects:%08x\n",
opcode, effects);
- return 0;
- }
- if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->acs[opcode]);
- effects |= nvme_known_admin_effects(opcode);
+ /*
+		 * NVME_CMD_EFFECTS_CSE_MASK causes a freeze of all I/O queues,
+		 * which would deadlock when done on an I/O command. Note that
+		 * we already warn about an unusual effect above.
+ */
+ effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
+ } else {
+ if (ctrl->effects)
+ effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+ effects |= nvme_known_admin_effects(opcode);
+ }
return effects;
}
@@ -4926,7 +4946,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
memset(set, 0, sizeof(*set));
set->ops = ops;
- set->queue_depth = ctrl->sqsize + 1;
+ set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
/*
	 * Some Apple controllers require tags to be unique across admin and
* the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 9ddda571f046..a8639919237e 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -11,6 +11,8 @@
static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
fmode_t mode)
{
+ u32 effects;
+
if (capable(CAP_SYS_ADMIN))
return true;
@@ -43,11 +45,29 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
}
/*
- * Only allow I/O commands that transfer data to the controller if the
- * special file is open for writing, but always allow I/O commands that
- * transfer data from the controller.
+ * Check if the controller provides a Commands Supported and Effects log
+	 * and marks this command as supported. If not, reject unprivileged
+ * passthrough.
+ */
+ effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
+ if (!(effects & NVME_CMD_EFFECTS_CSUPP))
+ return false;
+
+ /*
+	 * Don't allow passthrough for commands that have intrusive (or unknown)
+ * effects.
+ */
+ if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_UUID_SEL |
+ NVME_CMD_EFFECTS_SCOPE_MASK))
+ return false;
+
+ /*
+ * Only allow I/O commands that transfer data to the controller or that
+ * change the logical block contents if the file descriptor is open for
+ * writing.
*/
- if (nvme_is_write(c))
+ if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
return mode & FMODE_WRITE;
return true;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 6bbb73ef8b25..424c8a467a0c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -893,7 +893,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
- if (req->cmd_flags & REQ_NVME_MPATH)
+ if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f0f8027644bb..b13baccedb4a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -36,7 +36,7 @@
#define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
-#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
/*
* These can be higher, but we need to ensure that any command doesn't
@@ -144,9 +144,9 @@ struct nvme_dev {
mempool_t *iod_mempool;
/* shadow doorbell buffer support: */
- u32 *dbbuf_dbs;
+ __le32 *dbbuf_dbs;
dma_addr_t dbbuf_dbs_dma_addr;
- u32 *dbbuf_eis;
+ __le32 *dbbuf_eis;
dma_addr_t dbbuf_eis_dma_addr;
/* host memory buffer support: */
@@ -208,10 +208,10 @@ struct nvme_queue {
#define NVMEQ_SQ_CMB 1
#define NVMEQ_DELETE_ERROR 2
#define NVMEQ_POLLED 3
- u32 *dbbuf_sq_db;
- u32 *dbbuf_cq_db;
- u32 *dbbuf_sq_ei;
- u32 *dbbuf_cq_ei;
+ __le32 *dbbuf_sq_db;
+ __le32 *dbbuf_cq_db;
+ __le32 *dbbuf_sq_ei;
+ __le32 *dbbuf_cq_ei;
struct completion delete_done;
};
@@ -343,11 +343,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
}
/* Update dbbuf and return true if an MMIO is required */
-static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
- volatile u32 *dbbuf_ei)
+static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
+ volatile __le32 *dbbuf_ei)
{
if (dbbuf_db) {
- u16 old_value;
+ u16 old_value, event_idx;
/*
* Ensure that the queue is written before updating
@@ -355,8 +355,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
wmb();
- old_value = *dbbuf_db;
- *dbbuf_db = value;
+ old_value = le32_to_cpu(*dbbuf_db);
+ *dbbuf_db = cpu_to_le32(value);
/*
* Ensure that the doorbell is updated before reading the event
@@ -366,7 +366,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
mb();
- if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
+ event_idx = le32_to_cpu(*dbbuf_ei);
+ if (!nvme_dbbuf_need_event(event_idx, value, old_value))
return false;
}
@@ -380,9 +381,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
*/
static int nvme_pci_npages_prp(void)
{
- unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
- NVME_CTRL_PAGE_SIZE);
- return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+ unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
+ unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
+ return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
}
/*
@@ -392,7 +393,7 @@ static int nvme_pci_npages_prp(void)
static int nvme_pci_npages_sgl(void)
{
return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
- PAGE_SIZE);
+ NVME_CTRL_PAGE_SIZE);
}
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -708,7 +709,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
sge->length = cpu_to_le32(entries * sizeof(*sge));
sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
} else {
- sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
sge->type = NVME_SGL_FMT_SEG_DESC << 4;
}
}
@@ -2332,10 +2333,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
- if (result > 0)
+ if (result > 0) {
dev->q_depth = result;
- else
+ dev->ctrl.sqsize = result - 1;
+ } else {
dev->cmb_use_sqes = false;
+ }
}
do {
@@ -2536,7 +2539,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
io_queue_depth);
- dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
dev->dbs = dev->bar + 4096;
@@ -2577,7 +2579,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
dev->q_depth);
}
-
+ dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
nvme_map_cmb(dev);
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 53a004ea320c..6a54ed6fb121 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -164,26 +164,31 @@ out:
static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
{
- log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
- log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);
-
- log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);
+ log->acs[nvme_admin_get_log_page] =
+ log->acs[nvme_admin_identify] =
+ log->acs[nvme_admin_abort_cmd] =
+ log->acs[nvme_admin_set_features] =
+ log->acs[nvme_admin_get_features] =
+ log->acs[nvme_admin_async_event] =
+ log->acs[nvme_admin_keep_alive] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+
+ log->iocs[nvme_cmd_read] =
+ log->iocs[nvme_cmd_flush] =
+ log->iocs[nvme_cmd_dsm] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+ log->iocs[nvme_cmd_write] =
+ log->iocs[nvme_cmd_write_zeroes] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
}
static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
{
- log->iocs[nvme_cmd_zone_append] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_zone_mgmt_send] = cpu_to_le32(1 << 0);
- log->iocs[nvme_cmd_zone_mgmt_recv] = cpu_to_le32(1 << 0);
+ log->iocs[nvme_cmd_zone_append] =
+ log->iocs[nvme_cmd_zone_mgmt_send] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
+ log->iocs[nvme_cmd_zone_mgmt_recv] =
+ cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
}
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 79af5140af8b..adc0958755d6 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -334,14 +334,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
}
/*
- * If there are effects for the command we are about to execute, or
- * an end_req function we need to use nvme_execute_passthru_rq()
- * synchronously in a work item seeing the end_req function and
- * nvme_passthru_end() can't be called in the request done callback
- * which is typically in interrupt context.
+ * If a command needs post-execution fixups, or there are any
+ * non-trivial effects, make sure to execute the command synchronously
+ * in a workqueue so that nvme_passthru_end gets called.
*/
effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
- if (req->p.use_workqueue || effects) {
+ if (req->p.use_workqueue ||
+ (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
req->p.rq = rq;
queue_work(nvmet_wq, &req->p.work);
diff --git a/include/acpi/video.h b/include/acpi/video.h
index a275c35e5249..8ed9bec03e53 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -53,6 +53,7 @@ enum acpi_backlight_type {
};
#if IS_ENABLED(CONFIG_ACPI_VIDEO)
+extern void acpi_video_report_nolcd(void);
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
extern void acpi_video_register_backlight(void);
@@ -69,6 +70,7 @@ extern int acpi_video_get_levels(struct acpi_device *device,
struct acpi_video_device_brightness **dev_br,
int *pmax_level);
#else
+static inline void acpi_video_report_nolcd(void) { return; };
static inline int acpi_video_register(void) { return -ENODEV; }
static inline void acpi_video_unregister(void) { return; }
static inline void acpi_video_register_backlight(void) { return; }
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index a94219e9916f..659bf3b31c91 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -891,7 +891,12 @@
#define PRINTK_INDEX
#endif
+/*
+ * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
+ * Otherwise, the type of the .notes section would become PROGBITS instead of NOTES.
+ */
#define NOTES \
+ /DISCARD/ : { *(.note.GNU-stack) } \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
BOUNDED_SECTION_BY(.note.*, _notes) \
} NOTES_HEADERS \
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 9bc22a02874d..632376c291db 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -610,6 +610,7 @@
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
+# define DP_FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE (1 << 6)
# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
@@ -1112,6 +1113,11 @@
# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
+#define DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 0x2214 /* 2.0 E11 */
+# define DP_ADAPTIVE_SYNC_SDP_SUPPORTED (1 << 0)
+# define DP_AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED (1 << 1)
+# define DP_VSC_EXT_SDP_FRAMEWORK_VERSION_1_SUPPORTED (1 << 4)
+
#define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */
# define DP_UHBR10 (1 << 0)
# define DP_UHBR20 (1 << 1)
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 10b1990bc1f6..92586ab55ef5 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -515,17 +515,17 @@ struct drm_private_state * __must_check
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_private_state *
-drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_private_state *
-drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_connector *
-drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
struct drm_connector *
-drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
/**
@@ -540,7 +540,7 @@ drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
* @drm_atomic_get_new_crtc_state should be used instead.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_existing_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].state;
@@ -555,7 +555,7 @@ drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
* NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_old_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].old_state;
@@ -569,7 +569,7 @@ drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
* NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_new_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].new_state;
@@ -587,7 +587,7 @@ drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
* @drm_atomic_get_new_plane_state should be used instead.
*/
static inline struct drm_plane_state *
-drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_existing_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].state;
@@ -602,7 +602,7 @@ drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
* NULL if the plane is not part of the global atomic state.
*/
static inline struct drm_plane_state *
-drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_old_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].old_state;
@@ -617,7 +617,7 @@ drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
* NULL if the plane is not part of the global atomic state.
*/
static inline struct drm_plane_state *
-drm_atomic_get_new_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_new_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].new_state;
@@ -635,7 +635,7 @@ drm_atomic_get_new_plane_state(struct drm_atomic_state *state,
* @drm_atomic_get_new_connector_state should be used instead.
*/
static inline struct drm_connector_state *
-drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_existing_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -655,7 +655,7 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
* or NULL if the connector is not part of the global atomic state.
*/
static inline struct drm_connector_state *
-drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -675,7 +675,7 @@ drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
* or NULL if the connector is not part of the global atomic state.
*/
static inline struct drm_connector_state *
-drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -713,7 +713,7 @@ drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
* Read-only pointer to the current plane state.
*/
static inline const struct drm_plane_state *
-__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
+__drm_atomic_get_current_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
if (state->planes[drm_plane_index(plane)].state)
@@ -1134,10 +1134,10 @@ struct drm_bridge_state *
drm_atomic_get_bridge_state(struct drm_atomic_state *state,
struct drm_bridge *bridge);
struct drm_bridge_state *
-drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge);
struct drm_bridge_state *
-drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge);
#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index 192766656b88..b9740edb2658 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -26,6 +26,7 @@
#include <linux/types.h>
+struct drm_atomic_state;
struct drm_bridge;
struct drm_bridge_state;
struct drm_crtc;
@@ -70,6 +71,9 @@ void __drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_
void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector);
+int drm_atomic_helper_connector_tv_check(struct drm_connector *connector,
+ struct drm_atomic_state *state);
void drm_atomic_helper_connector_tv_margins_reset(struct drm_connector *connector);
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index 0d36bfd1a4cd..5a4cd1fa8e2a 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -4,6 +4,9 @@
#ifndef _DRM_AUDIO_COMPONENT_H_
#define _DRM_AUDIO_COMPONENT_H_
+#include <linux/completion.h>
+#include <linux/types.h>
+
struct drm_audio_component;
struct device;
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 6b65b0dfb4fb..42f86327b40a 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -297,12 +297,6 @@ struct drm_bridge_funcs {
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_pre_enable. It would be prudent to also provide an
- * implementation of @pre_enable if you are expecting driver calls into
- * &drm_bridge_chain_pre_enable.
- *
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
@@ -323,11 +317,6 @@ struct drm_bridge_funcs {
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_chain_enable.
- * It would be prudent to also provide an implementation of @enable if
- * you are expecting driver calls into &drm_bridge_chain_enable.
- *
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
@@ -345,12 +334,6 @@ struct drm_bridge_funcs {
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_disable. It would be prudent to also provide an
- * implementation of @disable if you are expecting driver calls into
- * &drm_bridge_chain_disable.
- *
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
@@ -370,13 +353,6 @@ struct drm_bridge_funcs {
* signals) feeding it is no longer running when this callback is
* called.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_post_disable.
- * It would be prudent to also provide an implementation of
- * @post_disable if you are expecting driver calls into
- * &drm_bridge_chain_post_disable.
- *
* The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
@@ -769,6 +745,14 @@ struct drm_bridge {
*/
bool interlace_allowed;
/**
+	 * @pre_enable_prev_first: The bridge requires that the previous
+	 * bridge's @pre_enable function is called before its own @pre_enable,
+	 * and conversely for post_disable. This is most frequently a
+ * requirement for DSI devices which need the host to be initialised
+ * before the peripheral.
+ */
+ bool pre_enable_prev_first;
+ /**
* @ddc: Associated I2C adapter for DDC access, if any.
*/
struct i2c_adapter *ddc;
@@ -876,13 +860,9 @@ enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
-void drm_bridge_chain_disable(struct drm_bridge *bridge);
-void drm_bridge_chain_post_disable(struct drm_bridge *bridge);
void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode);
-void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_chain_enable(struct drm_bridge *bridge);
int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
struct drm_crtc_state *crtc_state,
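For bridge drivers that need this ordering, the new flag is a one-line opt-in; the atomic bridge chain helpers then call the previous bridge's @pre_enable first and reverse the order again for @post_disable. A minimal sketch of a DSI peripheral opting in (the example_* names are placeholders; only the drm_bridge field comes from this change):

static int example_dsi_bridge_probe(struct mipi_dsi_device *dsi)
{
	struct example_bridge *ctx;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->bridge.funcs = &example_bridge_funcs;
	ctx->bridge.of_node = dsi->dev.of_node;
	/* Have the DSI host pre_enabled before this peripheral. */
	ctx->bridge.pre_enable_prev_first = true;

	drm_bridge_add(&ctx->bridge);
	return mipi_dsi_attach(dsi);
}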
diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h
index 33f6c3bbdb4a..69630815fb09 100644
--- a/include/drm/drm_bridge_connector.h
+++ b/include/drm/drm_bridge_connector.h
@@ -10,8 +10,6 @@ struct drm_connector;
struct drm_device;
struct drm_encoder;
-void drm_bridge_connector_enable_hpd(struct drm_connector *connector);
-void drm_bridge_connector_disable_hpd(struct drm_connector *connector);
struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_encoder *encoder);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 565cf9d3c550..9037f1317aee 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -144,6 +144,65 @@ enum subpixel_order {
};
/**
+ * enum drm_connector_tv_mode - Analog TV output mode
+ *
+ * This enum is used to indicate the TV output mode used on an analog TV
+ * connector.
+ *
+ * WARNING: The values of this enum are uABI since they're exposed in the
+ * "TV mode" connector property.
+ */
+enum drm_connector_tv_mode {
+ /**
+ * @DRM_MODE_TV_MODE_NTSC: CCIR System M (aka 525-lines)
+ * together with the NTSC Color Encoding.
+ */
+ DRM_MODE_TV_MODE_NTSC,
+
+ /**
+ * @DRM_MODE_TV_MODE_NTSC_443: Variant of
+ * @DRM_MODE_TV_MODE_NTSC. Uses a color subcarrier frequency
+ * of 4.43 MHz.
+ */
+ DRM_MODE_TV_MODE_NTSC_443,
+
+ /**
+ * @DRM_MODE_TV_MODE_NTSC_J: Variant of @DRM_MODE_TV_MODE_NTSC
+	 * used in Japan. Uses a black level equal to the blanking
+ * level.
+ */
+ DRM_MODE_TV_MODE_NTSC_J,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL: CCIR System B together with the PAL
+ * color system.
+ */
+ DRM_MODE_TV_MODE_PAL,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL_M: CCIR System M (aka 525-lines)
+ * together with the PAL color encoding
+ */
+ DRM_MODE_TV_MODE_PAL_M,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL_N: CCIR System N together with the PAL
+ * color encoding. It uses 625 lines, but has a color subcarrier
+	 * frequency of 3.58 MHz, the SECAM color space, and narrower
+ * channels compared to most of the other PAL variants.
+ */
+ DRM_MODE_TV_MODE_PAL_N,
+
+ /**
+ * @DRM_MODE_TV_MODE_SECAM: CCIR System B together with the
+ * SECAM color system.
+ */
+ DRM_MODE_TV_MODE_SECAM,
+
+ DRM_MODE_TV_MODE_MAX,
+};
+
+/**
* struct drm_scrambling: sink's scrambling support.
*/
struct drm_scrambling {
@@ -701,6 +760,7 @@ struct drm_connector_tv_margins {
* @select_subconnector: selected subconnector
* @subconnector: detected subconnector
* @margins: TV margins
+ * @legacy_mode: Legacy TV mode, driver-specific value
* @mode: TV mode
* @brightness: brightness in percent
* @contrast: contrast in percent
@@ -713,6 +773,7 @@ struct drm_tv_connector_state {
enum drm_mode_subconnector select_subconnector;
enum drm_mode_subconnector subconnector;
struct drm_connector_tv_margins margins;
+ unsigned int legacy_mode;
unsigned int mode;
unsigned int brightness;
unsigned int contrast;
@@ -1313,6 +1374,18 @@ struct drm_cmdline_mode {
* @tv_margins: TV margins to apply to the mode.
*/
struct drm_connector_tv_margins tv_margins;
+
+ /**
+ * @tv_mode: TV mode standard. See DRM_MODE_TV_MODE_*.
+ */
+ enum drm_connector_tv_mode tv_mode;
+
+ /**
+ * @tv_mode_specified:
+ *
+ * Did the mode have a preferred TV mode?
+ */
+ bool tv_mode_specified;
};
/**
@@ -1810,19 +1883,24 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order);
const char *drm_get_dpms_name(int val);
const char *drm_get_dvi_i_subconnector_name(int val);
const char *drm_get_dvi_i_select_name(int val);
+const char *drm_get_tv_mode_name(int val);
const char *drm_get_tv_subconnector_name(int val);
const char *drm_get_tv_select_name(int val);
const char *drm_get_dp_subconnector_name(int val);
const char *drm_get_content_protection_name(int val);
const char *drm_get_hdcp_content_type_name(int val);
+int drm_get_tv_mode_from_name(const char *name, size_t len);
+
int drm_mode_create_dvi_i_properties(struct drm_device *dev);
void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector);
int drm_mode_create_tv_margin_properties(struct drm_device *dev);
+int drm_mode_create_tv_properties_legacy(struct drm_device *dev,
+ unsigned int num_modes,
+ const char * const modes[]);
int drm_mode_create_tv_properties(struct drm_device *dev,
- unsigned int num_modes,
- const char * const modes[]);
+ unsigned int supported_tv_modes);
void drm_connector_attach_tv_margin_properties(struct drm_connector *conn);
int drm_mode_create_scaling_mode_property(struct drm_device *dev);
int drm_connector_attach_content_type_property(struct drm_connector *dev);
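With the reworked API, drm_mode_create_tv_properties() takes a bitmask of the DRM_MODE_TV_MODE_* values a driver can output, while drivers that keep their historical driver-specific enumeration use drm_mode_create_tv_properties_legacy() instead. A minimal sketch of the new call for a hypothetical driver that only supports NTSC and PAL (error handling and the rest of connector init omitted):

	ret = drm_mode_create_tv_properties(dev,
					    BIT(DRM_MODE_TV_MODE_NTSC) |
					    BIT(DRM_MODE_TV_MODE_PAL));
	if (ret)
		return ret;

	drm_object_attach_property(&connector->base,
				   dev->mode_config.tv_mode_property,
				   DRM_MODE_TV_MODE_NTSC);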
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index 2188dc83957f..7616f457ce70 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -79,12 +79,61 @@ struct drm_info_node {
struct dentry *dent;
};
+/**
+ * struct drm_debugfs_info - debugfs info list entry
+ *
+ * This structure represents a debugfs file to be created by the drm
+ * core.
+ */
+struct drm_debugfs_info {
+ /** @name: File name */
+ const char *name;
+
+ /**
+ * @show:
+ *
+ * Show callback. &seq_file->private will be set to the &struct
+ * drm_debugfs_entry corresponding to the instance of this info
+ * on a given &struct drm_device.
+ */
+ int (*show)(struct seq_file*, void*);
+
+ /** @driver_features: Required driver features for this entry. */
+ u32 driver_features;
+
+ /** @data: Driver-private data, should not be device-specific. */
+ void *data;
+};
+
+/**
+ * struct drm_debugfs_entry - Per-device debugfs node structure
+ *
+ * This structure represents a debugfs file, as an instantiation of a &struct
+ * drm_debugfs_info on a &struct drm_device.
+ */
+struct drm_debugfs_entry {
+ /** @dev: &struct drm_device for this node. */
+ struct drm_device *dev;
+
+ /** @file: Template for this node. */
+ struct drm_debugfs_info file;
+
+ /** @list: Linked list of all device nodes. */
+ struct list_head list;
+};
+
#if defined(CONFIG_DEBUG_FS)
void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
struct drm_minor *minor);
int drm_debugfs_remove_files(const struct drm_info_list *files,
int count, struct drm_minor *minor);
+
+void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*), void *data);
+
+void drm_debugfs_add_files(struct drm_device *dev,
+ const struct drm_debugfs_info *files, int count);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -96,6 +145,16 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
{
return 0;
}
+
+static inline void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*),
+ void *data)
+{}
+
+static inline void drm_debugfs_add_files(struct drm_device *dev,
+ const struct drm_debugfs_info *files,
+ int count)
+{}
#endif
#endif /* _DRM_DEBUGFS_H_ */
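These additions let drivers describe debugfs files as a static drm_debugfs_info table registered against the drm_device itself (before drm_dev_register()), instead of hooking the minor in a debugfs_init callback; the show callback then finds its device through the drm_debugfs_entry stored in seq_file->private. A minimal sketch mirroring the vkms conversion earlier in this series (the example_* names are placeholders):

static int example_status_show(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;

	seq_printf(m, "driver: %s\n", dev->driver->name);
	return 0;
}

static const struct drm_debugfs_info example_debugfs_list[] = {
	{ "example_status", example_status_show, 0 },
};

	/* In the driver's probe path, before drm_dev_register(): */
	drm_debugfs_add_files(drm, example_debugfs_list,
			      ARRAY_SIZE(example_debugfs_list));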
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 933ce2048e20..7cf4afae2e79 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -87,10 +87,23 @@ struct drm_device {
*/
void *dev_private;
- /** @primary: Primary node */
+ /**
+ * @primary:
+ *
+ * Primary node. Drivers should not interact with this
+ * directly. debugfs interfaces can be registered with
+	 * drm_debugfs_add_file(), and sysfs interfaces should be added
+	 * directly on the hardware's struct device @dev, not on the
+	 * character device node.
+ */
struct drm_minor *primary;
- /** @render: Render node */
+ /**
+ * @render:
+ *
+ * Render node. Drivers should not interact with this directly ever.
+ * Drivers should not expose any additional interfaces in debugfs or
+ * sysfs on this node.
+ */
struct drm_minor *render;
/** @accel: Compute Acceleration node */
@@ -298,6 +311,21 @@ struct drm_device {
*/
struct drm_fb_helper *fb_helper;
+ /**
+ * @debugfs_mutex:
+ *
+ * Protects &debugfs_list access.
+ */
+ struct mutex debugfs_mutex;
+
+ /**
+ * @debugfs_list:
+ *
+ * List of debugfs files to be created by the DRM device. The files
+ * must be added during drm_dev_register().
+ */
+ struct list_head debugfs_list;
+
/* Everything below here is for legacy driver, never use! */
/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index eb5c98cf82b8..291deb09475b 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -30,12 +30,27 @@ void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pi
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, bool swab);
+void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip);
+void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip);
@@ -50,7 +65,6 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
size_t drm_fb_build_fourcc_list(struct drm_device *dev,
const u32 *native_fourccs, size_t native_nfourccs,
- const u32 *extra_fourccs, size_t extra_nfourccs,
u32 *fourccs_out, size_t nfourccs_out);
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
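The new conversions keep the calling convention of the existing drm_fb_xrgb8888_to_*() helpers: destination map plus optional pitch array, vmap'ed source, framebuffer, and damage clip. A rough sketch of a plane update using the ARGB8888 variant (everything except the helper call itself is assumed context, e.g. a shadow-plane state whose buffers are already vmap'ed):

	struct iosys_map dst = IOSYS_MAP_INIT_VADDR(hw_vaddr);
	struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height);

	/* A NULL dst_pitch lets the helper use its default pitch. */
	drm_fb_xrgb8888_to_argb8888(&dst, NULL, shadow_plane_state->data,
				    fb, &clip);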
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index a17c2f903f81..772a4adf5287 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -405,6 +405,7 @@ int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
+void drm_gem_private_object_fini(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
index 6970ccb787e2..40b8b039518e 100644
--- a/include/drm/drm_gem_atomic_helper.h
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -15,8 +15,6 @@ struct drm_simple_display_pipe;
*/
int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state);
-int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
/*
* Helpers for planes with shadow buffers
diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h
index 4c003b4f173e..7b53d673ae7e 100644
--- a/include/drm/drm_gem_ttm_helper.h
+++ b/include/drm/drm_gem_ttm_helper.h
@@ -7,8 +7,7 @@
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
struct iosys_map;
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index c083a1d71cf4..d3e8920c0b64 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -8,8 +8,8 @@
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_modes.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
#include <linux/container_of.h>
#include <linux/iosys-map.h>
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
new file mode 100644
index 000000000000..ed013fdcc1ff
--- /dev/null
+++ b/include/drm/drm_kunit_helpers.h
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef DRM_KUNIT_HELPERS_H_
+#define DRM_KUNIT_HELPERS_H_
+
+#include <kunit/test.h>
+
+struct drm_device;
+struct kunit;
+
+struct device *drm_kunit_helper_alloc_device(struct kunit *test);
+void drm_kunit_helper_free_device(struct kunit *test, struct device *dev);
+
+struct drm_device *
+__drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
+ struct device *dev,
+ size_t size, size_t offset,
+ const struct drm_driver *driver);
+
+/**
+ * drm_kunit_helper_alloc_drm_device_with_driver - Allocates a mock DRM device for KUnit tests
+ * @_test: The test context object
+ * @_dev: The parent device object
+ * @_type: the type of the struct which contains struct &drm_device
+ * @_member: the name of the &drm_device within @_type.
+ * @_drv: Mocked DRM device driver features
+ *
+ * This function creates a struct &drm_device from @_dev and @_drv.
+ *
+ * @_dev should be allocated using drm_kunit_helper_alloc_device().
+ *
+ * The driver is tied to the @_test context and will get cleaned at the
+ * end of the test. The drm_device is allocated through
+ * devm_drm_dev_alloc() and will thus be freed through a device-managed
+ * resource.
+ *
+ * Returns:
+ * A pointer to the new drm_device, or an ERR_PTR() otherwise.
+ */
+#define drm_kunit_helper_alloc_drm_device_with_driver(_test, _dev, _type, _member, _drv) \
+ ((_type *)__drm_kunit_helper_alloc_drm_device_with_driver(_test, _dev, \
+ sizeof(_type), \
+ offsetof(_type, _member), \
+ _drv))
+
+static inline struct drm_device *
+__drm_kunit_helper_alloc_drm_device(struct kunit *test,
+ struct device *dev,
+ size_t size, size_t offset,
+ u32 features)
+{
+ struct drm_driver *driver;
+
+ driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, driver);
+
+ driver->driver_features = features;
+
+ return __drm_kunit_helper_alloc_drm_device_with_driver(test, dev,
+ size, offset,
+ driver);
+}
+
+/**
+ * drm_kunit_helper_alloc_drm_device - Allocates a mock DRM device for KUnit tests
+ * @_test: The test context object
+ * @_dev: The parent device object
+ * @_type: the type of the struct which contains struct &drm_device
+ * @_member: the name of the &drm_device within @_type.
+ * @_feat: Mocked DRM device driver features
+ *
+ * This function creates a struct &drm_driver and will create a struct
+ * &drm_device from @_dev and that driver.
+ *
+ * @_dev should be allocated using drm_kunit_helper_alloc_device().
+ *
+ * The driver is tied to the @_test context and will get cleaned at the
+ * end of the test. The drm_device is allocated through
+ * devm_drm_dev_alloc() and will thus be freed through a device-managed
+ * resource.
+ *
+ * Returns:
+ * A pointer to the new drm_device, or an ERR_PTR() otherwise.
+ */
+#define drm_kunit_helper_alloc_drm_device(_test, _dev, _type, _member, _feat) \
+ ((_type *)__drm_kunit_helper_alloc_drm_device(_test, _dev, \
+ sizeof(_type), \
+ offsetof(_type, _member), \
+ _feat))
+
+#endif // DRM_KUNIT_HELPERS_H_
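[Editor's note: a minimal sketch of how a KUnit test might use the helpers declared above; the example_priv wrapper, test body and the DRIVER_MODESET flag are illustrative assumptions, not part of this patch.]

#include <kunit/test.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_kunit_helpers.h>

struct example_priv {
	struct drm_device drm;	/* must be embedded, see offsetof() above */
};

static void example_drm_alloc_test(struct kunit *test)
{
	struct example_priv *priv;
	struct device *dev;

	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	/* DRIVER_MODESET is only an example feature flag. */
	priv = drm_kunit_helper_alloc_drm_device(test, dev, struct example_priv,
						 drm, DRIVER_MODESET);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);

	/* The drm_device is device-managed; release the parent device. */
	drm_kunit_helper_free_device(test, dev);
}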
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 14eaecb1825c..816f196b3d4c 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -13,9 +13,10 @@
#include <drm/drm_simple_kms_helper.h>
struct drm_rect;
-struct spi_device;
struct gpio_desc;
+struct iosys_map;
struct regulator;
+struct spi_device;
/**
* struct mipi_dbi - MIPI DBI interface
@@ -122,11 +123,16 @@ struct mipi_dbi_dev {
struct backlight_device *backlight;
/**
- * @regulator: power regulator (optional)
+ * @regulator: power regulator (Vdd) (optional)
*/
struct regulator *regulator;
/**
+ * @io_regulator: I/O power regulator (Vddi) (optional)
+ */
+ struct regulator *io_regulator;
+
+ /**
* @dbi: MIPI DBI interface
*/
struct mipi_dbi dbi;
@@ -163,6 +169,15 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plan_state);
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
+int mipi_dbi_pipe_begin_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void mipi_dbi_pipe_end_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void mipi_dbi_pipe_reset_plane(struct drm_simple_display_pipe *pipe);
+struct drm_plane_state *mipi_dbi_pipe_duplicate_plane_state(struct drm_simple_display_pipe *pipe);
+void mipi_dbi_pipe_destroy_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
void mipi_dbi_hw_reset(struct mipi_dbi *dbi);
bool mipi_dbi_display_is_on(struct mipi_dbi *dbi);
int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev);
@@ -176,8 +191,9 @@ int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val);
int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
size_t len);
-int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap);
+
/**
* mipi_dbi_command - MIPI DCS command with optional parameter(s)
* @dbi: MIPI DBI structure
@@ -207,4 +223,25 @@ void mipi_dbi_debugfs_init(struct drm_minor *minor);
static inline void mipi_dbi_debugfs_init(struct drm_minor *minor) {}
#endif
+/**
+ * DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS - Initializes struct drm_simple_display_pipe_funcs
+ * for MIPI-DBI devices
+ * @enable_: Enable-callback implementation
+ *
+ * This macro initializes struct drm_simple_display_pipe_funcs with default
+ * values for MIPI-DBI-based devices. The only callback that depends on the
+ * hardware is @enable, for which the driver has to provide an implementation.
+ * MIPI-DBI-based drivers are encouraged to use this macro for initialization.
+ */
+#define DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(enable_) \
+ .mode_valid = mipi_dbi_pipe_mode_valid, \
+ .enable = (enable_), \
+ .disable = mipi_dbi_pipe_disable, \
+ .update = mipi_dbi_pipe_update, \
+ .begin_fb_access = mipi_dbi_pipe_begin_fb_access, \
+ .end_fb_access = mipi_dbi_pipe_end_fb_access, \
+ .reset_plane = mipi_dbi_pipe_reset_plane, \
+ .duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state, \
+ .destroy_plane_state = mipi_dbi_pipe_destroy_plane_state
+
#endif /* __LINUX_MIPI_DBI_H */
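[Editor's note: a sketch of how a MIPI-DBI driver might use the new initializer macro; example_pipe_enable is a placeholder for the hardware-specific enable code and is not taken from this patch.]

#include <drm/drm_mipi_dbi.h>
#include <drm/drm_simple_kms_helper.h>

static void example_pipe_enable(struct drm_simple_display_pipe *pipe,
				struct drm_crtc_state *crtc_state,
				struct drm_plane_state *plane_state)
{
	/* Hardware-specific power-up, init sequence and first flush. */
}

static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
	DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(example_pipe_enable),
};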
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 20b21b577dea..4f503d99f668 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -298,20 +298,41 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
u16 *brightness);
/**
+ * mipi_dsi_generic_write_seq - transmit data using a generic write packet
+ * @dsi: DSI peripheral device
+ * @seq: buffer containing the payload
+ */
+#define mipi_dsi_generic_write_seq(dsi, seq...) \
+ do { \
+ static const u8 d[] = { seq }; \
+ struct device *dev = &dsi->dev; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) { \
+ dev_err_ratelimited(dev, "transmit data failed: %d\n", \
+ ret); \
+ return ret; \
+ } \
+ } while (0)
+
+/**
* mipi_dsi_dcs_write_seq - transmit a DCS command with payload
* @dsi: DSI peripheral device
* @cmd: Command
* @seq: buffer containing data to be transmitted
*/
-#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) do { \
- static const u8 d[] = { cmd, seq }; \
- struct device *dev = &dsi->dev; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) { \
- dev_err_ratelimited(dev, "sending command %#02x failed: %d\n", cmd, ret); \
- return ret; \
- } \
+#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) \
+ do { \
+ static const u8 d[] = { cmd, seq }; \
+ struct device *dev = &dsi->dev; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) { \
+ dev_err_ratelimited( \
+ dev, "sending command %#02x failed: %d\n", \
+ cmd, ret); \
+ return ret; \
+ } \
} while (0)
/**
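[Editor's note: a sketch of a panel init helper using the two write-sequence macros; the command bytes are placeholders and not from any real panel datasheet.]

#include <video/mipi_display.h>

static int example_panel_init(struct mipi_dsi_device *dsi)
{
	/* Vendor-specific unlock via a generic write packet. */
	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x5a, 0x59);

	/* Standard DCS commands; the macros return on any transmit error. */
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x77);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_EXIT_SLEEP_MODE);

	return 0;
}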
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 5362702fffe1..e5b053001d22 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -712,11 +712,21 @@ struct drm_mode_config {
* between different TV connector types.
*/
struct drm_property *tv_select_subconnector_property;
+
/**
- * @tv_mode_property: Optional TV property to select
+ * @legacy_tv_mode_property: Optional TV property to select
* the output TV mode.
+ *
+ * Superseded by @tv_mode_property
+ */
+ struct drm_property *legacy_tv_mode_property;
+
+ /**
+ * @tv_mode_property: Optional TV property to select the TV
+ * standard to be output on the connector.
*/
struct drm_property *tv_mode_property;
+
/**
* @tv_left_margin_property: Optional TV property to set the left
* margin (expressed in pixels).
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index b0c680e6f670..c613f0abe9dc 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -468,6 +468,23 @@ bool drm_mode_is_420_also(const struct drm_display_info *display,
bool drm_mode_is_420(const struct drm_display_info *display,
const struct drm_display_mode *mode);
+struct drm_display_mode *drm_analog_tv_mode(struct drm_device *dev,
+ enum drm_connector_tv_mode mode,
+ unsigned long pixel_clock_hz,
+ unsigned int hdisplay,
+ unsigned int vdisplay,
+ bool interlace);
+
+static inline struct drm_display_mode *drm_mode_analog_ntsc_480i(struct drm_device *dev)
+{
+ return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_NTSC, 13500000, 720, 480, true);
+}
+
+static inline struct drm_display_mode *drm_mode_analog_pal_576i(struct drm_device *dev)
+{
+ return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_PAL, 13500000, 720, 576, true);
+}
+
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
bool reduced, bool interlaced,
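[Editor's note: a sketch of how a TV connector's .get_modes hook might use the new analog-mode helpers; the function name and preferred-mode handling are illustrative, not part of this patch.]

static int example_tv_get_modes(struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	/* 13.5 MHz pixel clock, 720x480 interlaced NTSC timing. */
	mode = drm_mode_analog_ntsc_480i(connector->dev);
	if (!mode)
		return 0;

	mode->type |= DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);
	return 1;
}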
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index d9f2254a039a..77a540ad7dcd 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1143,6 +1143,28 @@ struct drm_connector_helper_funcs {
*/
void (*cleanup_writeback_job)(struct drm_writeback_connector *connector,
struct drm_writeback_job *job);
+
+ /**
+ * @enable_hpd:
+ *
+ * Enable hot-plug detection for the connector.
+ *
+ * This operation is optional.
+ *
+ * This callback is used by the drm_kms_helper_poll_enable() helpers.
+ */
+ void (*enable_hpd)(struct drm_connector *connector);
+
+ /**
+ * @disable_hpd:
+ *
+ * Disable hot-plug detection for the connector.
+ *
+ * This operation is optional.
+ *
+ * This callback is used by the drm_kms_helper_poll_disable() helpers.
+ */
+ void (*disable_hpd)(struct drm_connector *connector);
};
/**
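[Editor's note: a sketch of a driver wiring up the new optional HPD callbacks; the example_* names and the interrupt handling hinted at in the comments are assumptions.]

#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>

static void example_hpd_enable(struct drm_connector *connector)
{
	/* e.g. unmask the HPD interrupt or start an internal detect timer */
}

static void example_hpd_disable(struct drm_connector *connector)
{
	/* e.g. mask the HPD interrupt again before suspend */
}

static const struct drm_connector_helper_funcs example_connector_helpers = {
	.get_modes = drm_connector_helper_get_modes,
	.enable_hpd = example_hpd_enable,
	.disable_hpd = example_hpd_disable,
};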
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 994bfcdd84c5..432fab2347eb 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -188,6 +188,16 @@ struct drm_panel {
* Panel entry in registry.
*/
struct list_head list;
+
+ /**
+ * @prepare_prev_first:
+ *
+ * The previous controller should be prepared first, before the prepare
+ * for the panel is called. This is largely required for DSI panels
+ * where the DSI host controller should be initialised to LP-11 before
+ * the panel is powered up.
+ */
+ bool prepare_prev_first;
};
void drm_panel_init(struct drm_panel *panel, struct device *dev,
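[Editor's note: a sketch of a DSI panel probe opting in to the new flag; struct example_panel and example_panel_funcs are assumed to be defined by the driver and are not part of this patch.]

struct example_panel {
	struct drm_panel panel;
};

static int example_panel_probe(struct mipi_dsi_device *dsi)
{
	struct example_panel *ctx;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	drm_panel_init(&ctx->panel, &dsi->dev, &example_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	/* Ask the core to prepare the DSI host (LP-11) before this panel. */
	ctx->panel.prepare_prev_first = true;

	drm_panel_add(&ctx->panel);
	return mipi_dsi_attach(dsi);
}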
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 447e664e49d5..51291983ea44 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -77,8 +77,8 @@ struct drm_plane_state {
* write this field directly for a driver's implicit fence.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb()
- * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers.
+ * &drm_plane_helper_funcs.prepare_fb callback. See
+ * drm_gem_plane_helper_prepare_fb() for a suitable helper.
*/
struct dma_fence *fence;
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 5880daa14624..4977e0ab72db 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -35,5 +35,6 @@ int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector);
int drm_connector_helper_get_modes_fixed(struct drm_connector *connector,
const struct drm_display_mode *fixed_mode);
int drm_connector_helper_get_modes(struct drm_connector *connector);
+int drm_connector_helper_tv_get_modes(struct drm_connector *connector);
#endif
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 2298fe3af4cd..b2486d073763 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -117,9 +117,9 @@ struct drm_simple_display_pipe_funcs {
* more details.
*
* For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook
- * set drm_gem_simple_display_pipe_prepare_fb() is called automatically
+ * set, drm_gem_plane_helper_prepare_fb() is called automatically
* to implement this. Other drivers which need additional plane
- * processing can call drm_gem_simple_display_pipe_prepare_fb() from
+ * processing can call drm_gem_plane_helper_prepare_fb() from
* their @prepare_fb hook.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo.h
index 44a538ee5e2a..d87232472435 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -32,28 +32,24 @@
#define _TTM_BO_API_H_
#include <drm/drm_gem.h>
-#include <drm/drm_vma_manager.h>
+
#include <linux/kref.h>
#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/dma-resv.h>
-
-#include "ttm_resource.h"
-struct ttm_global;
+#include "ttm_device.h"
-struct ttm_device;
+/* Default number of pre-faulted pages in the TTM fault handler */
+#define TTM_BO_VM_NUM_PREFAULT 16
struct iosys_map;
-struct drm_mm_node;
-
+struct ttm_global;
+struct ttm_device;
struct ttm_placement;
-
struct ttm_place;
+struct ttm_resource;
+struct ttm_resource_manager;
+struct ttm_tt;
/**
* enum ttm_bo_type
@@ -68,15 +64,12 @@ struct ttm_place;
* @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
* driver.
*/
-
enum ttm_bo_type {
ttm_bo_type_device,
ttm_bo_type_kernel,
ttm_bo_type_sg
};
-struct ttm_tt;
-
/**
* struct ttm_buffer_object
*
@@ -85,18 +78,11 @@ struct ttm_tt;
* @type: The bo type.
* @page_alignment: Page alignment.
* @destroy: Destruction function. If NULL, kfree is used.
- * @num_pages: Actual number of pages.
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is destroyed or put on the delayed delete list.
- * @mem: structure describing current placement.
+ * @resource: structure describing current placement.
* @ttm: TTM structure holding system pages.
- * @evicted: Whether the object was evicted without user-space knowing.
* @deleted: True if the object is only a zombie and already deleted.
- * @ddestroy: List head for the delayed destroy list.
- * @swap: List head for swap LRU list.
- * @offset: The current GPU offset, which can have different meanings
- * depending on the memory type. For SYSTEM type memory, it should be 0.
- * @cur_placement: Hint of current placement.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -109,52 +95,43 @@ struct ttm_tt;
* The destroy member, the API visibility of this object makes it possible
* to derive driver specific types.
*/
-
struct ttm_buffer_object {
struct drm_gem_object base;
- /**
+ /*
* Members constant at init.
*/
-
struct ttm_device *bdev;
enum ttm_bo_type type;
uint32_t page_alignment;
void (*destroy) (struct ttm_buffer_object *);
- /**
+ /*
* Members not needing protection.
*/
struct kref kref;
- /**
+ /*
* Members protected by the bo::resv::reserved lock.
*/
-
struct ttm_resource *resource;
struct ttm_tt *ttm;
bool deleted;
struct ttm_lru_bulk_move *bulk_move;
+ unsigned priority;
+ unsigned pin_count;
/**
- * Members protected by the bdev::lru_lock.
- */
-
- struct list_head ddestroy;
-
- /**
- * Members protected by a bo reservation.
+ * @delayed_delete: Work item used when we can't delete the BO
+ * immediately
*/
-
- unsigned priority;
- unsigned pin_count;
+ struct work_struct delayed_delete;
/**
* Special members that are protected by the reserve lock
* and the bo::lock when written to. Can be read with
* either of these locks held.
*/
-
struct sg_table *sg;
};
@@ -170,7 +147,6 @@ struct ttm_buffer_object {
* mapping can either be an ioremap, a vmap, a kmap or part of a
* premapped region.
*/
-
#define TTM_BO_MAP_IOMEM_MASK 0x80
struct ttm_bo_kmap_obj {
void *virtual;
@@ -238,95 +214,120 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
}
/**
- * ttm_bo_wait - wait for buffer idle.
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @ticket: ticket used to acquire the ww_mutex.
*
- * @bo: The buffer object.
- * @interruptible: Use interruptible wait.
- * @no_wait: Return immediately if buffer is busy.
+ * Locks a buffer object for validation. (Or prevents other processes from
+ * locking it for validation), while taking a number of measures to prevent
+ * deadlocks.
*
- * This function must be called with the bo::mutex held, and makes
- * sure any previous rendering to the buffer is completed.
- * Note: It might be necessary to block validations before the
- * wait by reserving the buffer.
- * Returns -EBUSY if no_wait is true and the buffer is busy.
- * Returns -ERESTARTSYS if interrupted by a signal.
+ * Returns:
+ * -EDEADLK: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again.
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true.
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @ticket is non-NULL.
*/
-int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
-
-static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible, bool no_wait,
+ struct ww_acquire_ctx *ticket)
{
- return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-}
+ int ret = 0;
-/**
- * ttm_bo_validate
- *
- * @bo: The buffer object.
- * @placement: Proposed placement for the buffer object.
- * @ctx: validation parameters.
- *
- * Changes placement and caching policy of the buffer object
- * according proposed placement.
- * Returns
- * -EINVAL on invalid proposed placement.
- * -ENOMEM on out-of-memory condition.
- * -EBUSY if no_wait is true and buffer busy.
- * -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_validate(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_operation_ctx *ctx);
+ if (no_wait) {
+ bool success;
+
+ if (WARN_ON(ticket))
+ return -EBUSY;
+
+ success = dma_resv_trylock(bo->base.resv);
+ return success ? 0 : -EBUSY;
+ }
+
+ if (interruptible)
+ ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
+ else
+ ret = dma_resv_lock(bo->base.resv, ticket);
+ if (ret == -EINTR)
+ return -ERESTARTSYS;
+ return ret;
+}
/**
- * ttm_bo_put
- *
- * @bo: The buffer object.
- *
- * Unreference a buffer object.
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @ticket: The ww_acquire_ctx used for the slow-path lock
+ *
+ * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
*/
-void ttm_bo_put(struct ttm_buffer_object *bo);
+static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+ bool interruptible,
+ struct ww_acquire_ctx *ticket)
+{
+ if (interruptible) {
+ int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
+ ticket);
+ if (ret == -EINTR)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
+ dma_resv_lock_slow(bo->base.resv, ticket);
+ return 0;
+}
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
-void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
- struct ttm_lru_bulk_move *bulk);
-/**
- * ttm_bo_lock_delayed_workqueue
- *
- * Prevent the delayed workqueue from running.
- * Returns
- * True if the workqueue was queued at the time
- */
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
+static inline void
+ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
+{
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_bo_move_to_lru_tail(bo);
+ spin_unlock(&bo->bdev->lru_lock);
+}
+
+static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ WARN_ON(bo->resource);
+ bo->resource = new_mem;
+}
/**
- * ttm_bo_unlock_delayed_workqueue
+ * ttm_bo_move_null - assign memory for a buffer object.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
*
- * Allows the delayed workqueue to run.
+ * Assign the memory from new_mem to the memory of the buffer object bo.
*/
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
+static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, new_mem);
+}
/**
- * ttm_bo_eviction_valuable
+ * ttm_bo_unreserve
*
- * @bo: The buffer object to evict
- * @place: the placement we need to make room for
+ * @bo: A pointer to a struct ttm_buffer_object.
*
- * Check if it is valuable to evict the BO to make room for the given placement.
+ * Unreserve a previous reservation of @bo.
*/
-bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
- const struct ttm_place *place);
-
-int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
- enum ttm_bo_type type, struct ttm_placement *placement,
- uint32_t alignment, struct ttm_operation_ctx *ctx,
- struct sg_table *sg, struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *));
-int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
- enum ttm_bo_type type, struct ttm_placement *placement,
- uint32_t alignment, bool interruptible,
- struct sg_table *sg, struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *));
+static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+ dma_resv_unlock(bo->base.resv);
+}
/**
* ttm_kmap_obj_virtual
@@ -346,126 +347,84 @@ static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
return map->virtual;
}
-/**
- * ttm_bo_kmap
- *
- * @bo: The buffer object.
- * @start_page: The first page to map.
- * @num_pages: Number of pages to map.
- * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
- *
- * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
- * data in the buffer object. The ttm_kmap_obj_virtual function can then be
- * used to obtain a virtual address to the data.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid range.
- */
+int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
+int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx);
+void ttm_bo_put(struct ttm_buffer_object *bo);
+void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk);
+int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
+void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy)(struct ttm_buffer_object *));
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, bool interruptible,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
unsigned long num_pages, struct ttm_bo_kmap_obj *map);
-
-/**
- * ttm_bo_kunmap
- *
- * @map: Object describing the map to unmap.
- *
- * Unmaps a kernel map set up by ttm_bo_kmap.
- */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
-
-/**
- * ttm_bo_vmap
- *
- * @bo: The buffer object.
- * @map: pointer to a struct iosys_map representing the map.
- *
- * Sets up a kernel virtual mapping, using ioremap or vmap to the
- * data in the buffer object. The parameter @map returns the virtual
- * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid range.
- */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
-
-/**
- * ttm_bo_vunmap
- *
- * @bo: The buffer object.
- * @map: Object describing the map to unmap.
- *
- * Unmaps a kernel map set up by ttm_bo_vmap().
- */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
-
-/**
- * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
- *
- * @vma: vma as input from the fbdev mmap method.
- * @bo: The bo backing the address space.
- *
- * Maps a buffer object.
- */
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_io
- *
- * @bdev: Pointer to the struct ttm_device.
- * @filp: Pointer to the struct file attempting to read / write.
- * @wbuf: User-space pointer to address of buffer to write. NULL on read.
- * @rbuf: User-space pointer to address of buffer to read into.
- * Null on write.
- * @count: Number of bytes to read / write.
- * @f_pos: Pointer to current file position.
- * @write: 1 for read, 0 for write.
- *
- * This function implements read / write into ttm buffer objects, and is
- * intended to
- * be called from the fops::read and fops::write method.
- * Returns:
- * See man (2) write, man(2) read. In particular,
- * the function may return -ERESTARTSYS if
- * interrupted by a signal.
- */
-ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
- const char __user *wbuf, char __user *rbuf,
- size_t count, loff_t *f_pos, bool write);
-
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags);
-
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
-
int ttm_mem_evict_first(struct ttm_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
struct ww_acquire_ctx *ticket);
-
-/* Default number of pre-faulted pages in the TTM fault handler */
-#define TTM_BO_VM_NUM_PREFAULT 16
-
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
struct vm_fault *vmf);
-
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
pgprot_t prot,
pgoff_t num_prefault);
-
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
-
void ttm_bo_vm_open(struct vm_area_struct *vma);
-
void ttm_bo_vm_close(struct vm_area_struct *vma);
-
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
-bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
-
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_resource **mem,
+ struct ttm_operation_ctx *ctx);
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+/*
+ * ttm_bo_util.c
+ */
+int ttm_mem_io_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+void ttm_mem_io_free(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+void ttm_move_memcpy(bool clear, u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter);
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem);
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ struct dma_fence *fence, bool evict,
+ bool pipeline,
+ struct ttm_resource *new_mem);
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem);
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp);
+void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
+
#endif
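[Editor's note: a minimal sketch of the usual reserve/validate/unreserve pattern built from the helpers that now live in ttm_bo.h; the example_pin_bo name is illustrative and the placement is assumed to be set up by the driver.]

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

static int example_pin_bo(struct ttm_buffer_object *bo,
			  struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	int ret;

	/* Lock the reservation object; NULL ticket, blocking variant. */
	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_validate(bo, placement, &ctx);
	if (!ret)
		ttm_bo_pin(bo);

	ttm_bo_unreserve(bo);
	return ret;
}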
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
deleted file mode 100644
index 1afa891f488a..000000000000
--- a/include/drm/ttm/ttm_bo_driver.h
+++ /dev/null
@@ -1,303 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-#ifndef _TTM_BO_DRIVER_H_
-#define _TTM_BO_DRIVER_H_
-
-#include <drm/drm_mm.h>
-#include <drm/drm_vma_manager.h>
-#include <linux/workqueue.h>
-#include <linux/fs.h>
-#include <linux/spinlock.h>
-#include <linux/dma-resv.h>
-
-#include <drm/ttm/ttm_device.h>
-
-#include "ttm_bo_api.h"
-#include "ttm_kmap_iter.h"
-#include "ttm_placement.h"
-#include "ttm_tt.h"
-#include "ttm_pool.h"
-
-/*
- * ttm_bo.c
- */
-
-/**
- * ttm_bo_mem_space
- *
- * @bo: Pointer to a struct ttm_buffer_object. the data of which
- * we want to allocate space for.
- * @proposed_placement: Proposed new placement for the buffer object.
- * @mem: A struct ttm_resource.
- * @interruptible: Sleep interruptible when sliping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- *
- * Allocate memory space for the buffer object pointed to by @bo, using
- * the placement flags in @mem, potentially evicting other idle buffer objects.
- * This function may sleep while waiting for space to become available.
- * Returns:
- * -EBUSY: No space available (only if no_wait == 1).
- * -ENOMEM: Could not allocate memory for the buffer object, either due to
- * fragmentation or concurrent allocators.
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_resource **mem,
- struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_bo_unmap_virtual
- *
- * @bo: tear down the virtual mappings for this BO
- */
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_reserve:
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @ticket: ticket used to acquire the ww_mutex.
- *
- * Locks a buffer object for validation. (Or prevents other processes from
- * locking it for validation), while taking a number of measures to prevent
- * deadlocks.
- *
- * Returns:
- * -EDEADLK: The reservation may cause a deadlock.
- * Release all buffer reservations, wait for @bo to become unreserved and
- * try again.
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- * -EBUSY: The function needed to sleep, but @no_wait was true
- * -EALREADY: Bo already reserved using @ticket. This error code will only
- * be returned if @use_ticket is set to true.
- */
-static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait,
- struct ww_acquire_ctx *ticket)
-{
- int ret;
-
- if (no_wait) {
- bool success;
- if (WARN_ON(ticket))
- return -EBUSY;
-
- success = dma_resv_trylock(bo->base.resv);
- return success ? 0 : -EBUSY;
- }
-
- if (interruptible)
- ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
- else
- ret = dma_resv_lock(bo->base.resv, ticket);
- if (ret == -EINTR)
- return -ERESTARTSYS;
- return ret;
-}
-
-/**
- * ttm_bo_reserve_slowpath:
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @sequence: Set (@bo)->sequence to this value after lock
- *
- * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
- * from all our other reservations. Because there are no other reservations
- * held by us, this function cannot deadlock any more.
- */
-static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
- bool interruptible,
- struct ww_acquire_ctx *ticket)
-{
- if (interruptible) {
- int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
- ticket);
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
- return ret;
- }
- dma_resv_lock_slow(bo->base.resv, ticket);
- return 0;
-}
-
-static inline void
-ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
-{
- spin_lock(&bo->bdev->lru_lock);
- ttm_bo_move_to_lru_tail(bo);
- spin_unlock(&bo->bdev->lru_lock);
-}
-
-static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
- struct ttm_resource *new_mem)
-{
- WARN_ON(bo->resource);
- bo->resource = new_mem;
-}
-
-/**
- * ttm_bo_move_null = assign memory for a buffer object.
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
- *
- * Assign the memory from new_mem to the memory of the buffer object bo.
- */
-static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
- struct ttm_resource *new_mem)
-{
- ttm_resource_free(bo, &bo->resource);
- ttm_bo_assign_mem(bo, new_mem);
-}
-
-/**
- * ttm_bo_unreserve
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Unreserve a previous reservation of @bo.
- */
-static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
-{
- ttm_bo_move_to_lru_tail_unlocked(bo);
- dma_resv_unlock(bo->base.resv);
-}
-
-/*
- * ttm_bo_util.c
- */
-int ttm_mem_io_reserve(struct ttm_device *bdev,
- struct ttm_resource *mem);
-void ttm_mem_io_free(struct ttm_device *bdev,
- struct ttm_resource *mem);
-
-/**
- * ttm_bo_move_memcpy
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Fallback move function for a mappable buffer object in mappable memory.
- * The function will, if successful,
- * free any old aperture space, and set (@new_mem)->mm_node to NULL,
- * and update the (@bo)->mem placement flags. If unsuccessful, the old
- * data remains untouched, and it's up to the caller to free the
- * memory space indicated by @new_mem.
- * Returns:
- * !0: Failure.
- */
-
-int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem);
-
-/**
- * ttm_bo_move_accel_cleanup.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @pipeline: evictions are to be pipelined.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Accelerated move function to be called when an accelerated move
- * has been scheduled. The function will create a new temporary buffer object
- * representing the old placement, and put the sync object on both buffer
- * objects. After that the newly created buffer object is unref'd to be
- * destroyed when the move is complete. This will help pipeline
- * buffer moves.
- */
-int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct dma_fence *fence, bool evict,
- bool pipeline,
- struct ttm_resource *new_mem);
-
-/**
- * ttm_bo_move_sync_cleanup.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
- * by the caller to be idle. Typically used after memcpy buffer moves.
- */
-void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
- struct ttm_resource *new_mem);
-
-/**
- * ttm_bo_pipeline_gutting.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Pipelined gutting a BO of its backing store.
- */
-int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
-
-/**
- * ttm_io_prot
- *
- * bo: ttm buffer object
- * res: ttm resource object
- * @tmp: Page protection flag for a normal, cached mapping.
- *
- * Utility function that returns the pgprot_t that should be used for
- * setting up a PTE with the caching model indicated by @c_state.
- */
-pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
- pgprot_t tmp);
-
-/**
- * ttm_bo_tt_bind
- *
- * Bind the object tt to a memory resource.
- */
-int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
-
-/**
- * ttm_bo_tt_destroy.
- */
-void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
-
-void ttm_move_memcpy(bool clear,
- u32 num_pages,
- struct ttm_kmap_iter *dst_iter,
- struct ttm_kmap_iter *src_iter);
-
-struct ttm_kmap_iter *
-ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
- struct io_mapping *iomap,
- struct sg_table *st,
- resource_size_t start);
-#endif
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 95b3c04b1ab9..4f3e81eac6f3 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -252,11 +252,6 @@ struct ttm_device {
spinlock_t lru_lock;
/**
- * @ddestroy: Destroyed but not yet cleaned up buffer objects.
- */
- struct list_head ddestroy;
-
- /**
* @pinned: Buffer objects which are pinned and so not on any LRU list.
*/
struct list_head pinned;
@@ -270,7 +265,7 @@ struct ttm_device {
/**
* @wq: Work queue structure for the delayed delete workqueue.
*/
- struct delayed_work wq;
+ struct workqueue_struct *wq;
};
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index a99d7fdf2964..03aca29d3ce4 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -33,7 +33,9 @@
#include <linux/list.h>
-#include "ttm_bo_api.h"
+struct ww_acquire_ctx;
+struct dma_fence;
+struct ttm_buffer_object;
/**
* struct ttm_validate_buffer
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 6fa8d4e29719..3f31baa3293f 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -356,7 +356,7 @@ struct dma_buf {
*/
const char *name;
- /** @name_lock: Spinlock to protect name acces for read access. */
+ /** @name_lock: Spinlock to protect @name for read access. */
spinlock_t name_lock;
/**
@@ -393,7 +393,7 @@ struct dma_buf {
* anything the userspace API considers write access.
*
* - Drivers may just always add a write fence, since that only
- * causes unecessarily synchronization, but no correctness issues.
+ * causes unnecessary synchronization, but no correctness issues.
*
* - Some drivers only expose a synchronous userspace API with no
* pipelining across drivers. These do not set any fences for their
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index d6be2a686100..4fad4aa245fb 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -7,6 +7,7 @@
#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H
+#include <linux/bits.h>
#include <linux/types.h>
#include <linux/uuid.h>
@@ -639,8 +640,9 @@ enum {
NVME_CMD_EFFECTS_NCC = 1 << 2,
NVME_CMD_EFFECTS_NIC = 1 << 3,
NVME_CMD_EFFECTS_CCC = 1 << 4,
- NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
+ NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
+ NVME_CMD_EFFECTS_SCOPE_MASK = GENMASK(31, 20),
};
struct nvme_effects_log {
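[Editor's note: a sketch of how the GENMASK-defined CSE field might be extracted with FIELD_GET() from <linux/bitfield.h>; the example_cmd_cse helper is illustrative, not part of this patch.]

#include <linux/bitfield.h>
#include <linux/nvme.h>

static u8 example_cmd_cse(const struct nvme_effects_log *log, u8 opcode)
{
	u32 effects = le32_to_cpu(log->acs[opcode]);

	/* Bits 18:16 hold the Command Submission and Execution field. */
	return FIELD_GET(NVME_CMD_EFFECTS_CSE_MASK, effects);
}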
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4038abe8505a..fe7f871e3080 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -832,6 +832,10 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK 0x8
/* Subquery id: Query GPU stable pstate memory clock */
#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK 0x9
+ /* Subquery id: Query GPU peak pstate shader clock */
+ #define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK 0xa
+ /* Subquery id: Query GPU peak pstate memory clock */
+ #define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK 0xb
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
@@ -1107,6 +1111,8 @@ struct drm_amdgpu_info_device {
__u32 pa_sc_tile_steering_override;
/* disabled TCCs */
__u64 tcc_disabled_mask;
+ __u64 min_engine_clock;
+ __u64 min_memory_clock;
};
struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index bc056f2d537d..de703c6be969 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -88,6 +88,18 @@ extern "C" {
*
* The authoritative list of format modifier codes is found in
* `include/uapi/drm/drm_fourcc.h`
+ *
+ * Open Source User Waiver
+ * -----------------------
+ *
+ * Because this is the authoritative source for pixel formats and modifiers
+ * referenced by GL, Vulkan extensions and other standards and hence used both
+ * by open source and closed source driver stacks, the usual requirement for an
+ * upstream in-kernel or open source userspace user does not apply.
+ *
+ * To ensure, as much as feasible, compatibility across stacks and avoid
+ * confusion with incompatible enumerations, stakeholders for all relevant
+ * driver stacks should approve additions.
*/
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 9d4c4078e8d0..2780bce62faf 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -10,7 +10,15 @@
#include <linux/fs.h>
#include <linux/types.h>
+/*
+ * This file is shared with liburing, which has to autodetect whether
+ * linux/time_types.h is available. If it is not, liburing can define
+ * UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H before including this
+ * header to skip the include.
+ */
+#ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
#include <linux/time_types.h>
+#endif
#ifdef __cplusplus
extern "C" {
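[Editor's note: for illustration, a userspace consumer that lacks linux/time_types.h would do something like the following before including the header; this is the pattern the new guard enables.]

/* Userspace example; only needed when linux/time_types.h is unavailable. */
#define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
#include <linux/io_uring.h>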
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 20522d4ba1e0..55155e262646 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1767,6 +1767,7 @@ struct kvm_xen_hvm_attr {
__u8 runstate_update_flag;
struct {
__u64 gfn;
+#define KVM_XEN_INVALID_GFN ((__u64)-1)
} shared_info;
struct {
__u32 send_port;
@@ -1798,6 +1799,7 @@ struct kvm_xen_hvm_attr {
} u;
};
+
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
@@ -1823,6 +1825,7 @@ struct kvm_xen_vcpu_attr {
__u16 pad[3];
union {
__u64 gpa;
+#define KVM_XEN_INVALID_GPA ((__u64)-1)
__u64 pad[8];
struct {
__u64 state;
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index ca9a24c874da..a03c543cb072 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x1022 */
+/* RGB - next is 0x1025 */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -46,8 +46,11 @@
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_BGR666_1X18 0x1023
#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
+#define MEDIA_BUS_FMT_BGR666_1X24_CPADHI 0x1024
+#define MEDIA_BUS_FMT_RGB565_1X24_CPADHI 0x1022
#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010
#define MEDIA_BUS_FMT_BGR888_1X24 0x1013
#define MEDIA_BUS_FMT_BGR888_3X8 0x101b
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index 2291a53cdabd..b4f5dfacc0c3 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -288,24 +288,23 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
+ mutex_unlock(&ctx->uring_lock);
if (ret != -EALREADY)
break;
- mutex_unlock(&ctx->uring_lock);
ret = io_run_task_work_sig(ctx);
- if (ret < 0) {
- mutex_lock(&ctx->uring_lock);
+ if (ret < 0)
break;
- }
ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
- mutex_lock(&ctx->uring_lock);
if (!ret) {
ret = -ETIME;
break;
}
+ mutex_lock(&ctx->uring_lock);
} while (1);
finish_wait(&ctx->cq_wait, &wait);
+ mutex_lock(&ctx->uring_lock);
if (ret == -ENOENT || ret > 0)
ret = 0;
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ff2bbac1a10f..58ac13b69dc8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -677,16 +677,20 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
io_cq_unlock_post(ctx);
}
+static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
+{
+ /* iopoll syncs against uring_lock, not completion_lock */
+ if (ctx->flags & IORING_SETUP_IOPOLL)
+ mutex_lock(&ctx->uring_lock);
+ __io_cqring_overflow_flush(ctx);
+ if (ctx->flags & IORING_SETUP_IOPOLL)
+ mutex_unlock(&ctx->uring_lock);
+}
+
static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
{
- if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
- /* iopoll syncs against uring_lock, not completion_lock */
- if (ctx->flags & IORING_SETUP_IOPOLL)
- mutex_lock(&ctx->uring_lock);
- __io_cqring_overflow_flush(ctx);
- if (ctx->flags & IORING_SETUP_IOPOLL)
- mutex_unlock(&ctx->uring_lock);
- }
+ if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+ io_cqring_do_overflow_flush(ctx);
}
void __io_put_task(struct task_struct *task, int nr)
@@ -2549,7 +2553,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
trace_io_uring_cqring_wait(ctx, min_events);
do {
- io_cqring_overflow_flush(ctx);
+ if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
+ finish_wait(&ctx->cq_wait, &iowq.wq);
+ io_cqring_do_overflow_flush(ctx);
+ }
prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
TASK_INTERRUPTIBLE);
ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
@@ -4013,8 +4020,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
return -EEXIST;
if (ctx->restricted) {
- if (opcode >= IORING_REGISTER_LAST)
- return -EINVAL;
opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
if (!test_bit(opcode, ctx->restrictions.register_op))
return -EACCES;
@@ -4170,6 +4175,9 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
long ret = -EBADF;
struct fd f;
+ if (opcode >= IORING_REGISTER_LAST)
+ return -EINVAL;
+
f = fdget(fd);
if (!f.file)
return -EBADF;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eacc3702654d..d56328e5080e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -380,7 +380,6 @@ enum event_type_t {
/*
* perf_sched_events : >0 events exist
- * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/
static void perf_sched_delayed(struct work_struct *work);
@@ -389,7 +388,6 @@ static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;
-static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
static atomic_t nr_mmap_events __read_mostly;
@@ -844,9 +842,16 @@ static void perf_cgroup_switch(struct task_struct *task)
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_cgroup *cgrp;
- cgrp = perf_cgroup_from_task(task, NULL);
+ /*
+ * cpuctx->cgrp is set when the first cgroup event is enabled,
+ * and is cleared when the last cgroup event is disabled.
+ */
+ if (READ_ONCE(cpuctx->cgrp) == NULL)
+ return;
WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+
+ cgrp = perf_cgroup_from_task(task, NULL);
if (READ_ONCE(cpuctx->cgrp) == cgrp)
return;
@@ -3631,8 +3636,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
* to check if we have to switch out PMU state.
* cgroup event are system-wide mode only
*/
- if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
- perf_cgroup_switch(next);
+ perf_cgroup_switch(next);
}
static bool perf_less_group_idx(const void *l, const void *r)
@@ -4974,15 +4978,6 @@ static void unaccount_pmu_sb_event(struct perf_event *event)
detach_sb_event(event);
}
-static void unaccount_event_cpu(struct perf_event *event, int cpu)
-{
- if (event->parent)
- return;
-
- if (is_cgroup_event(event))
- atomic_dec(&per_cpu(perf_cgroup_events, cpu));
-}
-
#ifdef CONFIG_NO_HZ_FULL
static DEFINE_SPINLOCK(nr_freq_lock);
#endif
@@ -5048,8 +5043,6 @@ static void unaccount_event(struct perf_event *event)
schedule_delayed_work(&perf_sched_work, HZ);
}
- unaccount_event_cpu(event, event->cpu);
-
unaccount_pmu_sb_event(event);
}
@@ -11679,15 +11672,6 @@ static void account_pmu_sb_event(struct perf_event *event)
attach_sb_event(event);
}
-static void account_event_cpu(struct perf_event *event, int cpu)
-{
- if (event->parent)
- return;
-
- if (is_cgroup_event(event))
- atomic_inc(&per_cpu(perf_cgroup_events, cpu));
-}
-
/* Freq events need the tick to stay alive (see perf_event_task_tick). */
static void account_freq_event_nohz(void)
{
@@ -11775,8 +11759,6 @@ static void account_event(struct perf_event *event)
}
enabled:
- account_event_cpu(event, event->cpu);
-
account_pmu_sb_event(event);
}
@@ -12339,12 +12321,12 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
- /* Do we allow access to perf_event_open(2) ? */
- err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
+ err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
- err = perf_copy_attr(attr_uptr, &attr);
+ /* Do we allow access to perf_event_open(2) ? */
+ err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
if (err)
return err;
@@ -12689,7 +12671,8 @@ SYSCALL_DEFINE5(perf_event_open,
return event_fd;
err_context:
- /* event->pmu_ctx freed by free_event() */
+ put_pmu_ctx(event->pmu_ctx);
+ event->pmu_ctx = NULL; /* _free_event() */
err_locked:
mutex_unlock(&ctx->mutex);
perf_unpin_context(ctx);
@@ -12802,6 +12785,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
err_pmu_ctx:
put_pmu_ctx(pmu_ctx);
+ event->pmu_ctx = NULL; /* _free_event() */
err_unlock:
mutex_unlock(&ctx->mutex);
perf_unpin_context(ctx);
@@ -12822,13 +12806,11 @@ static void __perf_pmu_remove(struct perf_event_context *ctx,
perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
perf_remove_from_context(event, 0);
- unaccount_event_cpu(event, cpu);
put_pmu_ctx(event->pmu_ctx);
list_add(&event->migrate_entry, events);
for_each_sibling_event(sibling, event) {
perf_remove_from_context(sibling, 0);
- unaccount_event_cpu(sibling, cpu);
put_pmu_ctx(sibling->pmu_ctx);
list_add(&sibling->migrate_entry, events);
}
@@ -12847,7 +12829,6 @@ static void __perf_pmu_install_event(struct pmu *pmu,
if (event->state >= PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_INACTIVE;
- account_event_cpu(event, cpu);
perf_install_in_context(ctx, event, cpu);
}
@@ -13231,7 +13212,7 @@ inherit_event(struct perf_event *parent_event,
pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
if (IS_ERR(pmu_ctx)) {
free_event(child_event);
- return NULL;
+ return ERR_CAST(pmu_ctx);
}
child_event->pmu_ctx = pmu_ctx;
@@ -13742,8 +13723,7 @@ static int __perf_cgroup_move(void *info)
struct task_struct *task = info;
preempt_disable();
- if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
- perf_cgroup_switch(task);
+ perf_cgroup_switch(task);
preempt_enable();
return 0;
diff --git a/kernel/futex/syscalls.c b/kernel/futex/syscalls.c
index 086a22d1adb7..a8074079b09e 100644
--- a/kernel/futex/syscalls.c
+++ b/kernel/futex/syscalls.c
@@ -286,19 +286,22 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
}
futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
- if (!futexv)
- return -ENOMEM;
+ if (!futexv) {
+ ret = -ENOMEM;
+ goto destroy_timer;
+ }
ret = futex_parse_waitv(futexv, waiters, nr_futexes);
if (!ret)
ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
+ kfree(futexv);
+
+destroy_timer:
if (timeout) {
hrtimer_cancel(&to.timer);
destroy_hrtimer_on_stack(&to.timer);
}
-
- kfree(futexv);
return ret;
}
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 7779ee8abc2a..010cf4e6d0b8 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
* set this bit before looking at the lock.
*/
-static __always_inline void
-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+static __always_inline struct task_struct *
+rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
{
unsigned long val = (unsigned long)owner;
if (rt_mutex_has_waiters(lock))
val |= RT_MUTEX_HAS_WAITERS;
- WRITE_ONCE(lock->owner, (struct task_struct *)val);
+ return (struct task_struct *)val;
+}
+
+static __always_inline void
+rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+{
+ /*
+ * lock->wait_lock is held but explicit acquire semantics are needed
+ * for a new lock owner so WRITE_ONCE is insufficient.
+ */
+ xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
+}
+
+static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
+{
+ /* lock->wait_lock is held so the unlock provides release semantics. */
+ WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
}
static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}
-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
+static __always_inline void
+fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
{
unsigned long owner, *p = (unsigned long *) &lock->owner;
@@ -172,8 +189,21 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
* still set.
*/
owner = READ_ONCE(*p);
- if (owner & RT_MUTEX_HAS_WAITERS)
- WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+ if (owner & RT_MUTEX_HAS_WAITERS) {
+ /*
+ * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
+ * why xchg_acquire() is used for updating owner for
+ * locking and WRITE_ONCE() for unlocking.
+ *
+ * WRITE_ONCE() would work for the acquire case too, but
+ * in case that the lock acquisition failed it might
+ * force other lockers into the slow path unnecessarily.
+ */
+ if (acquire_lock)
+ xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
+ else
+ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+ }
}
/*
@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
owner = *p;
} while (cmpxchg_relaxed(p, owner,
owner | RT_MUTEX_HAS_WAITERS) != owner);
+
+ /*
+ * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
+ * operations in the event of contention. Ensure the successful
+ * cmpxchg is visible.
+ */
+ smp_mb__after_atomic();
}
/*
@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
* try_to_take_rt_mutex() sets the lock waiters bit
* unconditionally. Clean this up.
*/
- fixup_rt_mutex_waiters(lock);
+ fixup_rt_mutex_waiters(lock, true);
return ret;
}
@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
* try_to_take_rt_mutex() sets the waiter bit
* unconditionally. We might have to fix that up.
*/
- fixup_rt_mutex_waiters(lock);
+ fixup_rt_mutex_waiters(lock, true);
trace_contention_end(lock, ret);
@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
* try_to_take_rt_mutex() sets the waiter bit unconditionally.
* We might have to fix that up:
*/
- fixup_rt_mutex_waiters(lock);
+ fixup_rt_mutex_waiters(lock, true);
debug_rt_mutex_free_waiter(&waiter);
trace_contention_end(lock, 0);
diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index 900220941caa..cb9fdff76a8a 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -267,7 +267,7 @@ void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
debug_rt_mutex_proxy_unlock(lock);
- rt_mutex_set_owner(lock, NULL);
+ rt_mutex_clear_owner(lock);
}
/**
@@ -382,7 +382,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
*/
- fixup_rt_mutex_waiters(lock);
+ fixup_rt_mutex_waiters(lock, true);
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
@@ -438,7 +438,7 @@ bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
*/
- fixup_rt_mutex_waiters(lock);
+ fixup_rt_mutex_waiters(lock, false);
raw_spin_unlock_irq(&lock->wait_lock);
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
index f5f51166d8c2..cc32743c1171 100644
--- a/lib/kunit/string-stream.c
+++ b/lib/kunit/string-stream.c
@@ -23,8 +23,10 @@ static struct string_stream_fragment *alloc_string_stream_fragment(
return ERR_PTR(-ENOMEM);
frag->fragment = kunit_kmalloc(test, len, gfp);
- if (!frag->fragment)
+ if (!frag->fragment) {
+ kunit_kfree(test, frag);
return ERR_PTR(-ENOMEM);
+ }
return frag;
}
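The string-stream fix above releases the partially constructed fragment when its payload allocation fails. A plain-C analogy of that cleanup pattern, assuming malloc()/free() in place of the kunit-managed allocators and a made-up struct fragment:

#include <stdlib.h>

struct fragment {
	struct fragment *next;
	char *data;
};

static struct fragment *alloc_fragment(size_t len)
{
	struct fragment *frag = malloc(sizeof(*frag));

	if (!frag)
		return NULL;

	frag->next = NULL;
	frag->data = malloc(len);
	if (!frag->data) {
		free(frag);	/* release the container on partial failure */
		return NULL;
	}
	return frag;
}

int main(void)
{
	struct fragment *frag = alloc_fragment(32);

	if (frag) {
		free(frag->data);
		free(frag);
	}
	return 0;
}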
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 5eb5e8280379..0ee296cf520c 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -55,6 +55,17 @@ ifneq ($(findstring i,$(filter-out --%,$(MAKEFLAGS))),)
modpost-args += -n
endif
+ifneq ($(KBUILD_MODPOST_WARN)$(missing-input),)
+modpost-args += -w
+endif
+
+# Read out modules.order to pass in modpost.
+# Otherwise, allmodconfig would fail with "Argument list too long".
+ifdef KBUILD_MODULES
+modpost-args += -T $(MODORDER)
+modpost-deps += $(MODORDER)
+endif
+
ifeq ($(KBUILD_EXTMOD),)
# Generate the list of in-tree objects in vmlinux
@@ -113,17 +124,6 @@ modpost-args += -e $(addprefix -i , $(KBUILD_EXTRA_SYMBOLS))
endif # ($(KBUILD_EXTMOD),)
-ifneq ($(KBUILD_MODPOST_WARN)$(missing-input),)
-modpost-args += -w
-endif
-
-ifdef KBUILD_MODULES
-modpost-args += -T $(MODORDER)
-modpost-deps += $(MODORDER)
-endif
-
-# Read out modules.order to pass in modpost.
-# Otherwise, allmodconfig would fail with "Argument list too long".
quiet_cmd_modpost = MODPOST $@
cmd_modpost = \
$(if $(missing-input), \
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 539e9f765d64..525a2820976f 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -158,6 +158,7 @@ $(perf-tar-pkgs):
PHONY += help
help:
@echo ' rpm-pkg - Build both source and binary RPM kernel packages'
+ @echo ' srcrpm-pkg - Build only the source kernel RPM package'
@echo ' binrpm-pkg - Build only the binary kernel RPM package'
@echo ' deb-pkg - Build both source and binary deb kernel packages'
@echo ' bindeb-pkg - Build only the binary kernel deb package'
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index 2328f9a641da..f932aeaba71a 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -94,7 +94,6 @@
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
-#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 9c549683c627..e67e0db50b2e 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -161,6 +161,12 @@ static const char mconf_readme[] =
"(especially with a larger number of unrolled categories) than the\n"
"default mode.\n"
"\n"
+
+"Search\n"
+"-------\n"
+"Pressing the forward-slash (/) anywhere brings up a search dialog box.\n"
+"\n"
+
"Different color themes available\n"
"--------------------------------\n"
"It is possible to select different color themes using the variable\n"
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index dda00a948a01..adab28fa7f89 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -51,7 +51,8 @@ sed -e '/^DEL/d' -e 's/^\t*//' <<EOF
URL: https://www.kernel.org
$S Source: kernel-$__KERNELRELEASE.tar.gz
Provides: $PROVIDES
-$S BuildRequires: bc binutils bison dwarves elfutils-libelf-devel flex
+$S BuildRequires: bc binutils bison dwarves
+$S BuildRequires: (elfutils-libelf-devel or libelf-devel) flex
$S BuildRequires: gcc make openssl openssl-devel perl python3 rsync
# $UTS_MACHINE as a fallback of _arch in case
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 8015e4471267..386dd9d9143f 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -167,6 +167,7 @@ struct hdmi_spec {
struct hdmi_ops ops;
bool dyn_pin_out;
+ bool static_pcm_mapping;
/* hdmi interrupt trigger control flag for Nvidia codec */
bool hdmi_intr_trig_ctrl;
bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */
@@ -1525,13 +1526,16 @@ static void update_eld(struct hda_codec *codec,
*/
pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
- if (eld->eld_valid) {
- hdmi_attach_hda_pcm(spec, per_pin);
- hdmi_pcm_setup_pin(spec, per_pin);
- } else {
- hdmi_pcm_reset_pin(spec, per_pin);
- hdmi_detach_hda_pcm(spec, per_pin);
+ if (!spec->static_pcm_mapping) {
+ if (eld->eld_valid) {
+ hdmi_attach_hda_pcm(spec, per_pin);
+ hdmi_pcm_setup_pin(spec, per_pin);
+ } else {
+ hdmi_pcm_reset_pin(spec, per_pin);
+ hdmi_detach_hda_pcm(spec, per_pin);
+ }
}
+
/* if pcm_idx == -1, it means this is in monitor connection event
* we can get the correct pcm_idx now.
*/
@@ -2281,8 +2285,8 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
struct hdmi_spec *spec = codec->spec;
int idx, pcm_num;
- /* limit the PCM devices to the codec converters */
- pcm_num = spec->num_cvts;
+ /* limit the PCM devices to the codec converters or available PINs */
+ pcm_num = min(spec->num_cvts, spec->num_pins);
codec_dbg(codec, "hdmi: pcm_num set to %d\n", pcm_num);
for (idx = 0; idx < pcm_num; idx++) {
@@ -2379,6 +2383,11 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
struct hdmi_eld *pin_eld = &per_pin->sink_eld;
+ if (spec->static_pcm_mapping) {
+ hdmi_attach_hda_pcm(spec, per_pin);
+ hdmi_pcm_setup_pin(spec, per_pin);
+ }
+
pin_eld->eld_valid = false;
hdmi_present_sense(per_pin, 0);
}
@@ -4419,6 +4428,8 @@ static int patch_atihdmi(struct hda_codec *codec)
spec = codec->spec;
+ spec->static_pcm_mapping = true;
+
spec->ops.pin_get_eld = atihdmi_pin_get_eld;
spec->ops.pin_setup_infoframe = atihdmi_pin_setup_infoframe;
spec->ops.pin_hbr_setup = atihdmi_pin_hbr_setup;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e443d88f627f..3794b522c222 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7175,6 +7175,7 @@ enum {
ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN,
ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS,
+ ALC236_FIXUP_DELL_DUAL_CODECS,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9130,6 +9131,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
},
+ [ALC236_FIXUP_DELL_DUAL_CODECS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.func = alc1220_fixup_gb_dual_codecs,
+ .chained = true,
+ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -9232,6 +9239,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
+ SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 59faa5a9a714..b67617b68e50 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -304,7 +304,8 @@ static void line6_data_received(struct urb *urb)
for (;;) {
done =
line6_midibuf_read(mb, line6->buffer_message,
- LINE6_MIDI_MESSAGE_MAXLEN);
+ LINE6_MIDI_MESSAGE_MAXLEN,
+ LINE6_MIDIBUF_READ_RX);
if (done <= 0)
break;
diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
index ba0e2b7e8fe1..0838632c788e 100644
--- a/sound/usb/line6/midi.c
+++ b/sound/usb/line6/midi.c
@@ -44,7 +44,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
int req, done;
for (;;) {
- req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size);
+ req = min3(line6_midibuf_bytes_free(mb), line6->max_packet_size,
+ LINE6_FALLBACK_MAXPACKETSIZE);
done = snd_rawmidi_transmit_peek(substream, chunk, req);
if (done == 0)
@@ -56,7 +57,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
for (;;) {
done = line6_midibuf_read(mb, chunk,
- LINE6_FALLBACK_MAXPACKETSIZE);
+ LINE6_FALLBACK_MAXPACKETSIZE,
+ LINE6_MIDIBUF_READ_TX);
if (done == 0)
break;
diff --git a/sound/usb/line6/midibuf.c b/sound/usb/line6/midibuf.c
index 6a70463f82c4..e7f830f7526c 100644
--- a/sound/usb/line6/midibuf.c
+++ b/sound/usb/line6/midibuf.c
@@ -9,6 +9,7 @@
#include "midibuf.h"
+
static int midibuf_message_length(unsigned char code)
{
int message_length;
@@ -20,12 +21,7 @@ static int midibuf_message_length(unsigned char code)
message_length = length[(code >> 4) - 8];
} else {
- /*
- Note that according to the MIDI specification 0xf2 is
- the "Song Position Pointer", but this is used by Line 6
- to send sysex messages to the host.
- */
- static const int length[] = { -1, 2, -1, 2, -1, -1, 1, 1, 1, 1,
+ static const int length[] = { -1, 2, 2, 2, -1, -1, 1, 1, 1, -1,
1, 1, 1, -1, 1, 1
};
message_length = length[code & 0x0f];
@@ -125,7 +121,7 @@ int line6_midibuf_write(struct midi_buffer *this, unsigned char *data,
}
int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
- int length)
+ int length, int read_type)
{
int bytes_used;
int length1, length2;
@@ -148,9 +144,22 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
length1 = this->size - this->pos_read;
- /* check MIDI command length */
command = this->buf[this->pos_read];
+ /*
+ PODxt always has status byte lower nibble set to 0010,
+ when it means to send 0000, so we correct it here so
+ that control/program changes come on channel 1 and
+ sysex message status byte is correct
+ */
+ if (read_type == LINE6_MIDIBUF_READ_RX) {
+ if (command == 0xb2 || command == 0xc2 || command == 0xf2) {
+ unsigned char fixed = command & 0xf0;
+ this->buf[this->pos_read] = fixed;
+ command = fixed;
+ }
+ }
+ /* check MIDI command length */
if (command & 0x80) {
midi_length = midibuf_message_length(command);
this->command_prev = command;
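The hunk above corrects the status byte on receive. A standalone illustration of the masking, assuming no Line 6 driver types (the helper fix_podxt_status is invented for the example): 0xb2/0xc2/0xf2 become 0xb0/0xc0/0xf0, so control and program changes land on MIDI channel 1 and the sysex status byte is valid.

#include <stdio.h>

static unsigned char fix_podxt_status(unsigned char command)
{
	/* mask the bogus low nibble reported by the PODxt */
	if (command == 0xb2 || command == 0xc2 || command == 0xf2)
		return command & 0xf0;
	return command;
}

int main(void)
{
	unsigned char raw[] = { 0xb2, 0xc2, 0xf2, 0x90 };

	for (unsigned int i = 0; i < sizeof(raw); i++)
		printf("0x%02x -> 0x%02x\n", raw[i], fix_podxt_status(raw[i]));
	return 0;
}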
diff --git a/sound/usb/line6/midibuf.h b/sound/usb/line6/midibuf.h
index 124a8f9f7e96..542e8d836f87 100644
--- a/sound/usb/line6/midibuf.h
+++ b/sound/usb/line6/midibuf.h
@@ -8,6 +8,9 @@
#ifndef MIDIBUF_H
#define MIDIBUF_H
+#define LINE6_MIDIBUF_READ_TX 0
+#define LINE6_MIDIBUF_READ_RX 1
+
struct midi_buffer {
unsigned char *buf;
int size;
@@ -23,7 +26,7 @@ extern void line6_midibuf_destroy(struct midi_buffer *mb);
extern int line6_midibuf_ignore(struct midi_buffer *mb, int length);
extern int line6_midibuf_init(struct midi_buffer *mb, int size, int split);
extern int line6_midibuf_read(struct midi_buffer *mb, unsigned char *data,
- int length);
+ int length, int read_type);
extern void line6_midibuf_reset(struct midi_buffer *mb);
extern int line6_midibuf_write(struct midi_buffer *mb, unsigned char *data,
int length);
diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
index cd41aa7f0385..d173971e5f02 100644
--- a/sound/usb/line6/pod.c
+++ b/sound/usb/line6/pod.c
@@ -159,8 +159,9 @@ static struct line6_pcm_properties pod_pcm_properties = {
.bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
};
+
static const char pod_version_header[] = {
- 0xf2, 0x7e, 0x7f, 0x06, 0x02
+ 0xf0, 0x7e, 0x7f, 0x06, 0x02
};
static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code,
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 6ce8c488d62e..6d9381d60172 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,86 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
-/aarch64/aarch32_id_regs
-/aarch64/arch_timer
-/aarch64/debug-exceptions
-/aarch64/get-reg-list
-/aarch64/hypercalls
-/aarch64/page_fault_test
-/aarch64/psci_test
-/aarch64/vcpu_width_config
-/aarch64/vgic_init
-/aarch64/vgic_irq
-/s390x/memop
-/s390x/resets
-/s390x/sync_regs_test
-/s390x/tprot
-/x86_64/amx_test
-/x86_64/cpuid_test
-/x86_64/cr4_cpuid_sync_test
-/x86_64/debug_regs
-/x86_64/exit_on_emulation_failure_test
-/x86_64/fix_hypercall_test
-/x86_64/get_msr_index_features
-/x86_64/kvm_clock_test
-/x86_64/kvm_pv_test
-/x86_64/hyperv_clock
-/x86_64/hyperv_cpuid
-/x86_64/hyperv_evmcs
-/x86_64/hyperv_features
-/x86_64/hyperv_ipi
-/x86_64/hyperv_svm_test
-/x86_64/hyperv_tlb_flush
-/x86_64/max_vcpuid_cap_test
-/x86_64/mmio_warning_test
-/x86_64/monitor_mwait_test
-/x86_64/nested_exceptions_test
-/x86_64/nx_huge_pages_test
-/x86_64/platform_info_test
-/x86_64/pmu_event_filter_test
-/x86_64/set_boot_cpu_id
-/x86_64/set_sregs_test
-/x86_64/sev_migrate_tests
-/x86_64/smaller_maxphyaddr_emulation_test
-/x86_64/smm_test
-/x86_64/state_test
-/x86_64/svm_vmcall_test
-/x86_64/svm_int_ctl_test
-/x86_64/svm_nested_soft_inject_test
-/x86_64/svm_nested_shutdown_test
-/x86_64/sync_regs_test
-/x86_64/tsc_msrs_test
-/x86_64/tsc_scaling_sync
-/x86_64/ucna_injection_test
-/x86_64/userspace_io_test
-/x86_64/userspace_msr_exit_test
-/x86_64/vmx_apic_access_test
-/x86_64/vmx_close_while_nested_test
-/x86_64/vmx_dirty_log_test
-/x86_64/vmx_exception_with_invalid_guest_state
-/x86_64/vmx_invalid_nested_guest_state
-/x86_64/vmx_msrs_test
-/x86_64/vmx_preemption_timer_test
-/x86_64/vmx_set_nested_state_test
-/x86_64/vmx_tsc_adjust_test
-/x86_64/vmx_nested_tsc_scaling_test
-/x86_64/xapic_ipi_test
-/x86_64/xapic_state_test
-/x86_64/xen_shinfo_test
-/x86_64/xen_vmcall_test
-/x86_64/xss_msr_test
-/x86_64/vmx_pmu_caps_test
-/x86_64/triple_fault_event_test
-/access_tracking_perf_test
-/demand_paging_test
-/dirty_log_test
-/dirty_log_perf_test
-/hardware_disable_test
-/kvm_create_max_vcpus
-/kvm_page_table_test
-/max_guest_memory_test
-/memslot_modification_stress_test
-/memslot_perf_test
-/rseq_test
-/set_memory_region_test
-/steal_time
-/kvm_binary_stats_test
-/system_counter_offset_test
+*
+!/**/
+!*.c
+!*.h
+!*.S
+!*.sh
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 947676983da1..1750f91dd936 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -7,35 +7,14 @@ top_srcdir = ../../../..
include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
-# For cross-builds to work, UNAME_M has to map to ARCH and arch specific
-# directories and targets in this Makefile. "uname -m" doesn't map to
-# arch specific sub-directory names.
-#
-# UNAME_M variable to used to run the compiles pointing to the right arch
-# directories and build the right targets for these supported architectures.
-#
-# TEST_GEN_PROGS and LIBKVM are set using UNAME_M variable.
-# LINUX_TOOL_ARCH_INCLUDE is set using ARCH variable.
-#
-# x86_64 targets are named to include x86_64 as a suffix and directories
-# for includes are in x86_64 sub-directory. s390x and aarch64 follow the
-# same convention. "uname -m" doesn't result in the correct mapping for
-# s390x and aarch64.
-#
-# No change necessary for x86_64
-UNAME_M := $(shell uname -m)
-
-# Set UNAME_M for arm64 compile/install to work
-ifeq ($(ARCH),arm64)
- UNAME_M := aarch64
-endif
-# Set UNAME_M s390x compile/install to work
-ifeq ($(ARCH),s390)
- UNAME_M := s390x
-endif
-# Set UNAME_M riscv compile/install to work
-ifeq ($(ARCH),riscv)
- UNAME_M := riscv
+ifeq ($(ARCH),x86)
+ ARCH_DIR := x86_64
+else ifeq ($(ARCH),arm64)
+ ARCH_DIR := aarch64
+else ifeq ($(ARCH),s390)
+ ARCH_DIR := s390x
+else
+ ARCH_DIR := $(ARCH)
endif
LIBKVM += lib/assert.c
@@ -196,10 +175,15 @@ TEST_GEN_PROGS_riscv += kvm_page_table_test
TEST_GEN_PROGS_riscv += set_memory_region_test
TEST_GEN_PROGS_riscv += kvm_binary_stats_test
-TEST_PROGS += $(TEST_PROGS_$(UNAME_M))
-TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
-TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(UNAME_M))
-LIBKVM += $(LIBKVM_$(UNAME_M))
+TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR))
+TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR))
+TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR))
+LIBKVM += $(LIBKVM_$(ARCH_DIR))
+
+# lib.mk defines $(OUTPUT), prepends $(OUTPUT)/ to $(TEST_GEN_PROGS), and most
+# importantly defines, i.e. overwrites, $(CC) (unless `make -e` or `make CC=`,
+# which causes the environment variable to override the makefile).
+include ../lib.mk
INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
@@ -210,25 +194,23 @@ else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
+ -Wno-gnu-variable-sized-type-not-at-end \
+ -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
- -I$(<D) -Iinclude/$(UNAME_M) -I ../rseq -I.. $(EXTRA_CFLAGS) \
+ -I$(<D) -Iinclude/$(ARCH_DIR) -I ../rseq -I.. $(EXTRA_CFLAGS) \
$(KHDR_INCLUDES)
-no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
- $(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)
+no-pie-option := $(call try-run, echo 'int main(void) { return 0; }' | \
+ $(CC) -Werror $(CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
# On s390, build the testcases KVM-enabled
-pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
+pgste-option = $(call try-run, echo 'int main(void) { return 0; }' | \
$(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)
LDLIBS += -ldl
LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
-# After inclusion, $(OUTPUT) is defined and
-# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
-include ../lib.mk
-
LIBKVM_C := $(filter %.c,$(LIBKVM))
LIBKVM_S := $(filter %.S,$(LIBKVM))
LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
index 95d22cfb7b41..beb944fa6fd4 100644
--- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c
+++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
@@ -117,7 +117,7 @@ static void guest_cas(void)
GUEST_ASSERT(guest_check_lse());
asm volatile(".arch_extension lse\n"
"casal %0, %1, [%2]\n"
- :: "r" (0), "r" (TEST_DATA), "r" (guest_test_memory));
+ :: "r" (0ul), "r" (TEST_DATA), "r" (guest_test_memory));
val = READ_ONCE(*guest_test_memory);
GUEST_ASSERT_EQ(val, TEST_DATA);
}
diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
index 562c16dfbb00..f212bd8ab93d 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -14,11 +14,13 @@ static vm_vaddr_t *ucall_exit_mmio_addr;
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
- virt_pg_map(vm, mmio_gpa, mmio_gpa);
+ vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+
+ virt_map(vm, mmio_gva, mmio_gpa, 1);
vm->ucall_mmio_addr = mmio_gpa;
- write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gpa);
+ write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
}
void ucall_arch_do_ucall(vm_vaddr_t uc)
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index c88c3ace16d2..56d5ea949cbb 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -186,6 +186,15 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
+/*
+ * Initializes vm->vpages_valid to match the canonical VA space of the
+ * architecture.
+ *
+ * The default implementation is valid for architectures which split the
+ * range addressed by a single page table into a low and high region
+ * based on the MSB of the VA. On architectures with this behavior
+ * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1)), -1].
+ */
__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
sparsebit_set_num(vm->vpages_valid,
@@ -1416,10 +1425,10 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
while (npages--) {
virt_pg_map(vm, vaddr, paddr);
+ sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
+
vaddr += page_size;
paddr += page_size;
-
- sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
}
}
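The vm_vaddr_populate_bitmap() comment above describes the split into a low half [0, 2^(va_bits - 1)) and a sign-extended high half. A small worked example of that arithmetic, assuming plain C with va_bits = 48 and no selftest helpers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int va_bits = 48;
	uint64_t half = 1ULL << (va_bits - 1);

	uint64_t low_start  = 0;
	uint64_t low_end    = half - 1;		/* inclusive */
	uint64_t high_start = 0 - half;		/* sign-extended upper half */
	uint64_t high_end   = UINT64_MAX;

	/* prints 0x0000000000000000..0x00007fffffffffff and
	 * 0xffff800000000000..0xffffffffffffffff for va_bits = 48 */
	printf("low : [%#018" PRIx64 ", %#018" PRIx64 "]\n", low_start, low_end);
	printf("high: [%#018" PRIx64 ", %#018" PRIx64 "]\n", high_start, high_end);
	return 0;
}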
diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c
index 0cc0971ce60e..2f0e2ea941cc 100644
--- a/tools/testing/selftests/kvm/lib/ucall_common.c
+++ b/tools/testing/selftests/kvm/lib/ucall_common.c
@@ -4,6 +4,8 @@
#include "linux/bitmap.h"
#include "linux/atomic.h"
+#define GUEST_UCALL_FAILED -1
+
struct ucall_header {
DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
struct ucall ucalls[KVM_MAX_VCPUS];
@@ -41,7 +43,8 @@ static struct ucall *ucall_alloc(void)
struct ucall *uc;
int i;
- GUEST_ASSERT(ucall_pool);
+ if (!ucall_pool)
+ goto ucall_failed;
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (!test_and_set_bit(i, ucall_pool->in_use)) {
@@ -51,7 +54,13 @@ static struct ucall *ucall_alloc(void)
}
}
- GUEST_ASSERT(0);
+ucall_failed:
+ /*
+ * If the vCPU cannot grab a ucall structure, make a bare ucall with a
+ * magic value to signal to get_ucall() that things went sideways.
+ * GUEST_ASSERT() depends on ucall_alloc() and so cannot be used here.
+ */
+ ucall_arch_do_ucall(GUEST_UCALL_FAILED);
return NULL;
}
@@ -93,6 +102,9 @@ uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
addr = ucall_arch_get_ucall(vcpu);
if (addr) {
+ TEST_ASSERT(addr != (void *)GUEST_UCALL_FAILED,
+ "Guest failed to allocate ucall struct");
+
memcpy(uc, addr, sizeof(*uc));
vcpu_run_complete_io(vcpu);
} else {
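The ucall changes above claim a per-vCPU slot from a shared pool and fall back to a sentinel when the pool is exhausted, since GUEST_ASSERT() itself needs a slot. A rough userspace sketch of that scheme, assuming C11 atomic_fetch_or() in place of test_and_set_bit() and invented names (claim_slot, ALLOC_FAILED):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NR_SLOTS	4
#define ALLOC_FAILED	((intptr_t)-1)

static _Atomic uint32_t in_use;
static int slots[NR_SLOTS];

static intptr_t claim_slot(void)
{
	for (int i = 0; i < NR_SLOTS; i++) {
		uint32_t bit = 1u << i;

		/* like test_and_set_bit(): old bit value tells us who won */
		if (!(atomic_fetch_or(&in_use, bit) & bit))
			return (intptr_t)&slots[i];
	}
	return ALLOC_FAILED;	/* pool exhausted: signal with a sentinel */
}

int main(void)
{
	for (int i = 0; i < NR_SLOTS + 1; i++) {
		intptr_t addr = claim_slot();

		if (addr == ALLOC_FAILED)
			printf("claim %d: failed (sentinel)\n", i);
		else
			printf("claim %d: slot %p\n", i, (void *)addr);
	}
	return 0;
}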
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index c4d368d56cfe..acfa1d01e7df 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1031,7 +1031,7 @@ bool is_amd_cpu(void)
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
- *pa_bits == kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
+ *pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
*va_bits = 32;
} else {
*pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index e698306bf49d..e6587e193490 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -265,6 +265,9 @@ static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size)
slots = data->nslots;
while (--slots > 1) {
pages_per_slot = mempages / slots;
+ if (!pages_per_slot)
+ continue;
+
rempages = mempages % pages_per_slot;
if (check_slot_pages(host_page_size, guest_page_size,
pages_per_slot, rempages))
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c b/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
index 8b791eac7d5a..0cbb0e646ef8 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
@@ -193,8 +193,9 @@ static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
GUEST_SYNC(stage++);
/*
* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL.
- * Nothing to write anything to XMM regs.
*/
+ ipi_ex->vp_set.valid_bank_mask = 0;
+ hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT,
IPI_VECTOR, HV_GENERIC_SET_ALL);
nop_loop();
diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
index e497ace629c1..b34980d45648 100644
--- a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
+++ b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
@@ -41,8 +41,17 @@ static void guest_int_handler(struct ex_regs *regs)
static void l2_guest_code_int(void)
{
GUEST_ASSERT_1(int_fired == 1, int_fired);
- vmmcall();
- ud2();
+
+ /*
+ * Same as the vmmcall() function, but with a ud2 sneaked after the
+ * vmmcall. The caller injects an exception with the return address
+ * increased by 2, so the "pop rbp" must be after the ud2 and we cannot
+ * use vmmcall() directly.
+ */
+ __asm__ __volatile__("push %%rbp; vmmcall; ud2; pop %%rbp"
+ : : "a"(0xdeadbeef), "c"(0xbeefdead)
+ : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
+ "r10", "r11", "r12", "r13", "r14", "r15");
GUEST_ASSERT_1(bp_fired == 1, bp_fired);
hlt();
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 5943187e8594..ff8ecdf32ae0 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -49,11 +49,6 @@ enum {
NUM_VMX_PAGES,
};
-struct kvm_single_msr {
- struct kvm_msrs header;
- struct kvm_msr_entry entry;
-} __attribute__((packed));
-
/* The virtual machine object. */
static struct kvm_vm *vm;
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 721f6a693799..dae510c263b4 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -962,6 +962,12 @@ int main(int argc, char *argv[])
}
done:
+ struct kvm_xen_hvm_attr evt_reset = {
+ .type = KVM_XEN_ATTR_TYPE_EVTCHN,
+ .u.evtchn.flags = KVM_XEN_EVTCHN_RESET,
+ };
+ vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
+
alarm(0);
clock_gettime(CLOCK_REALTIME, &max_ts);
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index a1ab15006af3..180f1a09e6ba 100644
--- a/virt/kvm/kvm_mm.h
+++ b/virt/kvm/kvm_mm.h
@@ -14,14 +14,10 @@
#define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock)
-#define KVM_MMU_READ_LOCK(kvm) read_lock(&(kvm)->mmu_lock)
-#define KVM_MMU_READ_UNLOCK(kvm) read_unlock(&(kvm)->mmu_lock)
#else
#define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
-#define KVM_MMU_READ_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
-#define KVM_MMU_READ_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */
kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,