Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 225
1 file changed, 173 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 41e4774abdb0..3e83fed540e8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -72,6 +72,7 @@
 #include <linux/pci.h>
 #include <linux/firmware.h>
 #include <linux/component.h>
+#include <linux/dmi.h>
 
 #include <drm/display/drm_dp_mst_helper.h>
 #include <drm/display/drm_hdmi_helper.h>
@@ -119,6 +120,8 @@ MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
+#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
@@ -473,6 +476,26 @@ static void dm_pflip_high_irq(void *interrupt_params)
 		     vrr_active, (int) !e);
 }
 
+static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
+{
+	struct drm_crtc *crtc = &acrtc->base;
+	struct drm_device *dev = crtc->dev;
+	unsigned long flags;
+
+	drm_crtc_handle_vblank(crtc);
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	/* Send completion event for cursor-only commits */
+	if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+		drm_crtc_send_vblank_event(crtc, acrtc->event);
+		drm_crtc_vblank_put(crtc);
+		acrtc->event = NULL;
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
 static void dm_vupdate_high_irq(void *interrupt_params)
 {
 	struct common_irq_params *irq_params = interrupt_params;
@@ -511,7 +534,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
 		 * if a pageflip happened inside front-porch.
 		 */
 		if (vrr_active) {
-			drm_crtc_handle_vblank(&acrtc->base);
+			dm_crtc_handle_vblank(acrtc);
 
 			/* BTR processing for pre-DCE12 ASICs */
 			if (acrtc->dm_irq_params.stream &&
@@ -563,7 +586,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
 	 * to dm_vupdate_high_irq after end of front-porch.
 	 */
 	if (!vrr_active)
-		drm_crtc_handle_vblank(&acrtc->base);
+		dm_crtc_handle_vblank(acrtc);
 
 	/**
 	 * Following stuff must happen at start of vblank, for crc
@@ -1403,6 +1426,41 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
 	return false;
 }
 
+static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+		},
+	},
+	{}
+};
+
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+	const struct dmi_system_id *dmi_id;
+
+	dm->aux_hpd_discon_quirk = false;
+
+	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+	if (dmi_id) {
+		dm->aux_hpd_discon_quirk = true;
+		DRM_INFO("aux_hpd_discon_quirk attached\n");
+	}
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
@@ -1530,7 +1588,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	init_data.flags.enable_mipi_converter_optimization = true;
 
+	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
+	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
+
 	INIT_LIST_HEAD(&adev->dm.da_list);
 
+	retrieve_dmi_info(&adev->dm);
+
 	/* Display Core create. */
 	adev->dm.dc = dc_create(&init_data);
@@ -1562,6 +1626,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
 		adev->dm.dc->debug.disable_clock_gate = true;
 
+	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
+		adev->dm.dc->debug.force_subvp_mclk_switch = true;
+
 	r = dm_dmub_hw_init(adev);
 	if (r) {
 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -1617,7 +1684,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
 #endif
-	if (dc_enable_dmub_notifications(adev->dm.dc)) {
+	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
 		init_completion(&adev->dm.dmub_aux_transfer_done);
 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
 		if (!adev->dm.dmub_notify) {
@@ -1653,6 +1720,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		goto error;
 	}
 
+	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+	 * It is expected that DMUB will resend any pending notifications at this point, for
+	 * example HPD from DPIA.
+	 */
+	if (dc_is_dmub_outbox_supported(adev->dm.dc))
+		dc_enable_dmub_outbox(adev->dm.dc);
+
 	/* create fake encoders for MST */
 	dm_dp_create_fake_mst_encoders(adev);
@@ -1941,6 +2015,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
 		break;
+	case IP_VERSION(3, 1, 4):
+		dmub_asic = DMUB_ASIC_DCN314;
+		fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+		break;
 	case IP_VERSION(3, 1, 5):
 		dmub_asic = DMUB_ASIC_DCN315;
 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
@@ -2625,9 +2703,6 @@ static int dm_resume(void *handle)
 		 */
 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
 
-		if (dc_enable_dmub_notifications(adev->dm.dc))
-			amdgpu_dm_outbox_init(adev);
-
 		r = dm_dmub_hw_init(adev);
 		if (r)
 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2645,6 +2720,11 @@ static int dm_resume(void *handle)
 			}
 		}
 
+		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+			amdgpu_dm_outbox_init(adev);
+			dc_enable_dmub_outbox(adev->dm.dc);
+		}
+
 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
 
 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2666,13 +2746,15 @@ static int dm_resume(void *handle)
 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
 	dc_resource_state_construct(dm->dc, dm_state->context);
 
-	/* Re-enable outbox interrupts for DPIA. */
-	if (dc_enable_dmub_notifications(adev->dm.dc))
-		amdgpu_dm_outbox_init(adev);
-
 	/* Before powering on DC we need to re-initialize DMUB. */
 	dm_dmub_hw_resume(adev);
 
+	/* Re-enable outbox interrupts for DPIA. */
+	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+		amdgpu_dm_outbox_init(adev);
+		dc_enable_dmub_outbox(adev->dm.dc);
+	}
+
 	/* power on hardware */
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -2705,10 +2787,13 @@ static int dm_resume(void *handle)
 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
 			DRM_ERROR("KMS: Failed to detect connector\n");
 
-		if (aconnector->base.force && new_connection_type == dc_connection_none)
+		if (aconnector->base.force && new_connection_type == dc_connection_none) {
 			emulated_link_detect(aconnector->dc_link);
-		else
+		} else {
+			mutex_lock(&dm->dc_lock);
 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+			mutex_unlock(&dm->dc_lock);
+		}
 
 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
 			aconnector->fake_enable = false;
@@ -3039,6 +3124,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
 #endif
+	bool ret = false;
 
 	if (adev->dm.disable_hpd_irq)
 		return;
@@ -3070,16 +3156,20 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
 			drm_kms_helper_connector_hotplug_event(connector);
+	} else {
+		mutex_lock(&adev->dm.dc_lock);
+		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+		mutex_unlock(&adev->dm.dc_lock);
+		if (ret) {
+			amdgpu_dm_update_connector_after_detect(aconnector);
 
-	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
-		amdgpu_dm_update_connector_after_detect(aconnector);
-
-		drm_modeset_lock_all(dev);
-		dm_restore_drm_connector_state(dev, connector);
-		drm_modeset_unlock_all(dev);
+			drm_modeset_lock_all(dev);
+			dm_restore_drm_connector_state(dev, connector);
+			drm_modeset_unlock_all(dev);
 
-		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
-			drm_kms_helper_connector_hotplug_event(connector);
+			if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+				drm_kms_helper_connector_hotplug_event(connector);
+		}
 	}
 
 	mutex_unlock(&aconnector->hpd_lock);
@@ -3274,19 +3364,25 @@ out:
 			drm_modeset_unlock_all(dev);
 
 			drm_kms_helper_connector_hotplug_event(connector);
-		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+		} else {
+			bool ret = false;
 
-			if (aconnector->fake_enable)
-				aconnector->fake_enable = false;
+			mutex_lock(&adev->dm.dc_lock);
+			ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
+			mutex_unlock(&adev->dm.dc_lock);
 
-			amdgpu_dm_update_connector_after_detect(aconnector);
+			if (ret) {
+				if (aconnector->fake_enable)
+					aconnector->fake_enable = false;
+				amdgpu_dm_update_connector_after_detect(aconnector);
 
-			drm_modeset_lock_all(dev);
-			dm_restore_drm_connector_state(dev, connector);
-			drm_modeset_unlock_all(dev);
+				drm_modeset_lock_all(dev);
+				dm_restore_drm_connector_state(dev, connector);
+				drm_modeset_unlock_all(dev);
 
-			drm_kms_helper_connector_hotplug_event(connector);
+				drm_kms_helper_connector_hotplug_event(connector);
+			}
 		}
 	}
 #ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -3820,7 +3916,8 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 	adev_to_drm(adev)->mode_config.max_height = 16384;
 
 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
-	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+	/* disable prefer shadow for now due to hibernation issues */
+	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
 	/* indicates support for immediate flip */
 	adev_to_drm(adev)->mode_config.async_page_flip = true;
@@ -4221,6 +4318,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	case IP_VERSION(3, 0, 0):
 	case IP_VERSION(3, 1, 2):
 	case IP_VERSION(3, 1, 3):
+	case IP_VERSION(3, 1, 4):
 	case IP_VERSION(3, 1, 5):
 	case IP_VERSION(3, 1, 6):
 	case IP_VERSION(3, 2, 0):
@@ -4241,6 +4339,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	switch (adev->ip_versions[DCE_HWIP][0]) {
 	case IP_VERSION(3, 1, 2):
 	case IP_VERSION(3, 1, 3):
+	case IP_VERSION(3, 1, 4):
 	case IP_VERSION(3, 1, 5):
 	case IP_VERSION(3, 1, 6):
 	case IP_VERSION(3, 2, 0):
@@ -4290,23 +4389,30 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
 			emulated_link_detect(link);
 			amdgpu_dm_update_connector_after_detect(aconnector);
+		} else {
+			bool ret = false;
 
-		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
-			amdgpu_dm_update_connector_after_detect(aconnector);
-			register_backlight_device(dm, link);
-			if (dm->num_of_edps)
-				update_connector_ext_caps(aconnector);
-			if (psr_feature_enabled)
-				amdgpu_dm_set_psr_caps(link);
-
-			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
-			 * PSR is also supported.
-			 */
-			if (link->psr_settings.psr_feature_enabled)
-				adev_to_drm(adev)->vblank_disable_immediate = false;
-		}
+			mutex_lock(&dm->dc_lock);
+			ret = dc_link_detect(link, DETECT_REASON_BOOT);
+			mutex_unlock(&dm->dc_lock);
+
+			if (ret) {
+				amdgpu_dm_update_connector_after_detect(aconnector);
+				register_backlight_device(dm, link);
+				if (dm->num_of_edps)
+					update_connector_ext_caps(aconnector);
+				if (psr_feature_enabled)
+					amdgpu_dm_set_psr_caps(link);
+
+				/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
+				 * PSR is also supported.
+				 */
+				if (link->psr_settings.psr_feature_enabled)
+					adev_to_drm(adev)->vblank_disable_immediate = false;
+			}
+		}
 	}
 
 	/* Software is initialized. Now we can register interrupt handlers. */
@@ -4357,6 +4463,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	case IP_VERSION(3, 0, 1):
 	case IP_VERSION(3, 1, 2):
 	case IP_VERSION(3, 1, 3):
+	case IP_VERSION(3, 1, 4):
 	case IP_VERSION(3, 1, 5):
 	case IP_VERSION(3, 1, 6):
 	case IP_VERSION(3, 2, 0):
@@ -4545,6 +4652,7 @@ static int dm_early_init(void *handle)
 	case IP_VERSION(2, 1, 0):
 	case IP_VERSION(3, 1, 2):
 	case IP_VERSION(3, 1, 3):
+	case IP_VERSION(3, 1, 4):
 	case IP_VERSION(3, 1, 5):
 	case IP_VERSION(3, 1, 6):
 	case IP_VERSION(3, 2, 0):
@@ -5295,6 +5403,7 @@ get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_
 		add_gfx10_1_modifiers(adev, mods, &size, &capacity);
 		break;
 	case AMDGPU_FAMILY_GC_11_0_0:
+	case AMDGPU_FAMILY_GC_11_0_2:
 		add_gfx11_modifiers(adev, mods, &size, &capacity);
 		break;
 	}
@@ -5474,7 +5583,7 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
 		}
 	}
 
-	if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
+	if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
 		*pre_multiplied_alpha = false;
 }
@@ -7213,12 +7322,10 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		break;
 	}
 
-	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+	dc_result = dc_validate_stream(adev->dm.dc, stream);
+	if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
 
-	if (dc_result == DC_OK)
-		dc_result = dc_validate_stream(adev->dm.dc, stream);
-
 	if (dc_result != DC_OK) {
 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
 			      drm_mode->hdisplay,
@@ -8651,7 +8758,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
 
 	if (dc_submit_i2c(
 			ddc_service->ctx->dc,
-			ddc_service->ddc_pin->hw_info.ddc_channel,
+			ddc_service->link->link_index,
 			&cmd))
 		result = num;
@@ -8687,8 +8794,6 @@ create_i2c(struct ddc_service *ddc_service,
 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
 	i2c_set_adapdata(&i2c->base, i2c);
 	i2c->ddc_service = ddc_service;
-	if (i2c->ddc_service->ddc_pin)
-		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
 
 	return i2c;
 }
@@ -9316,6 +9421,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	unsigned long flags;
 	uint32_t target_vblank, last_flip_vblank;
 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+	bool cursor_update = false;
 	bool pflip_present = false;
 	struct {
 		struct dc_surface_update surface_updates[MAX_SURFACES];
@@ -9351,8 +9457,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
 
 		/* Cursor plane is handled after stream updates */
-		if (plane->type == DRM_PLANE_TYPE_CURSOR)
+		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+			if ((fb && crtc == pcrtc) ||
+			    (old_plane_state->fb && old_plane_state->crtc == pcrtc))
+				cursor_update = true;
+
 			continue;
+		}
 
 		if (!fb || !crtc || pcrtc != crtc)
 			continue;
@@ -9505,6 +9616,16 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				bundle->stream_update.vrr_infopacket =
 					&acrtc_state->stream->vrr_infopacket;
 		}
+	} else if (cursor_update && acrtc_state->active_planes > 0 &&
+		   acrtc_attach->base.state->event) {
+		drm_crtc_vblank_get(pcrtc);
+
+		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+		acrtc_attach->event = acrtc_attach->base.state->event;
+		acrtc_attach->base.state->event = NULL;
+
+		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 	}
 
 	/* Update the planes if changed or disable if we don't have any. */