Diffstat (limited to 'drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c')
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c  474
1 file changed, 287 insertions(+), 187 deletions(-)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 48e6e8d74c85..862e9e6bf0a5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2,7 +2,7 @@
 /*
  * Copyright (C) 2013 Red Hat
  * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Author: Rob Clark <robdclark@gmail.com>
  */
@@ -24,6 +24,7 @@
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_intf.h"
 #include "dpu_hw_ctl.h"
+#include "dpu_hw_cwb.h"
 #include "dpu_hw_dspp.h"
 #include "dpu_hw_dsc.h"
 #include "dpu_hw_merge3d.h"
@@ -58,8 +59,6 @@
 
 #define IDLE_SHORT_TIMEOUT 1
 
-#define MAX_HDISPLAY_SPLIT 1080
-
 /* timeout in frames waiting for frame done */
 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
 
@@ -135,8 +134,12 @@
  * @cur_slave: As above but for the slave encoder.
  * @hw_pp: Handle to the pingpong blocks used for the display. No.
  *        pingpong blocks can be different than num_phys_encs.
+ * @hw_cwb: Handle to the CWB muxes used for concurrent writeback
+ *        display. Number of CWB muxes can be different than
+ *        num_phys_encs.
  * @hw_dsc: Handle to the DSC blocks used for the display.
  * @dsc_mask: Bitmask of used DSC blocks.
+ * @cwb_mask: Bitmask of used CWB muxes
  * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
  *        for partial update right-only cases, such as pingpong
  *        split where virtual pingpong does not generate IRQs
@@ -179,9 +182,11 @@ struct dpu_encoder_virt {
     struct dpu_encoder_phys *cur_master;
     struct dpu_encoder_phys *cur_slave;
     struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+    struct dpu_hw_cwb *hw_cwb[MAX_CHANNELS_PER_ENC];
     struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
 
     unsigned int dsc_mask;
+    unsigned int cwb_mask;
 
     bool intfs_swapped;
 
@@ -622,9 +627,9 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
         if (dpu_enc->phys_encs[i])
             intf_count++;
 
-    /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
-    if (dpu_enc->dsc)
-        num_dsc = 2;
+    for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+        if (dpu_enc->hw_dsc[i])
+            num_dsc++;
 
     return (num_dsc > 0) && (num_dsc > intf_count);
 }
@@ -647,130 +652,51 @@ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
     return NULL;
 }
 
-static struct msm_display_topology dpu_encoder_get_topology(
-            struct dpu_encoder_virt *dpu_enc,
-            struct dpu_kms *dpu_kms,
-            struct drm_display_mode *mode,
-            struct drm_crtc_state *crtc_state,
-            struct drm_dsc_config *dsc)
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+                 struct msm_display_topology *topology,
+                 struct drm_atomic_state *state,
+                 const struct drm_display_mode *adj_mode)
 {
-    struct msm_display_topology topology = {0};
-    int i, intf_count = 0;
+    struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+    struct msm_drm_private *priv = dpu_enc->base.dev->dev_private;
+    struct msm_display_info *disp_info = &dpu_enc->disp_info;
+    struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+    struct drm_connector *connector;
+    struct drm_connector_state *conn_state;
+    struct drm_framebuffer *fb;
+    struct drm_dsc_config *dsc;
+
+    int i;
 
     for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
         if (dpu_enc->phys_encs[i])
-            intf_count++;
+            topology->num_intf++;
 
-    /* Datapath topology selection
-     *
-     * Dual display
-     * 2 LM, 2 INTF ( Split display using 2 interfaces)
-     *
-     * Single display
-     * 1 LM, 1 INTF
-     * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
-     *
-     * Add dspps to the reservation requirements if ctm is requested
-     */
-    if (intf_count == 2)
-        topology.num_lm = 2;
-    else if (!dpu_kms->catalog->caps->has_3d_merge)
-        topology.num_lm = 1;
-    else
-        topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
-
-    if (crtc_state->ctm)
-        topology.num_dspp = topology.num_lm;
-
-    topology.num_intf = intf_count;
+    dsc = dpu_encoder_get_dsc_config(drm_enc);
 
+    /* We only support 2 DSC mode (with 2 LM and 1 INTF) */
     if (dsc) {
         /*
-         * In case of Display Stream Compression (DSC), we would use
-         * 2 DSC encoders, 2 layer mixers and 1 interface
-         * this is power optimal and can drive up to (including) 4k
-         * screens
+         * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces
+         * when Display Stream Compression (DSC) is enabled,
+         * and when enough DSC blocks are available.
+         * This is power-optimal and can drive up to (including) 4k
+         * screens.
         */
-        topology.num_dsc = 2;
-        topology.num_lm = 2;
-        topology.num_intf = 1;
-    }
-
-    return topology;
-}
-
-static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms,
-                          struct drm_encoder *drm_enc,
-                          struct dpu_global_state *global_state,
-                          struct drm_crtc_state *crtc_state)
-{
-    struct dpu_crtc_state *cstate;
-    struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
-    struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
-    struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC];
-    int num_lm, num_ctl, num_dspp, i;
-
-    cstate = to_dpu_crtc_state(crtc_state);
-
-    memset(cstate->mixers, 0, sizeof(cstate->mixers));
-
-    num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
-        drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
-    num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
-        drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
-    num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
-        drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
-        ARRAY_SIZE(hw_dspp));
-
-    for (i = 0; i < num_lm; i++) {
-        int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
-
-        cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
-        cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
-        cstate->mixers[i].hw_dspp = i < num_dspp ?
-            to_dpu_hw_dspp(hw_dspp[i]) : NULL;
-    }
-
-    cstate->num_mixers = num_lm;
-}
-
-static int dpu_encoder_virt_atomic_check(
-        struct drm_encoder *drm_enc,
-        struct drm_crtc_state *crtc_state,
-        struct drm_connector_state *conn_state)
-{
-    struct dpu_encoder_virt *dpu_enc;
-    struct msm_drm_private *priv;
-    struct dpu_kms *dpu_kms;
-    struct drm_display_mode *adj_mode;
-    struct msm_display_topology topology;
-    struct msm_display_info *disp_info;
-    struct dpu_global_state *global_state;
-    struct drm_framebuffer *fb;
-    struct drm_dsc_config *dsc;
-    int ret = 0;
-
-    if (!drm_enc || !crtc_state || !conn_state) {
-        DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
-                drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
-        return -EINVAL;
+    WARN(topology->num_intf > 2,
+         "DSC topology cannot support more than 2 interfaces\n");
+    if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2)
+        topology->num_dsc = 2;
+    else
+        topology->num_dsc = 1;
     }
 
-    dpu_enc = to_dpu_encoder_virt(drm_enc);
-    DPU_DEBUG_ENC(dpu_enc, "\n");
-
-    priv = drm_enc->dev->dev_private;
-    disp_info = &dpu_enc->disp_info;
-    dpu_kms = to_dpu_kms(priv->kms);
-    adj_mode = &crtc_state->adjusted_mode;
-    global_state = dpu_kms_get_global_state(crtc_state->state);
-    if (IS_ERR(global_state))
-        return PTR_ERR(global_state);
-
-    trace_dpu_enc_atomic_check(DRMID(drm_enc));
-
-    dsc = dpu_encoder_get_dsc_config(drm_enc);
-
-    topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
+    connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+    if (!connector)
+        return;
+    conn_state = drm_atomic_get_new_connector_state(state, connector);
+    if (!conn_state)
+        return;
 
     /*
      * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
@@ -781,34 +707,45 @@ static int dpu_encoder_virt_atomic_check(
         fb = conn_state->writeback_job->fb;
 
         if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
-            topology.needs_cdm = true;
+            topology->num_cdm++;
     } else if (disp_info->intf_type == INTF_DP) {
         if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
-            topology.needs_cdm = true;
+            topology->num_cdm++;
     }
+}
 
-    if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
-        crtc_state->mode_changed = true;
-    else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
-        crtc_state->mode_changed = true;
-
-    /*
-     * Release and Allocate resources on every modeset
-     * Dont allocate when active is false.
-     */
-    if (drm_atomic_crtc_needs_modeset(crtc_state)) {
-        dpu_rm_release(global_state, drm_enc);
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state)
+{
+    struct drm_connector *connector;
+    struct drm_connector_state *conn_state;
+    struct drm_framebuffer *fb;
+    struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
 
-        if (!crtc_state->active_changed || crtc_state->enable)
-            ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
-                    drm_enc, crtc_state, &topology);
-        if (!ret)
-            dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
-                              global_state, crtc_state);
-    }
+    if (!drm_enc || !state)
+        return false;
 
-    trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
+    connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+    if (!connector)
+        return false;
 
-    return ret;
+    conn_state = drm_atomic_get_new_connector_state(state, connector);
+
+    /**
+     * These checks are duplicated from dpu_encoder_update_topology() since
+     * CRTC and encoder don't hold topology information
+     */
+    if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
+        fb = conn_state->writeback_job->fb;
+        if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) {
+            if (!dpu_enc->cur_master->hw_cdm)
+                return true;
+        } else {
+            if (dpu_enc->cur_master->hw_cdm)
+                return true;
+        }
+    }
+
+    return false;
 }
 
 static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
@@ -1219,8 +1156,12 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
     struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
     struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
     struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+    struct dpu_hw_blk *hw_cwb[MAX_CHANNELS_PER_ENC];
     int num_ctl, num_pp, num_dsc;
+    int num_cwb = 0;
+    bool is_cwb_encoder;
     unsigned int dsc_mask = 0;
+    unsigned int cwb_mask = 0;
     int i;
 
     if (!drm_enc) {
@@ -1233,6 +1174,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 
     priv = drm_enc->dev->dev_private;
     dpu_kms = to_dpu_kms(priv->kms);
+    is_cwb_encoder = drm_crtc_in_clone_mode(crtc_state) &&
+            dpu_enc->disp_info.intf_type == INTF_WB;
 
     global_state = dpu_kms_get_existing_global_state(dpu_kms);
     if (IS_ERR_OR_NULL(global_state)) {
@@ -1243,18 +1186,38 @@
     trace_dpu_enc_mode_set(DRMID(drm_enc));
 
     /* Query resource that have been reserved in atomic check step. */
-    num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
-        drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
-        ARRAY_SIZE(hw_pp));
+    if (is_cwb_encoder) {
+        num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+                               drm_enc->crtc,
+                               DPU_HW_BLK_DCWB_PINGPONG,
+                               hw_pp, ARRAY_SIZE(hw_pp));
+        num_cwb = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+                                drm_enc->crtc,
+                                DPU_HW_BLK_CWB,
+                                hw_cwb, ARRAY_SIZE(hw_cwb));
+    } else {
+        num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+                               drm_enc->crtc,
+                               DPU_HW_BLK_PINGPONG, hw_pp,
+                               ARRAY_SIZE(hw_pp));
+    }
+
+    for (i = 0; i < num_cwb; i++) {
+        dpu_enc->hw_cwb[i] = to_dpu_hw_cwb(hw_cwb[i]);
+        cwb_mask |= BIT(dpu_enc->hw_cwb[i]->idx - CWB_0);
+    }
+
+    dpu_enc->cwb_mask = cwb_mask;
+
     num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
-        drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+        drm_enc->crtc, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
 
     for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
         dpu_enc->hw_pp[i] = i < num_pp ?
             to_dpu_hw_pingpong(hw_pp[i]) : NULL;
 
     num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
-                        drm_enc->base.id, DPU_HW_BLK_DSC,
+                        drm_enc->crtc, DPU_HW_BLK_DSC,
                         hw_dsc, ARRAY_SIZE(hw_dsc));
     for (i = 0; i < num_dsc; i++) {
         dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
@@ -1268,7 +1231,7 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
         struct dpu_hw_blk *hw_cdm = NULL;
 
         dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
-                          drm_enc->base.id, DPU_HW_BLK_CDM,
+                          drm_enc->crtc, DPU_HW_BLK_CDM,
                           &hw_cdm, 1);
         dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
     }
@@ -1447,7 +1410,7 @@ static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
     /* after phys waits for frame-done, should be no more frames pending */
     if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
         DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
-        del_timer_sync(&dpu_enc->frame_done_timer);
+        timer_delete_sync(&dpu_enc->frame_done_timer);
     }
 
     dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
@@ -1619,7 +1582,7 @@ void dpu_encoder_frame_done_callback(
 
         if (!dpu_enc->frame_busy_mask[0]) {
             atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
-            del_timer(&dpu_enc->frame_done_timer);
+            timer_delete(&dpu_enc->frame_done_timer);
 
             dpu_encoder_resource_control(drm_enc,
                     DPU_ENC_RC_EVENT_FRAME_DONE);
@@ -1654,6 +1617,7 @@ static void dpu_encoder_off_work(struct work_struct *work)
 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
         struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
 {
+    struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
     struct dpu_hw_ctl *ctl;
     int pending_kickoff_cnt;
     u32 ret = UINT_MAX;
@@ -1671,6 +1635,15 @@
 
     pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
 
+    /* Return early if encoder is writeback and in clone mode */
+    if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
+        dpu_enc->cwb_mask) {
+        DPU_DEBUG("encoder %d skip flush for concurrent writeback encoder\n",
+              DRMID(drm_enc));
+        return;
+    }
+
     if (extra_flush_bits && ctl->ops.update_pending_flush)
         ctl->ops.update_pending_flush(ctl, extra_flush_bits);
 
@@ -1693,6 +1666,8 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
  */
 static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
 {
+    struct dpu_encoder_virt *dpu_enc;
+
     if (!phys) {
         DPU_ERROR("invalid argument(s)\n");
         return;
@@ -1703,6 +1678,14 @@ static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
         return;
     }
 
+    dpu_enc = to_dpu_encoder_virt(phys->parent);
+
+    if (phys->parent->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
+        dpu_enc->cwb_mask) {
+        DPU_DEBUG("encoder %d CWB enabled, skipping\n", DRMID(phys->parent));
+        return;
+    }
+
     if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
         phys->ops.trigger_start(phys);
 }
@@ -2020,7 +2003,6 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
 static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
                  struct drm_dsc_config *dsc)
 {
-    /* coding only for 2LM, 2enc, 1 dsc config */
     struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
     struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
     struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
@@ -2030,22 +2012,24 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
     int dsc_common_mode;
     int pic_width;
     u32 initial_lines;
+    int num_dsc = 0;
     int i;
 
     for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
         hw_pp[i] = dpu_enc->hw_pp[i];
         hw_dsc[i] = dpu_enc->hw_dsc[i];
 
-        if (!hw_pp[i] || !hw_dsc[i]) {
-            DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
-            return;
-        }
+        if (!hw_pp[i] || !hw_dsc[i])
+            break;
+
+        num_dsc++;
     }
 
-    dsc_common_mode = 0;
     pic_width = dsc->pic_width;
 
-    dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+    dsc_common_mode = 0;
+    if (num_dsc > 1)
+        dsc_common_mode |= DSC_MODE_SPLIT_PANEL;
     if (dpu_encoder_use_dsc_merge(enc_master->parent))
         dsc_common_mode |= DSC_MODE_MULTIPLEX;
     if (enc_master->intf_mode == INTF_MODE_VIDEO)
@@ -2054,14 +2038,10 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
     this_frame_slices = pic_width / dsc->slice_width;
     intf_ip_w = this_frame_slices * dsc->slice_width;
 
-    /*
-     * dsc merge case: when using 2 encoders for the same stream,
-     * no. of slices need to be same on both the encoders.
-     */
-    enc_ip_w = intf_ip_w / 2;
+    enc_ip_w = intf_ip_w / num_dsc;
     initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
 
-    for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+    for (i = 0; i < num_dsc; i++)
         dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i], dsc,
                      dsc_common_mode, initial_lines);
 }
@@ -2135,6 +2115,25 @@ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
 }
 
 /**
+ * dpu_encoder_start_frame_done_timer - Start the encoder frame done timer
+ * @drm_enc: Pointer to drm encoder structure
+ */
+void dpu_encoder_start_frame_done_timer(struct drm_encoder *drm_enc)
+{
+    struct dpu_encoder_virt *dpu_enc;
+    unsigned long timeout_ms;
+
+    dpu_enc = to_dpu_encoder_virt(drm_enc);
+    timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+        drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
+
+    atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+    mod_timer(&dpu_enc->frame_done_timer,
+          jiffies + msecs_to_jiffies(timeout_ms));
+
+}
+
+/**
  * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
  *    (i.e. ctl flush and start) immediately.
  * @drm_enc: encoder pointer
@@ -2143,7 +2142,6 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
 {
     struct dpu_encoder_virt *dpu_enc;
     struct dpu_encoder_phys *phys;
-    unsigned long timeout_ms;
     unsigned int i;
 
     DPU_ATRACE_BEGIN("encoder_kickoff");
@@ -2151,13 +2149,6 @@
 
     trace_dpu_enc_kickoff(DRMID(drm_enc));
 
-    timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
-        drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
-
-    atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
-    mod_timer(&dpu_enc->frame_done_timer,
-          jiffies + msecs_to_jiffies(timeout_ms));
-
     /* All phys encs are ready to go, trigger the kickoff */
     _dpu_encoder_kickoff_phys(dpu_enc);
 
@@ -2183,22 +2174,22 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
     memset(&mixer, 0, sizeof(mixer));
 
     /* reset all mixers for this encoder */
-    if (phys_enc->hw_ctl->ops.clear_all_blendstages)
-        phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+    if (ctl->ops.clear_all_blendstages)
+        ctl->ops.clear_all_blendstages(ctl);
 
     global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
 
     num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
-            phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+            phys_enc->parent->crtc, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
 
     for (i = 0; i < num_lm; i++) {
         hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
-        if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
-            phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
+        if (ctl->ops.update_pending_flush_mixer)
+            ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
 
         /* clear all blendstages */
-        if (phys_enc->hw_ctl->ops.setup_blendstage)
-            phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+        if (ctl->ops.setup_blendstage)
+            ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
     }
 }
 
@@ -2250,7 +2241,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
 
     dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
 
-    phys_enc->hw_ctl->ops.reset(ctl);
+    ctl->ops.reset(ctl);
 
     dpu_encoder_helper_reset_mixers(phys_enc);
 
@@ -2265,8 +2256,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
             phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
 
         /* mark WB flush as pending */
-        if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
-            phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
+        if (ctl->ops.update_pending_flush_wb)
+            ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
     } else {
         for (i = 0; i < dpu_enc->num_phys_encs; i++) {
             if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
@@ -2275,8 +2266,8 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
                         PINGPONG_NONE);
 
             /* mark INTF flush as pending */
-            if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
-                phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
+            if (ctl->ops.update_pending_flush_intf)
+                ctl->ops.update_pending_flush_intf(ctl,
                         dpu_enc->phys_encs[i]->hw_intf->idx);
         }
     }
@@ -2284,12 +2275,15 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
     if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
         phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
 
+    if (dpu_enc->cwb_mask)
+        dpu_encoder_helper_phys_setup_cwb(phys_enc, false);
+
     /* reset the merge 3D HW block */
     if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
         phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
                 BLEND_3D_NONE);
-        if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
-            phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
+        if (ctl->ops.update_pending_flush_merge_3d)
+            ctl->ops.update_pending_flush_merge_3d(ctl,
                     phys_enc->hw_pp->merge_3d->idx);
     }
 
@@ -2297,9 +2291,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
         if (phys_enc->hw_cdm->ops.bind_pingpong_blk && phys_enc->hw_pp)
             phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
                                 PINGPONG_NONE);
-        if (phys_enc->hw_ctl->ops.update_pending_flush_cdm)
-            phys_enc->hw_ctl->ops.update_pending_flush_cdm(phys_enc->hw_ctl,
-                            phys_enc->hw_cdm->idx);
+        if (ctl->ops.update_pending_flush_cdm)
+            ctl->ops.update_pending_flush_cdm(ctl,
+                              phys_enc->hw_cdm->idx);
     }
 
     if (dpu_enc->dsc) {
@@ -2310,6 +2304,7 @@
     intf_cfg.stream_sel = 0; /* Don't care value for video mode */
     intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
     intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
+    intf_cfg.cwb = dpu_enc->cwb_mask;
 
     if (phys_enc->hw_intf)
         intf_cfg.intf = phys_enc->hw_intf->idx;
@@ -2327,6 +2322,68 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
         ctl->ops.clear_pending_flush(ctl);
 }
 
+void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc,
+                       bool enable)
+{
+    struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+    struct dpu_hw_cwb *hw_cwb;
+    struct dpu_hw_ctl *hw_ctl;
+    struct dpu_hw_cwb_setup_cfg cwb_cfg;
+
+    struct dpu_kms *dpu_kms;
+    struct dpu_global_state *global_state;
+    struct dpu_hw_blk *rt_pp_list[MAX_CHANNELS_PER_ENC];
+    int num_pp;
+
+    if (!phys_enc->hw_wb)
+        return;
+
+    hw_ctl = phys_enc->hw_ctl;
+
+    if (!phys_enc->hw_ctl) {
+        DPU_DEBUG("[wb:%d] no ctl assigned\n",
+              phys_enc->hw_wb->idx - WB_0);
+        return;
+    }
+
+    dpu_kms = phys_enc->dpu_kms;
+    global_state = dpu_kms_get_existing_global_state(dpu_kms);
+    num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+                           phys_enc->parent->crtc,
+                           DPU_HW_BLK_PINGPONG, rt_pp_list,
+                           ARRAY_SIZE(rt_pp_list));
+
+    if (num_pp == 0 || num_pp > MAX_CHANNELS_PER_ENC) {
+        DPU_DEBUG_ENC(dpu_enc, "invalid num_pp %d\n", num_pp);
+        return;
+    }
+
+    /*
+     * The CWB mux supports using LM or DSPP as tap points. For now,
+     * always use LM tap point
+     */
+    cwb_cfg.input = INPUT_MODE_LM_OUT;
+
+    for (int i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+        hw_cwb = dpu_enc->hw_cwb[i];
+        if (!hw_cwb)
+            continue;
+
+        if (enable) {
+            struct dpu_hw_pingpong *hw_pp =
+                    to_dpu_hw_pingpong(rt_pp_list[i]);
+            cwb_cfg.pp_idx = hw_pp->idx;
+        } else {
+            cwb_cfg.pp_idx = PINGPONG_NONE;
+        }
+
+        hw_cwb->ops.config_cwb(hw_cwb, &cwb_cfg);
+
+        if (hw_ctl->ops.update_pending_flush_cwb)
+            hw_ctl->ops.update_pending_flush_cwb(hw_ctl, hw_cwb->idx);
+    }
+}
+
 /**
  * dpu_encoder_helper_phys_setup_cdm - setup chroma down sampling block
  * @phys_enc: Pointer to physical encoder
@@ -2513,6 +2570,38 @@ static int dpu_encoder_virt_add_phys_encs(
     return 0;
 }
 
+/**
+ * dpu_encoder_get_clones - Calculate the possible_clones for DPU encoder
+ * @drm_enc: DRM encoder pointer
+ * Returns: possible_clones mask
+ */
+uint32_t dpu_encoder_get_clones(struct drm_encoder *drm_enc)
+{
+    struct drm_encoder *curr;
+    int type = drm_enc->encoder_type;
+    uint32_t clone_mask = drm_encoder_mask(drm_enc);
+
+    /*
+     * Set writeback as possible clones of real-time DSI encoders and vice
+     * versa
+     *
+     * Writeback encoders can't be clones of each other and DSI
+     * encoders can't be clones of each other.
+     *
+     * TODO: Add DP encoders as valid possible clones for writeback encoders
+     * (and vice versa) once concurrent writeback has been validated for DP
+     */
+    drm_for_each_encoder(curr, drm_enc->dev) {
+        if ((type == DRM_MODE_ENCODER_VIRTUAL &&
+             curr->encoder_type == DRM_MODE_ENCODER_DSI) ||
+            (type == DRM_MODE_ENCODER_DSI &&
+             curr->encoder_type == DRM_MODE_ENCODER_VIRTUAL))
+            clone_mask |= drm_encoder_mask(curr);
+    }
+
+    return clone_mask;
+}
+
 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
                  struct dpu_kms *dpu_kms,
                  struct msm_display_info *disp_info)
@@ -2630,7 +2719,6 @@ static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
     .atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
     .atomic_disable = dpu_encoder_virt_atomic_disable,
     .atomic_enable = dpu_encoder_virt_atomic_enable,
-    .atomic_check = dpu_encoder_virt_atomic_check,
 };
 
 static const struct drm_encoder_funcs dpu_encoder_funcs = {
@@ -2789,6 +2877,18 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
 }
 
 /**
+ * dpu_encoder_helper_get_cwb_mask - get CWB blocks mask for the DPU encoder
+ * @phys_enc: Pointer to physical encoder structure
+ */
+unsigned int dpu_encoder_helper_get_cwb_mask(struct dpu_encoder_phys *phys_enc)
+{
+    struct drm_encoder *encoder = phys_enc->parent;
+    struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+    return dpu_enc->cwb_mask;
+}
+
+/**
  * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
  * This helper function is used by physical encoder to get DSC blocks mask
  * used for this encoder.
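
A few standalone sketches may help when reviewing the patch. First, the new num_dsc selection in dpu_encoder_update_topology(): two DSC blocks are requested when the topology drives two interfaces or when the catalog provides at least two DSC blocks, otherwise one. A minimal sketch of that rule (plain C; the helper name and sample values are invented for illustration):

    #include <stdio.h>

    /* mirrors the patch's rule: prefer 2 DSC blocks for 2 interfaces, or for
     * DSC merge when the catalog has at least 2; fall back to 1 otherwise */
    static int pick_num_dsc(int num_intf, int catalog_dsc_count)
    {
        return (num_intf >= 2 || catalog_dsc_count >= 2) ? 2 : 1;
    }

    int main(void)
    {
        printf("%d\n", pick_num_dsc(1, 1)); /* 1: single INTF, single DSC block */
        printf("%d\n", pick_num_dsc(1, 4)); /* 2: DSC merge onto one INTF */
        printf("%d\n", pick_num_dsc(2, 2)); /* 2: one DSC per INTF */
        return 0;
    }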
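
dpu_encoder_prep_dsc() now splits the interface width across however many DSC blocks were actually bound (enc_ip_w = intf_ip_w / num_dsc) instead of hard-coding a divisor of two. A worked example of the arithmetic, assuming a made-up 2560-pixel-wide mode with 640-pixel DSC slices:

    #include <stdio.h>

    int main(void)
    {
        /* assumed mode and slice geometry, not taken from the patch */
        int pic_width = 2560, slice_width = 640, num_dsc = 2;

        int this_frame_slices = pic_width / slice_width; /* 4 slices */
        int intf_ip_w = this_frame_slices * slice_width; /* 2560 px */
        int enc_ip_w = intf_ip_w / num_dsc;              /* 1280 px per DSC */

        /* with num_dsc == 1 the full 2560 px feeds the single encoder and
         * DSC_MODE_SPLIT_PANEL is left out of dsc_common_mode */
        printf("%d px per DSC encoder\n", enc_ip_w);
        return 0;
    }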
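
The frame-done watchdog moved from dpu_encoder_kickoff() into the new dpu_encoder_start_frame_done_timer(), but the arithmetic is unchanged: five frame periods (DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES), scaled by the mode's refresh rate. A worked example, assuming a 60 Hz mode:

    #include <stdio.h>

    #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5

    int main(void)
    {
        int vrefresh = 60; /* assumed refresh rate in Hz */

        /* five frame periods, expressed in milliseconds */
        unsigned long timeout_ms =
            DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / vrefresh;

        printf("timeout = %lu ms\n", timeout_ms); /* 83 ms at 60 Hz, 41 ms at 120 Hz */
        return 0;
    }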
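
The cwb_mask bookkeeping records each CWB mux handed out by the resource manager as a bit relative to CWB_0; the same mask later reaches the CTL through intf_cfg.cwb and is exposed via dpu_encoder_helper_get_cwb_mask(). A sketch of the bit arithmetic (the enum values and the two-mux assignment are made up; the real indices live in the driver's headers):

    #include <stdio.h>

    /* stand-ins for the driver's CWB block indices */
    enum cwb { CWB_0 = 1, CWB_1, CWB_2, CWB_3 };

    int main(void)
    {
        unsigned int cwb_mask = 0;
        enum cwb assigned[] = { CWB_0, CWB_1 }; /* pretend the RM assigned two muxes */

        /* same pattern as BIT(hw_cwb->idx - CWB_0) in the patch */
        for (int i = 0; i < 2; i++)
            cwb_mask |= 1u << (assigned[i] - CWB_0);

        printf("cwb_mask = 0x%x\n", cwb_mask); /* 0x3: both muxes in use */
        return 0;
    }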
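
Finally, dpu_encoder_get_clones() computes the DRM possible_clones mask: an encoder is always listed as its own clone, and DSI and writeback (VIRTUAL) encoders are marked as mutual clones, while same-type pairs are not. Since drm_encoder_mask() is just 1 << index, the pairing rule can be modelled with plain integers (the encoder list below is invented):

    #include <stdio.h>

    #define ENC_DSI     0 /* stand-in for DRM_MODE_ENCODER_DSI */
    #define ENC_VIRTUAL 1 /* stand-in for DRM_MODE_ENCODER_VIRTUAL (writeback) */

    struct enc { int index, type; };

    /* DSI and writeback may clone each other; same-type encoders may not */
    static unsigned int get_clones(const struct enc *self, const struct enc *all, int n)
    {
        unsigned int mask = 1u << self->index; /* always its own clone */

        for (int i = 0; i < n; i++)
            if ((self->type == ENC_VIRTUAL && all[i].type == ENC_DSI) ||
                (self->type == ENC_DSI && all[i].type == ENC_VIRTUAL))
                mask |= 1u << all[i].index;
        return mask;
    }

    int main(void)
    {
        struct enc encs[] = { {0, ENC_DSI}, {1, ENC_DSI}, {2, ENC_VIRTUAL} };

        /* DSI encoder 0 pairs with the writeback encoder only: 0b101 */
        printf("0x%x\n", get_clones(&encs[0], encs, 3));
        return 0;
    }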