Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_psr.c')
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c  230
1 file changed, 125 insertions(+), 105 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 0b021acb330f..4e938bad808c 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -44,6 +44,7 @@
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
+#include "intel_vblank.h"
#include "skl_universal_plane.h"
/**
@@ -154,7 +155,7 @@
*
* Unfortunately CHICKEN_TRANS itself seems to be double buffered
* and thus won't latch until the first vblank. So with DC states
- * enabled the register effctively uses the reset value during DC5
+ * enabled the register effectively uses the reset value during DC5
* exit+PSR exit sequence, and thus the bit does nothing until
* latched by the vblank that it was trying to prevent from being
* generated in the first place. So we should probably call this
@@ -171,7 +172,7 @@
* CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
*
* On BDW without this bit is no vblanks whatsoever are
- * generated after PSR exit. On HSW this has no apparant effect.
+ * generated after PSR exit. On HSW this has no apparent effect.
* WaPsrDPRSUnmaskVBlankInSRD says to set this.
*
* The rest of the bits are more self-explanatory and/or
@@ -185,7 +186,7 @@
* has_psr + has_panel_replay: Panel Replay
* has_psr + has_panel_replay + has_sel_update: Panel Replay Selective Update
*
- * Description of some intel_psr varibles. enabled, panel_replay_enabled,
+ * Description of some intel_psr variables. enabled, panel_replay_enabled,
* sel_update_enabled
*
* enabled (alone): PSR1
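
As a quick cross-check of the feature-flag combinations described above, the mapping can be written as a small standalone lookup. This is only a sketch with stand-in types, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the three feature flags described above (hypothetical type). */
struct psr_flags {
	bool has_psr;
	bool has_panel_replay;
	bool has_sel_update;
};

static const char *psr_mode_name(struct psr_flags f)
{
	if (!f.has_psr)
		return "PSR/Panel Replay disabled";
	if (f.has_panel_replay)
		return f.has_sel_update ? "Panel Replay Selective Update" : "Panel Replay";
	return f.has_sel_update ? "PSR2 (Selective Update)" : "PSR1";
}

int main(void)
{
	struct psr_flags pr_su = { true, true, true };

	printf("%s\n", psr_mode_name(pr_su)); /* Panel Replay Selective Update */
	return 0;
}
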
@@ -814,8 +815,8 @@ static void intel_psr_enable_sink_alpm(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
}
-void intel_psr_enable_sink(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+static void intel_psr_enable_sink(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
intel_psr_enable_sink_alpm(intel_dp, crtc_state);
@@ -827,6 +828,13 @@ void intel_psr_enable_sink(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
+void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
+{
+ if (CAN_PANEL_REPLAY(intel_dp))
+ drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
+ DP_PANEL_REPLAY_ENABLE);
+}
+
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1043,7 +1051,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
};
/*
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
- * comments bellow for more information
+ * comments below for more information
*/
int tmp;
@@ -1991,18 +1999,25 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
if (!psr_interrupt_error_check(intel_dp))
return;
- if (intel_dp->psr.panel_replay_enabled) {
+ if (intel_dp->psr.panel_replay_enabled)
drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
- } else {
+ else
drm_dbg_kms(display->drm, "Enabling PSR%s\n",
intel_dp->psr.sel_update_enabled ? "2" : "1");
- /*
- * Panel replay has to be enabled before link training: doing it
- * only for PSR here.
- */
- intel_psr_enable_sink(intel_dp, crtc_state);
- }
+ /*
+ * Enabling here only for PSR. Panel Replay enable bit is already
+ * written at this point. See
+ * intel_psr_panel_replay_enable_sink. Modifiers/options:
+ * - Selective Update
+ * - Region Early Transport
+ * - Selective Update Region Scanline Capture
+ * - VSC_SDP_CRC
+ * - HPD on different Errors
+ * - CRC verification
+ * are written for PSR and Panel Replay here.
+ */
+ intel_psr_enable_sink(intel_dp, crtc_state);
if (intel_dp_is_edp(intel_dp))
intel_snps_phy_update_psr_power_state(&dig_port->base, true);
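
The new ordering, with the Panel Replay enable bit written to the sink before link training while the PSR enable bit and the shared modifier bits are written at enable time, can be sketched roughly as below. The helpers are stand-ins and do not mirror the real i915 call chain:

#include <stdbool.h>
#include <stdio.h>

static void panel_replay_enable_sink(void)
{
	puts("DPCD: PANEL_REPLAY_CONFIG <- DP_PANEL_REPLAY_ENABLE");
}

static void link_training(void)
{
	puts("link training");
}

static void psr_enable_sink(bool panel_replay)
{
	/* Shared modifiers: SU, early transport, scanline capture, CRC, ... */
	puts("DPCD: write modifier/option bits");
	if (!panel_replay)
		puts("DPCD: set the PSR enable bit");
}

int main(void)
{
	bool panel_replay = true;

	if (panel_replay)
		panel_replay_enable_sink();	/* must precede link training */
	link_training();
	psr_enable_sink(panel_replay);		/* at PSR/Panel Replay enable time */
	return 0;
}
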
@@ -2172,7 +2187,8 @@ void intel_psr_disable(struct intel_dp *intel_dp,
if (!old_crtc_state->has_psr)
return;
- if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp)))
+ if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
+ !CAN_PANEL_REPLAY(intel_dp)))
return;
mutex_lock(&intel_dp->psr.lock);
@@ -2275,6 +2291,27 @@ bool intel_psr_needs_block_dc_vblank(const struct intel_crtc_state *crtc_state)
return false;
}
+/**
+ * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
+ * @dsb: DSB context
+ * @state: the atomic state
+ * @crtc: the CRTC
+ *
+ * Generate PSR "Frame Change" event.
+ */
+void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
+ struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_pre_commit_crtc_state(state, crtc);
+ struct intel_display *display = to_intel_display(crtc);
+
+ if (crtc_state->has_psr)
+ intel_de_write_dsb(display, dsb,
+ CURSURFLIVE(display, crtc->pipe), 0);
+}
+
static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
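
A minimal standalone model of the "Frame Change" kick, assuming (as the driver already does for Display WA #0884) that the write to CURSURFLIVE itself acts as the trigger. The register name and offset below are illustrative stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CURSURFLIVE_A	0x700ac	/* illustrative offset, pipe A */

static void mmio_write(uint32_t reg, uint32_t val)
{
	printf("MMIO 0x%05x <- 0x%08x\n", reg, val);
}

/* Only meaningful while PSR/Panel Replay owns frame delivery. */
static void trigger_frame_change(bool has_psr)
{
	if (has_psr)
		mmio_write(CURSURFLIVE_A, 0);	/* the write itself is the kick */
}

int main(void)
{
	trigger_frame_change(true);
	return 0;
}
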
@@ -2310,18 +2347,9 @@ static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
-static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
+static void intel_psr_force_update(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
-
- if (intel_dp->psr.psr2_sel_fetch_enabled)
- intel_de_write(display,
- PSR2_MAN_TRK_CTL(display, cpu_transcoder),
- man_trk_ctl_enable_bit_get(display) |
- man_trk_ctl_partial_frame_bit_get(display) |
- man_trk_ctl_single_full_frame_bit_get(display) |
- man_trk_ctl_continuos_full_frame(display));
/*
* Display WA #0884: skl+
@@ -2339,7 +2367,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
}
-void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
+void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -2353,20 +2382,23 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- lockdep_assert_held(&intel_dp->psr.lock);
- if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
+ if (!dsb)
+ lockdep_assert_held(&intel_dp->psr.lock);
+
+ if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
return;
break;
}
- intel_de_write(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder),
- crtc_state->psr2_man_track_ctl);
+ intel_de_write_dsb(display, dsb,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
+ crtc_state->psr2_man_track_ctl);
if (!crtc_state->enable_psr2_su_region_et)
return;
- intel_de_write(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
- crtc_state->pipe_srcsz_early_tpt);
+ intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
+ crtc_state->pipe_srcsz_early_tpt);
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
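
The dsb parameter follows the usual dual-path write pattern: presumably intel_de_write_dsb() queues the write into the DSB command buffer when a DSB context is given and issues an immediate MMIO write otherwise, which is also why the lockdep assert only applies to the non-DSB path. A standalone sketch of that dispatch, with stand-in types rather than the i915 helpers:

#include <stdint.h>
#include <stdio.h>

struct dsb { int n_cmds; };

static void dsb_queue_write(struct dsb *dsb, uint32_t reg, uint32_t val)
{
	dsb->n_cmds++;
	printf("DSB[%d]: write 0x%05x = 0x%08x\n", dsb->n_cmds, reg, val);
}

static void mmio_write(uint32_t reg, uint32_t val)
{
	printf("MMIO: write 0x%05x = 0x%08x\n", reg, val);
}

/* Mirrors the intel_de_write_dsb() idea: DSB if available, MMIO otherwise. */
static void de_write(struct dsb *dsb, uint32_t reg, uint32_t val)
{
	if (dsb)
		dsb_queue_write(dsb, reg, val);
	else
		mmio_write(reg, val);
}

int main(void)
{
	struct dsb dsb = { 0 };

	de_write(&dsb, 0x60910, 0x80000000);	/* illustrative offset/value, executed by HW later */
	de_write(NULL, 0x60910, 0x80000000);	/* immediate CPU write */
	return 0;
}
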
@@ -2381,7 +2413,6 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
val |= man_trk_ctl_partial_frame_bit_get(display);
if (full_update) {
- val |= man_trk_ctl_single_full_frame_bit_get(display);
val |= man_trk_ctl_continuos_full_frame(display);
goto exit;
}
@@ -2790,32 +2821,30 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
old_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_psr *psr = &intel_dp->psr;
- bool needs_to_disable = false;
mutex_lock(&psr->lock);
- /*
- * Reasons to disable:
- * - PSR disabled in new state
- * - All planes will go inactive
- * - Changing between PSR versions
- * - Region Early Transport changing
- * - Display WA #1136: skl, bxt
- */
- needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
- needs_to_disable |= !new_crtc_state->has_psr;
- needs_to_disable |= !new_crtc_state->active_planes;
- needs_to_disable |= new_crtc_state->has_sel_update != psr->sel_update_enabled;
- needs_to_disable |= new_crtc_state->enable_psr2_su_region_et !=
- psr->su_region_et_enabled;
- needs_to_disable |= DISPLAY_VER(i915) < 11 &&
- new_crtc_state->wm_level_disabled;
-
- if (psr->enabled && needs_to_disable)
- intel_psr_disable_locked(intel_dp);
- else if (psr->enabled && new_crtc_state->wm_level_disabled)
- /* Wa_14015648006 */
- wm_optimization_wa(intel_dp, new_crtc_state);
+ if (psr->enabled) {
+ /*
+ * Reasons to disable:
+ * - PSR disabled in new state
+ * - All planes will go inactive
+ * - Changing between PSR versions
+ * - Region Early Transport changing
+ * - Display WA #1136: skl, bxt
+ */
+ if (intel_crtc_needs_modeset(new_crtc_state) ||
+ !new_crtc_state->has_psr ||
+ !new_crtc_state->active_planes ||
+ new_crtc_state->has_sel_update != psr->sel_update_enabled ||
+ new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
+ new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
+ (DISPLAY_VER(i915) < 11 && new_crtc_state->wm_level_disabled))
+ intel_psr_disable_locked(intel_dp);
+ else if (new_crtc_state->wm_level_disabled)
+ /* Wa_14015648006 */
+ wm_optimization_wa(intel_dp, new_crtc_state);
+ }
mutex_unlock(&psr->lock);
}
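
The restructured check reads as a single predicate that is only evaluated while PSR is enabled. As a standalone sketch with stand-in state structs:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the relevant bits of crtc_state and intel_psr (hypothetical). */
struct new_state {
	bool needs_modeset;
	bool has_psr;
	bool has_sel_update;
	bool has_panel_replay;
	bool enable_su_region_et;
	bool active_planes;
	bool wm_level_disabled;
};

struct psr_state {
	bool enabled;
	bool sel_update_enabled;
	bool panel_replay_enabled;
	bool su_region_et_enabled;
};

/* True when the new state forces a full PSR disable before the update. */
static bool psr_needs_disable(const struct new_state *ns,
			      const struct psr_state *psr, int display_ver)
{
	if (!psr->enabled)
		return false;

	return ns->needs_modeset ||
	       !ns->has_psr ||
	       !ns->active_planes ||
	       ns->has_sel_update != psr->sel_update_enabled ||
	       ns->enable_su_region_et != psr->su_region_et_enabled ||
	       ns->has_panel_replay != psr->panel_replay_enabled ||
	       (display_ver < 11 && ns->wm_level_disabled); /* Display WA #1136 */
}

int main(void)
{
	struct new_state ns = { .has_psr = true, .active_planes = true,
				.has_panel_replay = true };
	struct psr_state psr = { .enabled = true };	/* PSR1 currently active */

	/* Switching between PSR and Panel Replay forces a disable first. */
	printf("needs disable: %d\n", psr_needs_disable(&ns, &psr, 20));
	return 0;
}
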
@@ -2858,7 +2887,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
- psr_force_hw_tracking_exit(intel_dp);
+ intel_psr_force_update(intel_dp);
/*
* Clear possible busy bits in case we have
@@ -3120,31 +3149,35 @@ unlock:
mutex_unlock(&intel_dp->psr.lock);
}
-static void _psr_invalidate_handle(struct intel_dp *intel_dp)
+static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- if (intel_dp->psr.psr2_sel_fetch_enabled) {
- u32 val;
-
- if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
- /* Send one update otherwise lag is observed in screen */
- intel_de_write(display,
- CURSURFLIVE(display, intel_dp->psr.pipe),
- 0);
- return;
- }
+ if (!intel_dp->psr.psr2_sel_fetch_enabled)
+ return;
- val = man_trk_ctl_enable_bit_get(display) |
- man_trk_ctl_partial_frame_bit_get(display) |
- man_trk_ctl_continuos_full_frame(display);
+ if (DISPLAY_VER(display) >= 20)
+ intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
+ LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
+ else
intel_de_write(display,
PSR2_MAN_TRK_CTL(display, cpu_transcoder),
- val);
- intel_de_write(display,
- CURSURFLIVE(display, intel_dp->psr.pipe), 0);
- intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
+ man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_single_full_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display));
+}
+
+static void _psr_invalidate_handle(struct intel_dp *intel_dp)
+{
+ if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
+ intel_psr_configure_full_frame_update(intel_dp);
+ }
+
+ intel_psr_force_update(intel_dp);
} else {
intel_psr_exit(intel_dp);
}
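
The full-frame update now splits on display version: Lunar Lake (display version 20) and newer have the dedicated LNL_SFF_CTL register, so the selective-update configuration already programmed into PSR2_MAN_TRK_CTL is left untouched, while older hardware keeps multiplexing the SFF/CFF bits into PSR2_MAN_TRK_CTL. A standalone model of the branch, with illustrative stand-in bit values:

#include <stdint.h>
#include <stdio.h>

#define SFF_CTL_SINGLE_FULL_FRAME	(1u << 0)	/* stand-in bit */
#define MAN_TRK_ENABLE			(1u << 31)	/* stand-in bits */
#define MAN_TRK_PARTIAL_FRAME		(1u << 30)
#define MAN_TRK_SINGLE_FULL_FRAME	(1u << 29)
#define MAN_TRK_CONTINUOS_FULL_FRAME	(1u << 28)

static void reg_write(const char *name, uint32_t val)
{
	printf("%s <- 0x%08x\n", name, val);
}

static void configure_full_frame_update(int display_ver)
{
	if (display_ver >= 20)
		/* SU configuration in PSR2_MAN_TRK_CTL is left untouched. */
		reg_write("LNL_SFF_CTL", SFF_CTL_SINGLE_FULL_FRAME);
	else
		reg_write("PSR2_MAN_TRK_CTL",
			  MAN_TRK_ENABLE | MAN_TRK_PARTIAL_FRAME |
			  MAN_TRK_SINGLE_FULL_FRAME |
			  MAN_TRK_CONTINUOS_FULL_FRAME);
}

int main(void)
{
	configure_full_frame_update(20);	/* Lunar Lake and newer */
	configure_full_frame_update(14);	/* Meteor Lake and older */
	return 0;
}
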
@@ -3225,44 +3258,31 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
struct drm_i915_private *dev_priv = to_i915(display->drm);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
- if (intel_dp->psr.busy_frontbuffer_bits == 0) {
- u32 val = man_trk_ctl_enable_bit_get(display) |
- man_trk_ctl_partial_frame_bit_get(display) |
- man_trk_ctl_single_full_frame_bit_get(display) |
- man_trk_ctl_continuos_full_frame(display);
-
- /*
- * Set psr2_sel_fetch_cff_enabled as false to allow selective
- * updates. Still keep cff bit enabled as we don't have proper
- * SU configuration in case update is sent for any reason after
- * sff bit gets cleared by the HW on next vblank.
- */
- intel_de_write(display,
- PSR2_MAN_TRK_CTL(display, cpu_transcoder),
- val);
- intel_de_write(display,
- CURSURFLIVE(display, intel_dp->psr.pipe),
- 0);
+ if (intel_dp->psr.busy_frontbuffer_bits == 0)
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
- }
- } else {
- /*
- * continuous full frame is disabled, only a single full
- * frame is required
- */
- psr_force_hw_tracking_exit(intel_dp);
}
- } else {
- psr_force_hw_tracking_exit(intel_dp);
- if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
- queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
+ /*
+ * Still keep cff bit enabled as we don't have proper SU
+ * configuration in case update is sent for any reason after
+ * sff bit gets cleared by the HW on next vblank.
+ *
+ * NOTE: Setting cff bit is not needed for LunarLake onwards as
+ * we have own register for SFF bit and we are not overwriting
+ * existing SU configuration
+ */
+ intel_psr_configure_full_frame_update(intel_dp);
}
+
+ intel_psr_force_update(intel_dp);
+
+ if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
+ !intel_dp->psr.busy_frontbuffer_bits)
+ queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
}
/**