Diffstat (limited to 'drivers/gpu/drm/i915/display/intel_dp.c')
 drivers/gpu/drm/i915/display/intel_dp.c | 297 ++++++++++++++++++++----------
 1 file changed, 197 insertions(+), 100 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index e2d991edfd89..6ece2c563c7a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -36,6 +36,7 @@
#include <asm/byteorder.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
#include <drm/display/drm_dsc_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -63,6 +64,7 @@
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_fifo_underrun.h"
@@ -152,6 +154,22 @@ int intel_dp_link_symbol_clock(int rate)
return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
}
+static int max_dprx_rate(struct intel_dp *intel_dp)
+{
+ if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+ return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+
+ return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+}
+
+static int max_dprx_lane_count(struct intel_dp *intel_dp)
+{
+ if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+ return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel);
+
+ return drm_dp_max_lane_count(intel_dp->dpcd);
+}
+
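For reference on the DPCD fallback above: drm_dp_bw_code_to_link_rate()
scales the bandwidth code by 27000, so the standard 8b/10b codes map to
the driver's link rate values (in units of 10 kbps) as sketched below.
This is a reference sketch, not part of the patch.

/*
 *   DP_LINK_BW_1_62 (0x06) -> 162000  (RBR,  1.62 Gbps per lane)
 *   DP_LINK_BW_2_7  (0x0a) -> 270000  (HBR,  2.70 Gbps per lane)
 *   DP_LINK_BW_5_4  (0x14) -> 540000  (HBR2, 5.40 Gbps per lane)
 *   DP_LINK_BW_8_1  (0x1e) -> 810000  (HBR3, 8.10 Gbps per lane)
 */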
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
{
intel_dp->sink_rates[0] = 162000;
@@ -180,7 +198,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
/*
* Sink rates for 8b/10b.
*/
- max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+ max_rate = max_dprx_rate(intel_dp);
max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
if (max_lttpr_rate)
max_rate = min(max_rate, max_lttpr_rate);
@@ -259,7 +277,7 @@ static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
- intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp);
switch (intel_dp->max_sink_lane_count) {
case 1:
@@ -309,7 +327,7 @@ static int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
}
/* Theoretical max between source and sink */
-static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
+int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
}
@@ -326,7 +344,7 @@ static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
}
/* Theoretical max between source and sink */
-static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
+int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
int source_max = intel_dp_max_source_lane_count(dig_port);
@@ -383,50 +401,27 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
1000000 * 16 * 8);
}
-/*
- * Given a link rate and lanes, get the data bandwidth.
- *
- * Data bandwidth is the actual payload rate, which depends on the data
- * bandwidth efficiency and the link rate.
+/**
+ * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params
+ * @intel_dp: Intel DP object
+ * @max_dprx_rate: Maximum data rate of the DPRX
+ * @max_dprx_lanes: Maximum lane count of the DPRX
*
- * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
- * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
- * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
- * coincidence, the port clock in kHz matches the data bandwidth in kBps, and
- * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
- * longer holds for data bandwidth as soon as FEC or MST is taken into account!)
+ * Calculate the maximum data rate for the provided link parameters, taking
+ * into account any BW limitations imposed by a DP tunnel attached to @intel_dp.
*
- * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
- * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
- * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
- * does not match the symbol clock, the port clock (not even if you think in
- * terms of a byte clock), nor the data bandwidth. It only matches the link bit
- * rate in units of 10000 bps.
+ * Returns the maximum data rate in kBps units.
*/
-int
-intel_dp_max_data_rate(int max_link_rate, int max_lanes)
+int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
+ int max_dprx_rate, int max_dprx_lanes)
{
- int ch_coding_efficiency =
- drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
- int max_link_rate_kbps = max_link_rate * 10;
+ int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes);
- /*
- * UHBR rates always use 128b/132b channel encoding, and have
- * 96.71% data bandwidth efficiency. Consider max_link_rate the
- * link bit rate in units of 10000 bps.
- */
- /*
- * Lower than UHBR rates always use 8b/10b channel encoding, and have
- * 80% data bandwidth efficiency for SST non-FEC. However, this turns
- * out to be a nop by coincidence:
- *
- * int max_link_rate_kbps = max_link_rate * 10;
- * max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10);
- * max_link_rate = max_link_rate_kbps / 8;
- */
- return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes,
- ch_coding_efficiency),
- 1000000 * 8);
+ if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+ max_rate = min(max_rate,
+ drm_dp_tunnel_available_bw(intel_dp->tunnel));
+
+ return max_rate;
}
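The worked examples from the removed comment still hold; the efficiency
math now lives in drm_dp_max_dprx_data_rate(). A standalone sketch of
that calculation, assuming the ppm efficiency values from the removed
code (example_max_data_rate_kBps() is illustrative, not a kernel
symbol):

static long long example_max_data_rate_kBps(long long link_rate, int lanes,
					    bool is_uhbr)
{
	/* Channel coding efficiency in ppm: 80% for 8b/10b, 96.71% for
	 * 128b/132b.
	 */
	long long efficiency_ppm = is_uhbr ? 967100 : 800000;

	/* link_rate is in 10 kbps units, the result in kBps:
	 *   RBR    x4:  162000 * 10 * 4 * 800000 / (1000000 * 8) =  648000
	 *   UHBR10 x1: 1000000 * 10 * 1 * 967100 / (1000000 * 8) = 1208875
	 */
	return link_rate * 10 * lanes * efficiency_ppm / (1000000 * 8);
}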
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
@@ -658,7 +653,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
int mode_rate, max_rate;
mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
- max_rate = intel_dp_max_data_rate(link_rate, lane_count);
+ max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count);
if (mode_rate > max_rate)
return false;
@@ -1205,11 +1200,13 @@ bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
int hdisplay, int clock)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
if (!intel_dp_can_bigjoiner(intel_dp))
return false;
- return clock > i915->max_dotclk_freq || hdisplay > 5120;
+ return clock > i915->max_dotclk_freq || hdisplay > 5120 ||
+ connector->force_bigjoiner_enable;
}
static enum drm_mode_status
@@ -1260,7 +1257,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
- max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+ max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes);
+
mode_rate = intel_dp_link_required(target_clock,
intel_dp_mode_min_output_bpp(connector, mode));
@@ -1610,8 +1608,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- link_avail = intel_dp_max_data_rate(link_rate,
- lane_count);
+ link_avail = intel_dp_max_link_data_rate(intel_dp,
+ link_rate,
+ lane_count);
+
if (mode_rate <= link_avail) {
pipe_config->lane_count = lane_count;
@@ -2387,6 +2387,17 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits);
}
+int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int bpp = crtc_state->dsc.compression_enable ?
+ to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
+ crtc_state->pipe_bpp;
+
+ return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
+}
+
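A worked example for the helper above, with illustrative numbers
(intel_dp_link_required() divides pixel_clock * bpp by 8, rounding up,
so a kHz clock yields kBps):

/*
 *   1080p60, crtc_clock = 148500 kHz:
 *   no DSC, pipe_bpp = 24:           148500 * 24 / 8 = 445500 kBps
 *   DSC, compressed_bpp_x16 = 192,
 *   to_bpp_int_roundup() -> 12 bpp:  148500 * 12 / 8 = 222750 kBps
 *
 * Both fit a 4-lane RBR link (648000 kBps); DSC halves the requirement.
 */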
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2454,31 +2465,16 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return ret;
}
- if (pipe_config->dsc.compression_enable) {
- drm_dbg_kms(&i915->drm,
- "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp,
- BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
-
- drm_dbg_kms(&i915->drm,
- "DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
- } else {
- drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
+ intel_dp_config_required_rate(pipe_config),
+ intel_dp_max_link_data_rate(intel_dp,
+ pipe_config->port_clock,
+ pipe_config->lane_count));
- drm_dbg_kms(&i915->drm,
- "DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->pipe_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
- }
return 0;
}
@@ -2840,12 +2836,47 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
intel_dp_is_uhbr(pipe_config);
}
+void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ drm_connector_get(&connector->base);
+ if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
+ drm_connector_put(&connector->base);
+}
+
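The reference handling above pairs with the drm_connector_put() added
to intel_dp_modeset_retry_work_fn() at the end of this patch. A
lifetime sketch:

/*
 *   drm_connector_get()    take a reference for the work item
 *   queue_work() == true   the work owns the reference; it is dropped
 *                          by intel_dp_modeset_retry_work_fn() via the
 *                          drm_connector_put() added below
 *   queue_work() == false  the work was already queued and holds a
 *                          reference, so drop the duplicate right away
 */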
+void
+intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_connector *connector;
+ struct intel_digital_connector_state *conn_state;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int i;
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);
+
+ return;
+ }
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ if (!conn_state->base.crtc)
+ continue;
+
+ if (connector->mst_port == intel_dp)
+ intel_dp_queue_modeset_retry_work(connector);
+ }
+}
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
const struct drm_display_mode *fixed_mode;
@@ -2946,7 +2977,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
- return 0;
+ return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
+ pipe_config);
}
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -3282,18 +3314,21 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (!crtc_state)
- return;
+ bool dpcd_updated = false;
/*
* Don't clobber DPCD if it's already been read out during output
* setup (eDP) or detect.
*/
- if (intel_dp->dpcd[DP_DPCD_REV] == 0)
+ if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) {
intel_dp_get_dpcd(intel_dp);
+ dpcd_updated = true;
+ }
- intel_dp_reset_max_link_params(intel_dp);
+ intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);
+
+ if (crtc_state)
+ intel_dp_reset_max_link_params(intel_dp);
}
bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
@@ -3959,6 +3994,13 @@ intel_dp_has_sink_count(struct intel_dp *intel_dp)
&intel_dp->desc);
}
+void intel_dp_update_sink_caps(struct intel_dp *intel_dp)
+{
+ intel_dp_set_sink_rates(intel_dp);
+ intel_dp_set_max_sink_lane_count(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
+}
+
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
@@ -3975,9 +4017,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
- intel_dp_set_sink_rates(intel_dp);
- intel_dp_set_max_sink_lane_count(intel_dp);
- intel_dp_set_common_rates(intel_dp);
+ intel_dp_update_sink_caps(intel_dp);
}
if (intel_dp_has_sink_count(intel_dp)) {
@@ -4800,13 +4840,15 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
* - %true if pending interrupts were serviced (or no interrupts were
* pending) w/o detecting an error condition.
* - %false if an error condition - like AUX failure or a loss of link - is
- * detected, which needs servicing from the hotplug work.
+ * detected, or another condition - like a DP tunnel BW state change - needs
+ * servicing from the hotplug work.
*/
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool link_ok = true;
+ bool reprobe_needed = false;
drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
@@ -4833,6 +4875,13 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
intel_dp_mst_hpd_irq(intel_dp, esi, ack);
+ if (esi[3] & DP_TUNNELING_IRQ) {
+ if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+ &intel_dp->aux))
+ reprobe_needed = true;
+ ack[3] |= DP_TUNNELING_IRQ;
+ }
+
if (!memchr_inv(ack, 0, sizeof(ack)))
break;
@@ -4843,7 +4892,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
}
- return link_ok;
+ return link_ok && !reprobe_needed;
}
static void
@@ -4970,9 +5019,10 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
if (!crtc_state->hw.active)
continue;
- if (conn_state->commit &&
- !try_wait_for_completion(&conn_state->commit->hw_done))
- continue;
+ if (conn_state->commit)
+ drm_WARN_ON(&i915->drm,
+ !wait_for_completion_timeout(&conn_state->commit->hw_done,
+ msecs_to_jiffies(5000)));
*pipe_mask |= BIT(crtc->pipe);
}
@@ -5202,23 +5252,32 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
-static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool reprobe_needed = false;
u8 val;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
- return;
+ return false;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
- return;
+ return false;
+
+ if ((val & DP_TUNNELING_IRQ) &&
+ drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+ &intel_dp->aux))
+ reprobe_needed = true;
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
- return;
+ return reprobe_needed;
if (val & HDMI_LINK_STATUS_CHANGED)
intel_dp_handle_hdmi_link_status_change(intel_dp);
+
+ return reprobe_needed;
}
/*
@@ -5239,6 +5298,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 old_sink_count = intel_dp->sink_count;
+ bool reprobe_needed = false;
bool ret;
/*
@@ -5261,7 +5321,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
}
intel_dp_check_device_service_irq(intel_dp);
- intel_dp_check_link_service_irq(intel_dp);
+ reprobe_needed = intel_dp_check_link_service_irq(intel_dp);
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
@@ -5288,10 +5348,10 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
* FIXME get rid of the ad-hoc phy test modeset code
* and properly incorporate it into the normal modeset.
*/
- return false;
+ reprobe_needed = true;
}
- return true;
+ return !reprobe_needed;
}
/* XXX this is probably wrong for multiple downstream ports */
@@ -5601,6 +5661,7 @@ intel_dp_detect(struct drm_connector *connector,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
+ int ret;
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -5636,9 +5697,18 @@ intel_dp_detect(struct drm_connector *connector,
intel_dp->is_mst);
}
+ intel_dp_tunnel_disconnect(intel_dp);
+
goto out;
}
+ ret = intel_dp_tunnel_detect(intel_dp, ctx);
+ if (ret == -EDEADLK)
+ return ret;
+
+ if (ret == 1)
+ intel_connector->base.epoch_counter++;
+
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
intel_dp_configure_mst(intel_dp);
@@ -5669,8 +5739,6 @@ intel_dp_detect(struct drm_connector *connector,
* with an IRQ_HPD, so force a link status check.
*/
if (!intel_dp_is_edp(intel_dp)) {
- int ret;
-
ret = intel_dp_retrain_link(encoder, ctx);
if (ret)
return ret;
@@ -5810,6 +5878,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
intel_dp_mst_encoder_cleanup(dig_port);
+ intel_dp_tunnel_destroy(intel_dp);
+
intel_pps_vdd_off_sync(intel_dp);
/*
@@ -5826,6 +5896,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
intel_pps_vdd_off_sync(intel_dp);
+
+ intel_dp_tunnel_suspend(intel_dp);
}
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
@@ -5963,6 +6035,15 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return ret;
}
+ if (!intel_connector_needs_modeset(state, conn))
+ return 0;
+
+ ret = intel_dp_tunnel_atomic_check_state(state,
+ intel_dp,
+ intel_conn);
+ if (ret)
+ return ret;
+
/*
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
@@ -5970,9 +6051,6 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
if (DISPLAY_VER(dev_priv) < 9)
return 0;
- if (!intel_connector_needs_modeset(state, conn))
- return 0;
-
if (conn->has_tile) {
ret = intel_modeset_tile_group(state, conn->tile_group->id);
if (ret)
@@ -6029,6 +6107,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_dp *intel_dp = &dig_port->dp;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
if (dig_port->base.type == INTEL_OUTPUT_EDP &&
(long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
@@ -6051,6 +6130,17 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
dig_port->base.base.name,
long_hpd ? "long" : "short");
+ /*
+ * TBT DP tunnels require the GFX driver to read out the DPRX caps in
+ * response to long HPD pulses. The DP hotplug handler does that;
+ * however, it may be blocked by another connector's/encoder's hotplug
+ * handler. Since the TBT CM may not complete the DP tunnel BW request
+ * for that other connector/encoder while it waits for this encoder's
+ * DPRX read, perform a dummy read here.
+ */
+ if (long_hpd)
+ intel_dp_read_dprx_caps(intel_dp, dpcd);
+
if (long_hpd) {
intel_dp->reset_link_params = true;
return IRQ_NONE;
@@ -6371,6 +6461,14 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
mutex_unlock(&connector->dev->mode_config.mutex);
/* Send Hotplug uevent so userspace can reprobe */
drm_kms_helper_connector_hotplug_event(connector);
+
+ drm_connector_put(connector);
+}
+
+void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
+{
+ INIT_WORK(&connector->modeset_retry_work,
+ intel_dp_modeset_retry_work_fn);
}
bool
@@ -6387,8 +6485,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
int type;
/* Initialize the work for modeset in case of link train failure */
- INIT_WORK(&intel_connector->modeset_retry_work,
- intel_dp_modeset_retry_work_fn);
+ intel_dp_init_modeset_retry_work(intel_connector);
if (drm_WARN(dev, dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",